code stringlengths 3-1.05M | repo_name stringlengths 5-104 | path stringlengths 4-251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3-1.05M |
---|---|---|---|---|---|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import sens_calc as sens
import sys, os
from astropy.io import fits
import aplpy
from astropy.wcs import WCS
plt.ion()
telescope = sys.argv[1]
rootdir = '.'
bands = np.array([250, 350, 500]) # microns
t_map = np.array([10., 5., 3., 1]) # hours
t_dust = 15.0 # K
mapsize = 0.5**2 # deg^2
blast06_beam_area = sens.arcsec2toDeg2(sens.A_beam_06) # deg^2
blastTNG_beam_area = sens.arcsec2toDeg2(sens.A_beam_tng) # deg^2
blast_beam_per_pix = blast06_beam_area / blastTNG_beam_area
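# Ratio used below to convert a count of map pixels into an equivalent number of
# BLAST-TNG beams (assuming each BLAST06 map pixel covers one BLAST06 beam area).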
iras_100_beam = sens.arcsec2toDeg2(2.0 * 60.0**2) # deg^2
sig_p = 0.01 # 1 percent
def galContour(obj_name, filepath, band_idx, IRAS = False):
    print filepath
    print "Band =", bands[band_idx]
    # Read in the HDUlist from the FITS file:
    hdulist = fits.open(filepath)
    # Read in the data from the Image HDU:
    data = hdulist[0].data
    mapsize = 0.5**2 # deg^2
    # Minimum observable intensity (MJy/str) achievable by BLAST-TNG
    # by observing a map of map_size (deg^2), with 1-sigma error bars in
    # pol fraction, sig_p, in t hours.
    contour_levels = sens.I_min(mapsize, sig_p, t_map, band_idx)
    print contour_levels
    Npix = data[data >= np.min(contour_levels)].size
    print "N pix:", Npix
    N_blast_beams = Npix * blast_beam_per_pix
    if IRAS:
        N_blast_beams = Npix * iras_100_beam / blastTNG_beam_area
        # Scale the IRAS 100 micron map to this band using the flux ratio
        # f_nu(band) / f_nu(100 micron) evaluated at t_dust.
        f_ratio = sens.f_nu_T(sens.c/(bands[band_idx] * 1.0e-6), t_dust) /\
                  sens.f_nu_T(sens.c/(100.0e-6), t_dust)
        print "f(" + str(bands[band_idx]) + ")/f(100) =", f_ratio
        data *= f_ratio
    minval = np.nanmin(data)
    maxval = np.nanmax(data)
    sig = np.std(data)
    mean = np.mean(data)
    print "I min, I max =", minval, maxval, "MJy"
    print "Number of BLAST beams =", N_blast_beams[band_idx]
    for i in range(len(t_map)):
        print "t (hr) =", t_map[i], ": I =", contour_levels[i], "MJy/str"
    print
    return contour_levels
def contourPlot(obj_name, filepath, band_idx, contour_levels):
    fig = plt.figure(figsize = (10.24, 7.68))
    hdulist = fits.open(filepath)
    data = hdulist[0].data
    wcs = WCS(hdulist[0].header)
    cen_coord = wcs.wcs.crval
    hdu = fits.PrimaryHDU(data, header=wcs.to_header())
    f = aplpy.FITSFigure(hdu, figure = fig)
    f.recenter(cen_coord[0], cen_coord[1], width = 40.0, height = 40.0)
    f.show_contour(filepath, cmap = matplotlib.cm.viridis, filled = True, levels = contour_levels, alpha = 0.5)
    f.set_theme('publication')
    ax = plt.gca()
    ax.set_facecolor("k")
    f.show_colorscale(cmap = 'gray')
    f.frame.set_linewidth(1) # points
    f.frame.set_color('black')
    f.set_yaxis_coord_type('latitude')
    f.tick_labels.set_xformat('dd.dd')
    f.set_xaxis_coord_type('longitude')
    f.tick_labels.set_yformat('dd.dd')
    f.axis_labels.set_font(size = 16)
    f.tick_labels.set_font(size = 14)
    f.add_colorbar()
    f.colorbar.set_location('bottom')
    f.colorbar.set_axis_label_font(size = 'large')
    f.colorbar.set_axis_label_text('MJy sr^-1')
    f.colorbar.set_axis_label_rotation(90)
    f.colorbar.show()
    f.set_title(obj_name + ", " + str(bands[band_idx]) + r" $\mu$m Intensity", size = 16)
    #plt.tight_layout()
    return f
if telescope == 'blast':
    for i in range(len(bands)):
        #levels = galContour('NGC 1808', rootdir + '/ngc1808_' + str(bands[i]) + '.fits', i)
        #contourPlot('NGC 1808', rootdir + '/ngc1808_' + str(bands[i]) + '.fits', i, levels)
        levels = galContour('NGC 1566', rootdir + '/ngc1566_' + str(bands[i]) + '.fits', i)
        f = contourPlot('NGC 1566', rootdir + '/ngc1566_' + str(bands[i]) + '.fits', i, levels)
if telescope == 'iras':
    for i in range(len(bands)):
        galContour('M 83', rootdir + '/m83_iras_100um.fits', i, IRAS = True)
        galContour('NGC 4945', rootdir + '/ngc4945_iras_100um.fits', i, IRAS = True)
        galContour('NGC 1808', rootdir + '/ngc1808_iras_100um.fits', i, IRAS = True)
        galContour('NGC 6744', rootdir + '/ngc6744_iras_100um.fits', i, IRAS = True)
        galContour('NGC 5068', rootdir + '/ngc5068_iras_100um.fits', i, IRAS = True)
        galContour('NGC 2835', rootdir + '/ngc2835_iras_100um.fits', i, IRAS = True)
| sbg2133/miscellaneous_projects | galaxies/plot_gals_time.py | Python | gpl-3.0 | 4,294 |
# Copyright (C) 2013-2014 2ndQuadrant Italia (Devise.IT S.r.l.)
#
# This file is part of Barman.
#
# Barman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Barman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Barman. If not, see <http://www.gnu.org/licenses/>.
import mock
import pytest
from barman import output
from barman.infofile import BackupInfo
from barman.testing_helpers import mock_backup_info, mock_backup_ext_info
from barman.utils import pretty_size
def teardown_module(module):
"""
Set the output API to a functional state, after testing it
"""
output.set_output_writer(output.DEFAULT_WRITER)
#noinspection PyMethodMayBeStatic
class TestOutputAPI(object):
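# Each test below installs a fresh mocked writer via _mock_writer(), calls the
# output API, then checks the log records captured by caplog (where relevant),
# the calls made on the mocked writer, and the global output.error_occurred flag.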
@staticmethod
def _mock_writer():
# install a fresh mocked output writer
writer = mock.Mock()
output.set_output_writer(writer)
# reset the error status
output.error_occurred = False
return writer
#noinspection PyProtectedMember,PyUnresolvedReferences
@mock.patch.dict(output.AVAILABLE_WRITERS, mock=mock.Mock())
def test_set_output_writer_close(self):
old_writer = mock.Mock()
output.set_output_writer(old_writer)
assert output._writer == old_writer
args = ('1', 'two')
kwargs = dict(three=3, four=5)
output.set_output_writer('mock', *args, **kwargs)
old_writer.close.assert_called_once_with()
output.AVAILABLE_WRITERS['mock'].assert_called_once_with(*args,
**kwargs)
def test_debug(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test message'
output.debug(msg)
# logging test
for record in caplog.records():
assert record.levelname == 'DEBUG'
assert record.name == __name__
assert msg in caplog.text()
# writer test
assert not writer.error_occurred.called
writer.debug.assert_called_once_with(msg)
# global status test
assert not output.error_occurred
def test_debug_with_args(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
output.debug(msg, *args)
# logging test
for record in caplog.records():
assert record.levelname == 'DEBUG'
assert record.name == __name__
assert msg % args in caplog.text()
# writer test
assert not writer.error_occurred.called
writer.debug.assert_called_once_with(msg, *args)
# global status test
assert not output.error_occurred
def test_debug_error(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test message'
output.debug(msg, is_error=True)
# logging test
for record in caplog.records():
assert record.levelname == 'DEBUG'
assert record.name == __name__
assert msg in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.debug.assert_called_once_with(msg)
# global status test
assert output.error_occurred
def test_debug_with_kwargs(self):
# preparation
self._mock_writer()
with pytest.raises(TypeError):
output.debug('message', bad_arg=True)
def test_info(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test message'
output.info(msg)
# logging test
for record in caplog.records():
assert record.levelname == 'INFO'
assert record.name == __name__
assert msg in caplog.text()
# writer test
assert not writer.error_occurred.called
writer.info.assert_called_once_with(msg)
# global status test
assert not output.error_occurred
def test_info_with_args(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
output.info(msg, *args)
# logging test
for record in caplog.records():
assert record.levelname == 'INFO'
assert record.name == __name__
assert msg % args in caplog.text()
# writer test
assert not writer.error_occurred.called
writer.info.assert_called_once_with(msg, *args)
# global status test
assert not output.error_occurred
def test_info_error(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test message'
output.info(msg, is_error=True)
# logging test
for record in caplog.records():
assert record.levelname == 'INFO'
assert record.name == __name__
assert msg in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.info.assert_called_once_with(msg)
# global status test
assert output.error_occurred
def test_warning(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test message'
output.warning(msg)
# logging test
for record in caplog.records():
assert record.levelname == 'WARNING'
assert record.name == __name__
assert msg in caplog.text()
# writer test
assert not writer.error_occurred.called
writer.warning.assert_called_once_with(msg)
# global status test
assert not output.error_occurred
def test_warning_with_args(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
output.warning(msg, *args)
# logging test
for record in caplog.records():
assert record.levelname == 'WARNING'
assert record.name == __name__
assert msg % args in caplog.text()
# writer test
assert not writer.error_occurred.called
writer.warning.assert_called_once_with(msg, *args)
# global status test
assert not output.error_occurred
def test_warning_error(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test message'
output.warning(msg, is_error=True)
# logging test
for record in caplog.records():
assert record.levelname == 'WARNING'
assert record.name == __name__
assert msg in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.warning.assert_called_once_with(msg)
# global status test
assert output.error_occurred
def test_error(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test message'
output.error(msg)
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert record.name == __name__
assert msg in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.error.assert_called_once_with(msg)
# global status test
assert output.error_occurred
def test_error_with_args(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
output.error(msg, *args)
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert record.name == __name__
assert msg % args in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.error.assert_called_once_with(msg, *args)
# global status test
assert output.error_occurred
def test_error_with_ignore(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
output.error(msg, ignore=True, *args)
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert record.name == __name__
assert msg % args in caplog.text()
# writer test
assert not writer.error_occurred.called
writer.error.assert_called_once_with(msg, *args)
# global status test
assert not output.error_occurred
def test_exception(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test message'
try:
raise ValueError('test exception')
except ValueError:
output.exception(msg)
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert record.name == __name__
assert msg in caplog.text()
assert 'Traceback' in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.exception.assert_called_once_with(msg)
# global status test
assert output.error_occurred
def test_exception_with_args(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
try:
raise ValueError('test exception')
except ValueError:
output.exception(msg, *args)
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert record.name == __name__
assert msg % args in caplog.text()
assert 'Traceback' in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.exception.assert_called_once_with(msg, *args)
# global status test
assert output.error_occurred
def test_exception_with_ignore(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
try:
raise ValueError('test exception')
except ValueError:
output.exception(msg, ignore=True, *args)
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert record.name == __name__
assert msg % args in caplog.text()
assert 'Traceback' in caplog.text()
# writer test
assert not writer.error_occurred.called
writer.exception.assert_called_once_with(msg, *args)
# global status test
assert not output.error_occurred
def test_exception_with_raise(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
try:
raise ValueError('test exception')
except ValueError:
with pytest.raises(ValueError):
output.exception(msg, raise_exception=True, *args)
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert record.name == __name__
assert msg % args in caplog.text()
assert 'Traceback' in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.exception.assert_called_once_with(msg, *args)
# global status test
assert output.error_occurred
def test_exception_with_raise_object(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
try:
raise ValueError('test exception')
except ValueError:
with pytest.raises(KeyError):
output.exception(msg, raise_exception=KeyError(), *args)
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert record.name == __name__
assert msg % args in caplog.text()
assert 'Traceback' in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.exception.assert_called_once_with(msg, *args)
# global status test
assert output.error_occurred
def test_exception_with_raise_class(self, caplog):
# preparation
writer = self._mock_writer()
msg = 'test format %02d %s'
args = (1, '2nd')
try:
raise ValueError('test exception')
except ValueError:
with pytest.raises(KeyError):
output.exception(msg, raise_exception=KeyError, *args)
assert msg % args in caplog.text()
assert 'Traceback' in caplog.text()
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert record.name == __name__
# writer test
writer.error_occurred.assert_called_once_with()
writer.exception.assert_called_once_with(msg, *args)
# global status test
assert output.error_occurred
def test_init(self):
# preparation
writer = self._mock_writer()
args = ('1', 'two')
kwargs = dict(three=3, four=5)
output.init('command', *args, **kwargs)
output.init('another_command')
# writer test
writer.init_command.assert_called_once_with(*args, **kwargs)
writer.init_another_command.assert_called_once_with()
@mock.patch('sys.exit')
def test_init_bad_command(self, exit_mock, caplog):
# preparation
writer = self._mock_writer()
del writer.init_bad_command
output.init('bad_command')
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert 'bad_command' in caplog.text()
assert 'Traceback' in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.exception.assert_called_once()
# exit with error
assert exit_mock.called
assert exit_mock.call_count == 1
assert exit_mock.call_args[0] != 0
def test_result(self):
# preparation
writer = self._mock_writer()
args = ('1', 'two')
kwargs = dict(three=3, four=5)
output.result('command', *args, **kwargs)
output.result('another_command')
# writer test
writer.result_command.assert_called_once_with(*args, **kwargs)
writer.result_another_command.assert_called_once_with()
@mock.patch('sys.exit')
def test_result_bad_command(self, exit_mock, caplog):
# preparation
writer = self._mock_writer()
del writer.result_bad_command
output.result('bad_command')
# logging test
for record in caplog.records():
assert record.levelname == 'ERROR'
assert 'bad_command' in caplog.text()
assert 'Traceback' in caplog.text()
# writer test
writer.error_occurred.assert_called_once_with()
writer.exception.assert_called_once()
# exit with error
assert exit_mock.called
assert exit_mock.call_count == 1
assert exit_mock.call_args[0] != 0
def test_close(self):
# preparation
writer = self._mock_writer()
output.close()
writer.close.assert_called_once_with()
@mock.patch('sys.exit')
def test_close_and_exit(self, exit_mock):
# preparation
writer = self._mock_writer()
output.close_and_exit()
writer.close.assert_called_once_with()
exit_mock.assert_called_once_with(0)
@mock.patch('sys.exit')
def test_close_and_exit_with_error(self, exit_mock):
# preparation
writer = self._mock_writer()
output.error_occurred = True
output.close_and_exit()
writer.close.assert_called_once_with()
assert exit_mock.called
assert exit_mock.call_count == 1
assert exit_mock.call_args[0] != 0
#noinspection PyMethodMayBeStatic
class TestConsoleWriter(object):
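# These tests exercise the real ConsoleOutputWriter and rely on pytest's capsys
# fixture to capture stdout/stderr, checking the message prefixes (DEBUG:,
# WARNING:, ERROR:, EXCEPTION:) and the quiet/debug switches.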
def test_debug(self, capsys):
writer = output.ConsoleOutputWriter(debug=True)
msg = 'test message'
writer.debug(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'DEBUG: ' + msg + '\n'
msg = 'test arg %s'
args = ('1st',)
writer.debug(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'DEBUG: ' + msg % args + '\n'
msg = 'test args %d %s'
args = (1, 'two')
writer.debug(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'DEBUG: ' + msg % args + '\n'
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.debug(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'DEBUG: ' + msg % kwargs + '\n'
def test_debug_disabled(self, capsys):
writer = output.ConsoleOutputWriter(debug=False)
msg = 'test message'
writer.debug(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test arg %s'
args = ('1st',)
writer.debug(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test args %d %s'
args = (1, 'two')
writer.debug(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.debug(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
def test_info_verbose(self, capsys):
writer = output.ConsoleOutputWriter(quiet=False)
msg = 'test message'
writer.info(msg)
(out, err) = capsys.readouterr()
assert out == msg + '\n'
assert err == ''
msg = 'test arg %s'
args = ('1st',)
writer.info(msg, *args)
(out, err) = capsys.readouterr()
assert out == msg % args + '\n'
assert err == ''
msg = 'test args %d %s'
args = (1, 'two')
writer.info(msg, *args)
(out, err) = capsys.readouterr()
assert out == msg % args + '\n'
assert err == ''
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.info(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == msg % kwargs + '\n'
assert err == ''
def test_info_quiet(self, capsys):
writer = output.ConsoleOutputWriter(quiet=True)
msg = 'test message'
writer.info(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test arg %s'
args = ('1st',)
writer.info(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test args %d %s'
args = (1, 'two')
writer.info(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.info(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
def test_warning(self, capsys):
writer = output.ConsoleOutputWriter()
msg = 'test message'
writer.warning(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'WARNING: ' + msg + '\n'
msg = 'test arg %s'
args = ('1st',)
writer.warning(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'WARNING: ' + msg % args + '\n'
msg = 'test args %d %s'
args = (1, 'two')
writer.warning(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'WARNING: ' + msg % args + '\n'
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.warning(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'WARNING: ' + msg % kwargs + '\n'
def test_error(self, capsys):
writer = output.ConsoleOutputWriter()
msg = 'test message'
writer.error(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'ERROR: ' + msg + '\n'
msg = 'test arg %s'
args = ('1st',)
writer.error(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'ERROR: ' + msg % args + '\n'
msg = 'test args %d %s'
args = (1, 'two')
writer.error(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'ERROR: ' + msg % args + '\n'
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.error(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'ERROR: ' + msg % kwargs + '\n'
def test_exception(self, capsys):
writer = output.ConsoleOutputWriter()
msg = 'test message'
writer.exception(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'EXCEPTION: ' + msg + '\n'
msg = 'test arg %s'
args = ('1st',)
writer.exception(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'EXCEPTION: ' + msg % args + '\n'
msg = 'test args %d %s'
args = (1, 'two')
writer.exception(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'EXCEPTION: ' + msg % args + '\n'
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.exception(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == 'EXCEPTION: ' + msg % kwargs + '\n'
def test_init_check(self, capsys):
writer = output.ConsoleOutputWriter()
server = 'test'
writer.init_check(server)
(out, err) = capsys.readouterr()
assert out == 'Server %s:\n' % server
assert err == ''
def test_result_check_ok(self, capsys):
writer = output.ConsoleOutputWriter()
output.error_occurred = False
server = 'test'
check = 'test check'
writer.result_check(server, check, True)
(out, err) = capsys.readouterr()
assert out == '\t%s: OK\n' % check
assert err == ''
assert not output.error_occurred
def test_result_check_ok_hint(self, capsys):
writer = output.ConsoleOutputWriter()
output.error_occurred = False
server = 'test'
check = 'test check'
hint = 'do something'
writer.result_check(server, check, True, hint)
(out, err) = capsys.readouterr()
assert out == '\t%s: OK (%s)\n' % (check, hint)
assert err == ''
assert not output.error_occurred
def test_result_check_failed(self, capsys):
writer = output.ConsoleOutputWriter()
output.error_occurred = False
server = 'test'
check = 'test check'
writer.result_check(server, check, False)
(out, err) = capsys.readouterr()
assert out == '\t%s: FAILED\n' % check
assert err == ''
assert output.error_occurred
def test_result_check_failed_hint(self, capsys):
writer = output.ConsoleOutputWriter()
output.error_occurred = False
server = 'test'
check = 'test check'
hint = 'do something'
writer.result_check(server, check, False, hint)
(out, err) = capsys.readouterr()
assert out == '\t%s: FAILED (%s)\n' % (check, hint)
assert err == ''
assert output.error_occurred
def test_init_list_backup(self):
writer = output.ConsoleOutputWriter()
writer.init_list_backup('test server')
assert not writer.minimal
writer.init_list_backup('test server', True)
assert writer.minimal
def test_result_list_backup(self, capsys):
# mock the backup info
bi = mock_backup_info()
backup_size = 12345
wal_size = 54321
retention_status = 'test status'
writer = output.ConsoleOutputWriter()
# test minimal
writer.init_list_backup(bi.server_name, True)
writer.result_list_backup(bi, backup_size, wal_size, retention_status)
writer.close()
(out, err) = capsys.readouterr()
assert writer.minimal
assert bi.backup_id in out
assert err == ''
# test status=DONE output
writer.init_list_backup(bi.server_name, False)
writer.result_list_backup(bi, backup_size, wal_size, retention_status)
writer.close()
(out, err) = capsys.readouterr()
assert not writer.minimal
assert bi.server_name in out
assert bi.backup_id in out
assert str(bi.end_time.ctime()) in out
for name, _, location in bi.tablespaces:
assert '%s:%s' % (name, location)
assert 'Size: ' + pretty_size(backup_size) in out
assert 'WAL Size: ' + pretty_size(wal_size) in out
assert err == ''
# test status = FAILED output
bi = mock_backup_info(status=BackupInfo.FAILED)
writer.init_list_backup(bi.server_name, False)
writer.result_list_backup(bi, backup_size, wal_size, retention_status)
writer.close()
(out, err) = capsys.readouterr()
assert not writer.minimal
assert bi.server_name in out
assert bi.backup_id in out
assert bi.status in out
def test_result_show_backup(self, capsys):
# mock the backup ext info
ext_info = mock_backup_ext_info()
writer = output.ConsoleOutputWriter()
# test minimal
writer.result_show_backup(ext_info)
writer.close()
(out, err) = capsys.readouterr()
assert ext_info['server_name'] in out
assert ext_info['backup_id'] in out
assert ext_info['status'] in out
assert str(ext_info['end_time']) in out
for name, _, location in ext_info['tablespaces']:
assert '%s: %s' % (name, location) in out
assert (pretty_size(ext_info['size'] + ext_info['wal_size'])) in out
assert (pretty_size(ext_info['wal_until_next_size'])) in out
# TODO: this test can be expanded
assert err == ''
def test_result_show_backup_error(self, capsys):
# mock the backup ext info
msg = 'test error message'
ext_info = mock_backup_ext_info(status=BackupInfo.FAILED, error=msg)
writer = output.ConsoleOutputWriter()
# test minimal
writer.result_show_backup(ext_info)
writer.close()
(out, err) = capsys.readouterr()
assert ext_info['server_name'] in out
assert ext_info['backup_id'] in out
assert ext_info['status'] in out
assert str(ext_info['end_time']) not in out
assert msg in out
assert err == ''
def test_init_status(self, capsys):
writer = output.ConsoleOutputWriter()
server = 'test'
writer.init_status(server)
(out, err) = capsys.readouterr()
assert out == 'Server %s:\n' % server
assert err == ''
def test_result_status(self, capsys):
writer = output.ConsoleOutputWriter()
server = 'test'
name = 'test name'
description = 'test description'
message = 'test message'
writer.result_status(server, name, description, message)
(out, err) = capsys.readouterr()
assert out == '\t%s: %s\n' % (description, message)
assert err == ''
def test_result_status_non_str(self, capsys):
writer = output.ConsoleOutputWriter()
server = 'test'
name = 'test name'
description = 'test description'
message = 1
writer.result_status(server, name, description, message)
(out, err) = capsys.readouterr()
assert out == '\t%s: %s\n' % (description, message)
assert err == ''
#noinspection PyMethodMayBeStatic
class TestNagiosWriter(object):
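# The Nagios writer is expected to suppress debug/info/warning/error/exception
# output entirely; only result_check() followed by close() produces the final
# "BARMAN OK" / "BARMAN CRITICAL" status line asserted below.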
def test_debug(self, capsys):
writer = output.NagiosOutputWriter()
msg = 'test message'
writer.debug(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test arg %s'
args = ('1st',)
writer.debug(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test args %d %s'
args = (1, 'two')
writer.debug(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.debug(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
def test_debug_disabled(self, capsys):
writer = output.NagiosOutputWriter(debug=False)
msg = 'test message'
writer.debug(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test arg %s'
args = ('1st',)
writer.debug(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test args %d %s'
args = (1, 'two')
writer.debug(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.debug(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
def test_info(self, capsys):
writer = output.NagiosOutputWriter()
msg = 'test message'
writer.info(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test arg %s'
args = ('1st',)
writer.info(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test args %d %s'
args = (1, 'two')
writer.info(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.info(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
def test_warning(self, capsys):
writer = output.NagiosOutputWriter()
msg = 'test message'
writer.warning(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test arg %s'
args = ('1st',)
writer.warning(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test args %d %s'
args = (1, 'two')
writer.warning(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.warning(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
def test_error(self, capsys):
writer = output.NagiosOutputWriter()
msg = 'test message'
writer.error(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test arg %s'
args = ('1st',)
writer.error(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test args %d %s'
args = (1, 'two')
writer.error(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.error(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
def test_exception(self, capsys):
writer = output.NagiosOutputWriter()
msg = 'test message'
writer.exception(msg)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test arg %s'
args = ('1st',)
writer.exception(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test args %d %s'
args = (1, 'two')
writer.exception(msg, *args)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
msg = 'test kwargs %(num)d %(string)s'
kwargs = dict(num=1, string='two')
writer.exception(msg, kwargs)
(out, err) = capsys.readouterr()
assert out == ''
assert err == ''
def test_single_result_check(self, capsys):
writer = output.NagiosOutputWriter()
output.error_occurred = False
# one server with no error
writer.result_check('a', 'test', True, None)
writer.close()
(out, err) = capsys.readouterr()
assert out == 'BARMAN OK - Ready to serve the Espresso backup ' \
'for a\n'
assert err == ''
assert not output.error_occurred
def test_result_check(self, capsys):
writer = output.NagiosOutputWriter()
output.error_occurred = False
# three server with no error
writer.result_check('a', 'test', True, None)
writer.result_check('b', 'test', True, None)
writer.result_check('c', 'test', True, None)
writer.close()
(out, err) = capsys.readouterr()
assert out == 'BARMAN OK - Ready to serve the Espresso backup ' \
'for 3 server(s) * a * b * c\n'
assert err == ''
assert not output.error_occurred
def test_single_result_check_error(self, capsys):
writer = output.NagiosOutputWriter()
output.error_occurred = False
# one server with one error
writer.result_check('a', 'test', False, None)
writer.close()
(out, err) = capsys.readouterr()
assert out == 'BARMAN CRITICAL - server a has issues * ' \
'a FAILED: test\na.test: FAILED\n'
assert err == ''
assert output.error_occurred
assert output.error_exit_code == 2
def test_result_check_error(self, capsys):
writer = output.NagiosOutputWriter()
output.error_occurred = False
# three server with one error
writer.result_check('a', 'test', True, None)
writer.result_check('b', 'test', False, None)
writer.result_check('c', 'test', True, None)
writer.close()
(out, err) = capsys.readouterr()
assert out == 'BARMAN CRITICAL - 1 server out of 3 have issues * ' \
'b FAILED: test\nb.test: FAILED\n'
assert err == ''
assert output.error_occurred
assert output.error_exit_code == 2
| huddler/pgbarman | tests/test_output.py | Python | gpl-3.0 | 36,282 |
# coding: utf-8
from flask import Blueprint
from flask import request, render_template, render_template_string, Flask
from flask import jsonify
import json
import random
from ..decorators.crossdomain import crossdomain
__all__ = ['bp']
bp = Blueprint('demo_handler', __name__)
###############################################################
#
# Mock Data With BACKBONE.JS
#
###############################################################
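# Every endpoint below is a mock: it enables CORS through the @crossdomain
# decorator and returns either canned JSON loaded with retrieve_mock_data()
# (defined at the bottom of this module) or a small jsonify() payload.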
##################### Security Mock Start #####################
@bp.route('/security/authenticate', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def authenticate():
print 'Auth User name :>>' + request.form.get('username')
return jsonify(security_user={'auth_token': 'mocked-hmac-authorization-token'})
@bp.route('/security/signout', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def signout():
return jsonify(result='success')
##################### Security Mock End #######################
##################### Vehicle Mock Data #######################
@bp.route('/vehicles', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_vehicles():
mock_vehicles = retrieve_mock_data('mock-vehicle-record.json', 'mock-data-backbone')
return json.dumps(mock_vehicles);
@bp.route('/vehicles', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_vehicle():
return jsonify(id=random.randint(8, 1000))
@bp.route('/vehicles/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_vehicle(id):
print 'In DELETE METHOD..'
return jsonify(id=id)
@bp.route('/vehicles/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_vehicle(id):
print 'In PUT METHOD..'
return jsonify(id=id)
@bp.route('/vehicle-criteriable-attrs', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_vehicle_criteriable_attrs():
mock_vehicle_criteriable_attrs = retrieve_mock_data('mock-vehicle-header.json', 'mock-data-backbone')
print mock_vehicle_criteriable_attrs
return json.dumps(mock_vehicle_criteriable_attrs)
@bp.route('/vehicle-history/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_vehicle_history(id):
mock_vehicle_hisroty = retrieve_mock_data('mock-vehicle-history-record.json', 'mock-data-backbone')
return json.dumps(mock_vehicle_hisroty)
@bp.route('/vehicle-general-info/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_vehicle_general_info(id):
mock_vehicle_general_info = retrieve_mock_data('mock-vehicle-details.json', 'mock-data-backbone')
return json.dumps(mock_vehicle_general_info);
##################### Vehicle Mock Data #######################
####################### User Mock Data ########################
@bp.route('/users', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_users():
mock_users = retrieve_mock_data('mock-user-record.json', 'mock-data-backbone')
return json.dumps(mock_users);
@bp.route('/users', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_user():
return jsonify(id=random.randint(8, 1000));
@bp.route('/users/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_user(id):
print 'In DELETE METHOD..'
return jsonify(id=id);
@bp.route('/users/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_user(id):
print 'In PUT METHOD..'
return jsonify(id=id);
@bp.route('/user-attrs', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_atts():
mock_user_attrs = retrieve_mock_data('mock-user-attrs.json', 'mock-data-backbone')
return json.dumps(mock_user_attrs);
@bp.route('/user-general-info/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_general_info(id):
mock_user_general_info = retrieve_mock_data('mock-user-details.json', 'mock-data-backbone')
return json.dumps(mock_user_general_info);
@bp.route('/user-user-groups/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_user_groups(id):
mock_user_user_groups = retrieve_mock_data('mock-user-user-groups.json', 'mock-data-backbone')
return json.dumps(mock_user_user_groups);
@bp.route('/user-history/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_history(id):
mock_user_hisroty = retrieve_mock_data('mock-user-history-record.json', 'mock-data-backbone')
return json.dumps(mock_user_hisroty);
####################### User Mock Data ########################
################### User Group Mock Data ######################
@bp.route('/user-groups', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_groups():
mock_user_groups = retrieve_mock_data('mock-user-group-record.json', 'mock-data-backbone')
return json.dumps(mock_user_groups);
@bp.route('/user-groups', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_user_group():
return jsonify(id=random.randint(8, 1000));
@bp.route('/user-groups/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_user_group(id):
print 'In DELETE METHOD..'
return jsonify(id=id);
@bp.route('/user-groups/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_user_group(id):
print 'In PUT METHOD..'
return jsonify(id=id);
@bp.route('/user-group-attrs', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_group_atts():
mock_user_group_attrs = retrieve_mock_data('mock-user-group-attrs.json', 'mock-data-backbone')
return json.dumps(mock_user_group_attrs);
@bp.route('/user-group-general-info/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_group_general_info(id):
mock_user_group_general_info = retrieve_mock_data('mock-user-group-details.json', 'mock-data-backbone')
return json.dumps(mock_user_group_general_info);
@bp.route('/user-group-users/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_group_users(id):
mock_user_user_groups = retrieve_mock_data('mock-user-group-users.json', 'mock-data-backbone')
return json.dumps(mock_user_user_groups);
@bp.route('/user-group-history/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_user_group_history(id):
mock_user_group_hisroty = retrieve_mock_data('mock-user-group-history-record.json', 'mock-data-backbone')
return json.dumps(mock_user_group_hisroty);
################### User Group Mock Data ######################
####################### Role Mock Data ########################
@bp.route('/roles', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_roles():
mock_roles = retrieve_mock_data('mock-role-record.json', 'mock-data-backbone')
return json.dumps(mock_roles);
@bp.route('/roles/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_role_details(id):
print id
mock_role_details = retrieve_mock_data('mock-role-details.json', 'mock-data-backbone')
return json.dumps(mock_role_details);
@bp.route('/roles', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_role():
return jsonify(id=random.randint(8, 1000));
@bp.route('/roles/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_role(id):
print 'In DELETE METHOD..'
return jsonify(id=id);
@bp.route('/roles/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_role(id):
print 'In PUT METHOD..'
return jsonify(id=id);
@bp.route('/role-general-info/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_role_general_info(id):
mock_role_general_info = retrieve_mock_data('mock-role-details.json', 'mock-data-backbone')
return json.dumps(mock_role_general_info);
@bp.route('/role-history/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_role_history(id):
mock_role_hisroty = retrieve_mock_data('mock-role-history-record.json', 'mock-data-backbone')
return json.dumps(mock_role_hisroty);
@bp.route('/role-privileges/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_role_privileges(id):
mock_role_privileges = retrieve_mock_data('mock-role-privileges.json', 'mock-data-backbone')
return json.dumps(mock_role_privileges);
@bp.route('/role-user-groups/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_role_user_groups(id):
mock_role_user_groups = retrieve_mock_data('mock-role-user-groups.json', 'mock-data-backbone')
return json.dumps(mock_role_user_groups);
@bp.route('/role-users/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_role_users(id):
mock_role_users = retrieve_mock_data('mock-role-users.json', 'mock-data-backbone')
return json.dumps(mock_role_users);
####################### Role Mock Data ########################
##################### Privilege Mock Data #####################
@bp.route('/privileges', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_priveilges():
mock_privileges= retrieve_mock_data('mock-privilege-record.json', 'mock-data-backbone')
return json.dumps(mock_privileges);
@bp.route('/privileges', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_privilege():
return jsonify(id=random.randint(8, 1000));
@bp.route('/privileges/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_privilege(id):
print 'In DELETE METHOD..'
return jsonify(id=id);
@bp.route('/privileges/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type')
def update_privilege(id):
print 'In PUT METHOD..'
return jsonify(id=id);
@bp.route('/privilege-history/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_privilege_history(id):
mock_privilege_hisroty = retrieve_mock_data('mock-privilege-history-record.json', 'mock-data-backbone')
return json.dumps(mock_privilege_hisroty);
@bp.route('/privilege-general-info/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_privilege_general_info(id):
mock_privilege_general_info = retrieve_mock_data('mock-privilege-details.json', 'mock-data-backbone')
return json.dumps(mock_privilege_general_info);
@bp.route('/privilege-roles/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_privilege_roles(id):
mock_privilege_roles = retrieve_mock_data('mock-privilege-roles.json', 'mock-data-backbone')
return json.dumps(mock_privilege_roles);
##################### Privilege Mock Data #####################
##################### Criteria Mock Data ######################
@bp.route('/criterias', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_criterias():
fetch_criterias = retrieve_mock_data('mock-criteria-record.json', 'mock-data-backbone')
return json.dumps(fetch_criterias);
@bp.route('/criterias', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def create_criteria():
return jsonify(id=random.randint(8, 1000));
@bp.route('/criterias/<int:id>', methods=['DELETE', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def destory_criteria(id):
print 'In DELETE METHOD..'
return jsonify(id=id);
@bp.route('/criterias/<int:id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def update_criteria(id):
print 'In PUT METHOD..'
return jsonify(id=id)
@bp.route('/criteria-history/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_criteria_history(id):
mock_criteria_hisroty = retrieve_mock_data('mock-criteria-history-record.json', 'mock-data-backbone')
return json.dumps(mock_criteria_hisroty)
@bp.route('/criteria-general-info/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_criteria_general_info(id):
mock_criteria_general_info = retrieve_mock_data('mock-criteria-details.json', 'mock-data-backbone')
return json.dumps(mock_criteria_general_info);
@bp.route('/criteria-privileges/<int:id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_criteria_privileges(id):
mock_criteria_privileges = retrieve_mock_data('mock-criteria-privileges.json', 'mock-data-backbone')
return json.dumps(mock_criteria_privileges);
##################### Criteria Mock Data ######################
################# Generic Filter Mock Data ###################
@bp.route('/generic-filter', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def fetch_generic_filter():
print request.headers.get('Authorization')
mock_filter_settings = retrieve_mock_data('mock-filter-settings.json', 'mock-data-backbone')
return json.dumps(mock_filter_settings)
@bp.route('/generic-records/filter', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='Content-Type, Authorization')
def filter_generic_records():
print request.headers.get('Authorization')
print 'Generic Filter Params >> '+request.args.get('q');
mock_filter_records = retrieve_mock_data('mock-filter-records.json', 'mock-data-backbone')
return json.dumps(mock_filter_records)
################# Generic Filter Mock Data ###################
#################### Method for Mock data #####################
def is_ajax(request):
return "X-Requested-With" in request.headers and request.headers['X-Requested-With'] == "XMLHttpRequest"
def retrieve_mock_data(file_name, folder='mock-data'):
import os
DEMO_DATA_FOLDER = os.path.join(os.getcwd(), folder)
with open(os.path.join(DEMO_DATA_FOLDER, file_name)) as mock_json:
mock_data = json.load(mock_json)
return mock_data
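# For example, retrieve_mock_data('mock-user-record.json', 'mock-data-backbone')
# loads <cwd>/mock-data-backbone/mock-user-record.json and returns the parsed JSON.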
| tim-tang/arctic-bear | arctic/handlers/demo_handler.py | Python | mit | 15,878 |
"""apoio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from apoio import views
urlpatterns = [
    url(r'^coletores/', include('coletores.urls')),
    url(r'^faqs/', include('faqs.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.index, name='index'),
    url(r'^volunteer/$', views.volunteer, name='volunteer'),
]
| piratas/apoio-piratas | apoio/apoio/urls.py | Python | gpl-3.0 | 995 |
"""
pyNEAT
Copyright (C) 2007-2008 Brian Greer
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import math
def getError(outputs, targets):
    if len(targets) != len(outputs):
        raise Exception('Invalid number of targets (%d != %d)' % (len(targets), len(outputs)))
    error = 0.0
    for i in range(len(targets)):
        output = outputs[i]
        target = targets[i]
        if len(output) != len(target):
            raise Exception('Invalid target size (%d != %d)' % (len(target), len(output)))
        for j in range(len(target)):
            delta = output[j] - target[j]
            error += delta * delta
    return error
#def getError(outputs, targets):
#   deltas = [map(lambda i, j: (i - j) * (i - j), x, y) for x, y in zip(outputs, targets)]
#   return reduce(lambda x, y: x + y, deltas)
def sumSquaredError(outputs, targets):
    error = getError(outputs, targets)
    return math.sqrt(error)
def rootMeanSquaredError(outputs, targets):
    error = getError(outputs, targets)
    return math.sqrt(error / len(targets))
def meanError(outputs, targets):
    return getError(outputs, targets) * 0.5
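# Example with made-up values: rootMeanSquaredError([[0.9, 0.1]], [[1.0, 0.0]])
# = sqrt(((0.9 - 1.0)**2 + (0.1 - 0.0)**2) / 1) ~= 0.141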
| liquidkarma/pyneat | pyNEAT/Fitness.py | Python | gpl-2.0 | 1,737 |
from tests import utils
import tempfile
import pytest
from cutevariant.gui import export as exp
from cutevariant.gui.plugins.variant_view import widgets
from cutevariant.core import sql, importer
from cutevariant.core.reader import VcfReader
import os
from PySide2.QtWidgets import *
from PySide2.QtCore import *
from PySide2.QtGui import *
@pytest.fixture
def conn():
    # Requires a real file on disk to work
    tempdb = tempfile.mkstemp(suffix=".db")[1]
    conn = sql.get_sql_connection(tempdb)
    importer.import_reader(conn, VcfReader(open("examples/test.snpeff.vcf"), "snpeff"))
    return conn
@pytest.mark.parametrize("extension", ["csv", "bed", "ped", "vcf"])
def test_export_dialog(qtbot, conn, extension):
    filename = tempfile.mkstemp(suffix="." + extension)[1]
    os.remove(filename)
    dialog = exp.ExportDialogFactory.create_dialog(conn, extension, filename)
    qtbot.addWidget(dialog)
    dialog.show()
    assert dialog.isVisible()
    qtbot.mouseClick(dialog.button_box.button(QDialogButtonBox.Save), Qt.LeftButton)
    assert os.path.exists(filename), "the file has not been created"
    # print(QCoreApplication.instance().activePopupWidget())
| labsquare/CuteVariant | tests/gui/test_export.py | Python | gpl-3.0 | 1,187 |
from abc import ABC, abstractmethod
import io
import gzip
import math
from collections import Counter
# use to format value with URL caracter : #See Issue
from urllib.parse import unquote
import cutevariant.commons as cm
from cutevariant import LOGGER
class AbstractReader(ABC):
"""Base class for all Readers required to import variants into the database.
Subclass it to implement a new file parser.
Attributes:
device: A file object typically returned by open(); can be None if
a FakeReader is instantiated.
file_size: File size in bytes
number_lines: Number of lines in the file (compressed or not).
See Also: :meth:`self.compute_number_lines`
read_bytes: Number of bytes read so far (progress = read_bytes / file_size).
It's a fallback if number_lines can't be computed.
samples: List of samples in the file (default: empty)
ignored_fields: Fields to skip during import,
e.g. [{field_name: "AF", "category": "variant"}]
Example:
with open(filename,"r") as file:
reader = Reader(file)
reader.get_variants()
"""
def __init__(self, device):
self.device = device
self.number_lines = None
self.read_bytes = 0
self.samples = list()
self.file_size = 0
self.ignored_fields = set()
@classmethod
@abstractmethod
def get_variants(cls):
"""Abstract method must return variants as an iterable of dictionnaries.
Variant dictionnary has 4 mandatory fields `chr`, `pos`, `ref`, `alt`.
Other fields are optionnal
For instance::
[
{"chr": "chr3","pos": 3244,"ref": "A","alt":"C", "qual": 30},
{"chr": "chr4","pos": 3244,"ref": "A","alt":"C","qual": 20},
{"chr": "chr5","pos": 3244,"ref": "A","alt":"C","qual": 10 },
]
Annotations and Samples objects can be embedded in a variant dictionary.
Annotations describe several annotations for one variant;
in most cases these are relative to transcripts.
Samples describe information relating a variant to a sample,
such as the genotype (gt), which is a mandatory field.
.. code-block:: python
[{
"chr": "chr3",
"pos": 3244,
"ref": "A",
"alt":"C",
"field_n": "value_n",
"annotations": [
{"gene": "GJB2", "transcripts": "NM_00232.1", "field_n": "value_n"},
{"gene": "GJB2", "transcripts": "NM_00232.2", "field_n": "value_n"}
],
"samples": [
{"name":"boby", "genotype": 1, "field_n":"value_n"},
{"name":"kevin", "genotype": 1, "field_n":"value_n"}
]
},]
Yields:
dict: variant dictionary
Examples:
for variant in reader.get_variants():
print(variant["chr"], variant["pos"])
"""
raise NotImplementedError(cls.__class__.__name__)
@classmethod
@abstractmethod
def get_fields(cls):
"""Abstract method hat must return fields description
Full output::
[
{"name": "chr", "type": "text", "category": "variant", "description": "..."},
{"name": "pos", "type": "text", "category": "variant", "description": "..."},
{"name": "ref", "type": "text", "category": "variant", "description": "..."},
{"name": "alt", "type": "text", "category": "variant", "description": "..."},
{"name": "field_n", "type": "text", "category": "variant", "description": "..."},
{"name": "name", "type": "text", "category": "annotations", "samples": "..."},
{"name": "genotype", "type": "text", "category": "annotations", "samples": "..."}
]
Yields:
dict: field dictionary
Examples:
for field in reader.get_fields():
print(field["name"], field["description"])
"""
raise NotImplementedError(cls.__class__.__name__)
def get_samples(self) -> list:
"""Return list of samples
Override this method to store samples in the SQLite database.
"""
return []
def get_metadatas(self) -> dict:
"""Get meta data
Override this method to have meta data in sqlite database
"""
return {}
def get_extra_fields(self):
"""Yield fields with extra mandatory fields like 'comment' and 'score'"""
yield {
"name": "favorite",
"type": "bool",
"category": "variants",
"description": "Tag status as favorite",
}
yield {
"name": "comment",
"type": "str",
"category": "variants",
"description": "Variant comment written by user",
}
yield {
"name": "classification",
"type": "int",
"category": "variants",
"description": "ACMG score",
}
yield {
"name": "tags",
"type": "str",
"category": "variants",
"description": "list of tags",
}
yield {
"name": "count_hom",
"type": "int",
"category": "variants",
"description": "Number of homozygous genotypes (1/1)",
}
yield {
"name": "count_het",
"type": "int",
"category": "variants",
"description": "Number of heterozygous genotypes (0/1)",
}
yield {
"name": "count_ref",
"type": "int",
"category": "variants",
"description": "Number of homozygous genotypes (0/0)",
}
yield {
"name": "count_var",
"type": "int",
"category": "variants",
"description": "Number of variants (not 0/0)",
}
yield {
"name": "is_indel",
"type": "bool",
"category": "variants",
"description": "True if variant is an indel",
}
yield {
"name": "is_snp",
"type": "bool",
"category": "variants",
"description": "True if variant is a snp",
}
yield {
"name": "annotation_count",
"type": "int",
"category": "variants",
"description": "Count of transcripts per variant",
}
yield {
"name": "case_count_hom",
"type": "int",
"category": "variants",
"description": "Number of homozygous genotypes (1/1) in case",
}
yield {
"name": "case_count_het",
"type": "int",
"category": "variants",
"description": "Number of heterozygous genotypes (1/0) in case",
}
yield {
"name": "case_count_ref",
"type": "int",
"category": "variants",
"description": "Number of homozygous genotypes (0/0) in case",
}
yield {
"name": "control_count_hom",
"type": "int",
"category": "variants",
"description": "Number of homozygous genotypes (1/1) in control",
}
yield {
"name": "control_count_het",
"type": "int",
"category": "variants",
"description": "Number of heterozygous genotypes (1/0) in control",
}
yield {
"name": "control_count_ref",
"type": "int",
"category": "variants",
"description": "Number of homozygous genotypes (0/0) in control",
}
# avoid duplicate fields per category ...
duplicates = set()
for field in self.get_fields():
# Create a unique identifier per category
unique_key = field["category"] + "." + field["name"]
is_unique = unique_key not in duplicates
is_ignored = (field["name"], field["category"]) in self.ignored_fields
if is_unique and not is_ignored:
yield field
duplicates.add(unique_key)
def add_ignored_field(self, field_name: str, field_category: str):
"""Add new field to the ignored_fields list.
ignored fields will not returned by get_extra_fields and then are not imporpted
into the database
Args:
field_name (str): a field name
field_category (str): the field category name (variants, annotations, samples)
"""
self.ignored_fields.add((field_name, field_category))
def get_extra_variants(self, **kwargs):
"""Yield variants with extra information computed and format if necessary
The following information are added. See get_extra_fields
- favorite (bool): (Default: False)
- comment (str): (Default: "")
- classification (int): ACMG score (Default: 3, uncertain significance)
- count_var (int): Number of variants (not 0/0)
- count_hom (int): How many variants are mutant homozygous within samples
- count_het (int): How many variants are heterozygous within samples
- count_ref (int): How many variants are wild homozygous within samples
- is_indel (bool): Is the variation an insertion / deletion
- is_snp (bool): Is the variation an single nucleotide variation
If case/control groups are available from a PED file, counts for case and
control are also computed.
In this case, it is necessary to give sample names in the "case" and
"control" keys of kwargs.
Example of supported kwargs::
{
"case": ["boby", "raymond"],
"control": ["lucas", "pierre"]
}
- case_count_hom (int): How many variants are mutant homozygous within case samples
- case_count_het (int): How many variants are heterozygous within case samples
- case_count_ref (int): How many variants are wild homozygous within case samples
- control_count_hom (int): How many variants are mutant homozygous within control samples
- control_count_het (int): How many variants are heterozygous within control samples
- control_count_ref (int): How many variants are wild homozygous within control samples
See Also:
`cutevariant.core.reader.vcfreader.VcfReader.parse_variants`
Args:
**kwargs (optional): case and control sample names
Yields:
(generator[dict]): variants. See also: :meth:`get_variants`.
Raises:
AssertionError: If sample(s) are both in cases and controls.
"""
case_and_control_samples_found = False
if "case" in kwargs and "control" in kwargs:
# Samples can't be both in cases and controls
case_samples = kwargs["case"]
control_samples = kwargs["control"]
assert not set(case_samples) & set(
control_samples
), "Found sample both in cases and controls!"
case_and_control_samples_found = True
for variant in self.get_variants():
variant["favorite"] = False
variant["comment"] = ""
variant["classification"] = 0
variant["tags"] = ""
# For now, set the first annotation as the major transcript
if "annotations" in variant:
variant["annotation_count"] = len(variant["annotations"])
# Count genotype by control and case
genotype_counter = Counter()
if "samples" in variant:
for sample in variant["samples"]:
genotype_counter[sample["gt"]] += 1
variant["count_hom"] = genotype_counter[2]
variant["count_het"] = genotype_counter[1]
variant["count_ref"] = genotype_counter[0]
# Number of variants (not 0/0)
variant["count_var"] = genotype_counter[1] + genotype_counter[2]
variant["is_indel"] = len(variant["ref"]) != len(variant["alt"])
variant["is_snp"] = len(variant["ref"]) == len(variant["alt"])
# Count genotype by control and case
if case_and_control_samples_found:
case_counter = Counter()
control_counter = Counter()
if "samples" in variant:
# Note: No guarantee that samples from DB are all qualified
# by PED data.
# So some samples from variants may not be in case/control samples.
for sample in variant["samples"]:
if sample["name"] in case_samples:
case_counter[sample["gt"]] += 1
elif sample["name"] in control_samples:
control_counter[sample["gt"]] += 1
variant["case_count_hom"] = case_counter[2]
variant["case_count_het"] = case_counter[1]
variant["case_count_ref"] = case_counter[0]
variant["control_count_hom"] = control_counter[2]
variant["control_count_het"] = control_counter[1]
variant["control_count_ref"] = control_counter[0]
# Remove ignored fields from the variant
for name, category in self.ignored_fields:
if category == "variants":
if name in variant:
del variant[name]
# remove from category
if category == "annotations":
for ann in variant["annotations"]:
if name in ann:
del ann[name]
if category == "samples":
for sample in variant["samples"]:
if name in sample:
del sample[name]
# Format variant values! For instance, replace %3D with "=" using unquote
# See issue https://github.com/labsquare/cutevariant/issues/220
for key, value in variant.items():
if isinstance(value, str):
variant[key] = unquote(value)
if "annotations" in variant:
for i, ann in enumerate(variant["annotations"]):
for key, value in ann.items():
if isinstance(value, str):
variant["annotations"][i][key] = unquote(
variant["annotations"][i][key]
)
if "samples" in variant:
for i, sample in enumerate(variant["samples"]):
for key, value in sample.items():
if isinstance(value, str):
variant["samples"][i][key] = unquote(
variant["samples"][i][key]
)
yield nullify(variant)
def get_extra_fields_by_category(self, category: str):
"""Syntaxic suggar to get fields according their category
:param category can be usually variants, samples, annotations
:return: A generator of fields
:rtype: <generator>
"""
return (
field for field in self.get_extra_fields() if field["category"] == category
)
def get_fields_by_category(self, category: str):
"""Syntaxic suggar to get fields according their category
:param category can be usually variants, samples, annotations
:return: A generator of fields
:rtype: <generator>
"""
return (field for field in self.get_fields() if field["category"] == category)
def get_variants_count(self) -> int:
"""Get variant count from the device
Override this method to make it faster
"""
return len(tuple(self.get_variants()))
def compute_number_lines(self):
"""Get a sample of lines in file if possible and if the end of file is
not reached compute an evaluation of the global number of lines.
Returns:
Nothing but sets `self.number_lines` attribute.
"""
def find_lines_in_text_file(text_file_handler):
"""Get first 15000 lines
PS: don't care of headers (# lines), the influence is marginal on big
files and also on small files (so quick to insert that the wrong number
of lines is invisible).
"""
first_lines = []
for _ in range(15000):
try:
first_lines.append(len(next(text_file_handler)))
except StopIteration:
# EOF: exact number of lines is known
self.number_lines = len(first_lines)
break
if self.number_lines is None:
self.number_lines = int(
self.file_size / (sum(first_lines) / len(first_lines))
)
LOGGER.debug(
"nb lines evaluated: %s; size: %s; lines used: %s",
self.number_lines,
self.file_size,
len(first_lines),
)
# Is a FakeReader being used?
if not self.device:
return 0
# Detect type of file handler
if isinstance(self.device, (io.RawIOBase, io.BufferedIOBase)):
# Binary opened file => assert that it is a vcf.gz file
with gzip.open(self.device.name, "rb") as file_obj:
find_lines_in_text_file(file_obj)
elif isinstance(self.device, io.TextIOBase):
find_lines_in_text_file(self.device)
else:
LOGGER.error("Unknown file handler type: %s", type(self.device))
raise TypeError("Unknown file handler type: %s" % type(self.device))
# Rewind the file
self.device.seek(0)
def check_variant_schema(variant: dict):
"""Test if get_variant returns well formated nested data.
This method is for testing purpose. It raises an exception if data is corrupted
:param variant dict returned by AbstractReader.get_variant()
"""
try:
from schema import Schema, And, Or, Use, Optional, Regex
except ImportError as e:
LOGGER.warning("You should install optional package 'schema' via:")
LOGGER.warning("\t - pypi: pip install cutevariant[dev]")
LOGGER.warning("\t - git repo in editable mode: pip -e . [dev]")
raise e
checker = Schema(
{
"chr": And(Use(str.lower), str),
"pos": int,
"ref": And(Use(str.upper), Regex(r"^[ACGTN]+")),
"alt": And(Use(str.upper), Regex(r"^[ACGTN]+")),
Optional(str): Or(int, str, bool, float, None),
Optional("annotations"): [
{
"gene": str,
"transcript": str,
Optional(str): Or(int, str, bool, float, None),
}
],
Optional("samples"): [
{
"name": str,
"gt": And(int, lambda x: x in [-1, 0, 1, 2]),
Optional(str): Or(int, str, bool, float, None),
}
],
}
)
checker.validate(variant)
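# Illustrative sketch: a minimal variant dict that should pass
# check_variant_schema, assuming the optional 'schema' package is installed.
# The sample name and annotation values below are hypothetical.
#
# check_variant_schema({
#     "chr": "chr3",
#     "pos": 3244,
#     "ref": "A",
#     "alt": "C",
#     "annotations": [{"gene": "GJB2", "transcript": "NM_00232.1"}],
#     "samples": [{"name": "boby", "gt": 1}],
# })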
def check_field_schema(field: dict):
"""Test if get_field returns well formated data
This method is for testing purpose. It raises an exception if data is corrupted
:param field dict returned by AbstractReader.get_field()
"""
try:
from schema import Schema, And, Use, Optional
except ImportError as e:
LOGGER.warning("You should install optional package 'schema' via:")
LOGGER.warning("\t - pypi: pip install cutevariant[dev]")
LOGGER.warning("\t - git repo in editable mode: pip -e . [dev]")
raise e
checker = Schema(
{
"name": And(str, Use(str.lower)),
"type": lambda x: x in ["str", "int", "bool", "float"],
"category": lambda x: x in ["variants", "annotations", "samples"],
"description": str,
Optional("constraint", default="NULL"): str,
}
)
checker.validate(field)
def sanitize_field_name(field: str):
# TODO
# LOGGER.warning("NOT implemented function!!")
return field
def nullify(variant: dict) -> dict:
"""Convert empty fields value to NONE
This is used have NULL value inside the SQLITE inside an empty string
"""
def convert_to_none(value):
"""convert value to None according type"""
EMPTY_STRING = ["", "."]
if isinstance(value, str):
if value in EMPTY_STRING:
return None
if isinstance(value, float) or isinstance(value, int):
if math.isnan(value):
return None
return value
for key in variant.keys():
variant[key] = convert_to_none(variant[key])
if key == "annotations":
for ann in variant["annotations"]:
for ann_key in ann.keys():
ann[ann_key] = convert_to_none(ann[ann_key])
if key == "samples":
for sample in variant["samples"]:
for sample_key in sample.keys():
sample[sample_key] = convert_to_none(sample[sample_key])
return variant
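# Illustrative sketch: nullify() maps the empty markers "" and "." as well as
# NaN to None, so that they end up as NULL in SQLite. The "af" field below is
# a hypothetical example field.
#
# nullify({"chr": "chr3", "pos": 3244, "ref": ".", "alt": "", "af": float("nan")})
# # -> {"chr": "chr3", "pos": 3244, "ref": None, "alt": None, "af": None}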
| labsquare/CuteVariant | cutevariant/core/reader/abstractreader.py | Python | gpl-3.0 | 21,709 |
'''tzinfo timezone information for America/Miquelon.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Miquelon(DstTzInfo):
'''America/Miquelon timezone definition. See datetime.tzinfo for details'''
zone = 'America/Miquelon'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1911,5,15,3,44,40),
d(1980,5,1,4,0,0),
d(1987,4,5,5,0,0),
d(1987,10,25,4,0,0),
d(1988,4,3,5,0,0),
d(1988,10,30,4,0,0),
d(1989,4,2,5,0,0),
d(1989,10,29,4,0,0),
d(1990,4,1,5,0,0),
d(1990,10,28,4,0,0),
d(1991,4,7,5,0,0),
d(1991,10,27,4,0,0),
d(1992,4,5,5,0,0),
d(1992,10,25,4,0,0),
d(1993,4,4,5,0,0),
d(1993,10,31,4,0,0),
d(1994,4,3,5,0,0),
d(1994,10,30,4,0,0),
d(1995,4,2,5,0,0),
d(1995,10,29,4,0,0),
d(1996,4,7,5,0,0),
d(1996,10,27,4,0,0),
d(1997,4,6,5,0,0),
d(1997,10,26,4,0,0),
d(1998,4,5,5,0,0),
d(1998,10,25,4,0,0),
d(1999,4,4,5,0,0),
d(1999,10,31,4,0,0),
d(2000,4,2,5,0,0),
d(2000,10,29,4,0,0),
d(2001,4,1,5,0,0),
d(2001,10,28,4,0,0),
d(2002,4,7,5,0,0),
d(2002,10,27,4,0,0),
d(2003,4,6,5,0,0),
d(2003,10,26,4,0,0),
d(2004,4,4,5,0,0),
d(2004,10,31,4,0,0),
d(2005,4,3,5,0,0),
d(2005,10,30,4,0,0),
d(2006,4,2,5,0,0),
d(2006,10,29,4,0,0),
d(2007,3,11,5,0,0),
d(2007,11,4,4,0,0),
d(2008,3,9,5,0,0),
d(2008,11,2,4,0,0),
d(2009,3,8,5,0,0),
d(2009,11,1,4,0,0),
d(2010,3,14,5,0,0),
d(2010,11,7,4,0,0),
d(2011,3,13,5,0,0),
d(2011,11,6,4,0,0),
d(2012,3,11,5,0,0),
d(2012,11,4,4,0,0),
d(2013,3,10,5,0,0),
d(2013,11,3,4,0,0),
d(2014,3,9,5,0,0),
d(2014,11,2,4,0,0),
d(2015,3,8,5,0,0),
d(2015,11,1,4,0,0),
d(2016,3,13,5,0,0),
d(2016,11,6,4,0,0),
d(2017,3,12,5,0,0),
d(2017,11,5,4,0,0),
d(2018,3,11,5,0,0),
d(2018,11,4,4,0,0),
d(2019,3,10,5,0,0),
d(2019,11,3,4,0,0),
d(2020,3,8,5,0,0),
d(2020,11,1,4,0,0),
d(2021,3,14,5,0,0),
d(2021,11,7,4,0,0),
d(2022,3,13,5,0,0),
d(2022,11,6,4,0,0),
d(2023,3,12,5,0,0),
d(2023,11,5,4,0,0),
d(2024,3,10,5,0,0),
d(2024,11,3,4,0,0),
d(2025,3,9,5,0,0),
d(2025,11,2,4,0,0),
d(2026,3,8,5,0,0),
d(2026,11,1,4,0,0),
d(2027,3,14,5,0,0),
d(2027,11,7,4,0,0),
d(2028,3,12,5,0,0),
d(2028,11,5,4,0,0),
d(2029,3,11,5,0,0),
d(2029,11,4,4,0,0),
d(2030,3,10,5,0,0),
d(2030,11,3,4,0,0),
d(2031,3,9,5,0,0),
d(2031,11,2,4,0,0),
d(2032,3,14,5,0,0),
d(2032,11,7,4,0,0),
d(2033,3,13,5,0,0),
d(2033,11,6,4,0,0),
d(2034,3,12,5,0,0),
d(2034,11,5,4,0,0),
d(2035,3,11,5,0,0),
d(2035,11,4,4,0,0),
d(2036,3,9,5,0,0),
d(2036,11,2,4,0,0),
d(2037,3,8,5,0,0),
d(2037,11,1,4,0,0),
]
_transition_info = [
i(-13500,0,'LMT'),
i(-14400,0,'AST'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
i(-7200,3600,'PMDT'),
i(-10800,0,'PMST'),
]
Miquelon = Miquelon()
| newvem/pytz | pytz/zoneinfo/America/Miquelon.py | Python | mit | 4,711 |
from cppstyle import check_naming, check_order, check_indent, check_comments
def check(file, node, config):
issues = []
if file == node.file:
issues += check_naming.check(node, config)
issues += check_indent.check(node, config)
issues += check_order.check(node, config)
issues += check_comments.check(node, config)
for c in node.children:
issues += check(file, c, config)
return issues
| gfelbing/cppstyle | cppstyle/check.py | Python | gpl-3.0 | 446 |
"""Axis class and associated."""
# --- import --------------------------------------------------------------------------------------
import re
import numexpr
import operator
import functools
import numpy as np
from .. import exceptions as wt_exceptions
from .. import kit as wt_kit
from .. import units as wt_units
# --- define --------------------------------------------------------------------------------------
__all__ = ["Axis"]
operator_to_identifier = {}
operator_to_identifier["/"] = "__d__"
operator_to_identifier["="] = "__e__"
operator_to_identifier["-"] = "__m__"
operator_to_identifier["+"] = "__p__"
operator_to_identifier["*"] = "__t__"
identifier_to_operator = {value: key for key, value in operator_to_identifier.items()}
operators = "".join(operator_to_identifier.keys())
# --- class ---------------------------------------------------------------------------------------
class Axis(object):
"""Axis class."""
def __init__(self, parent, expression, units=None):
"""Data axis.
Parameters
----------
parent : WrightTools.Data
Parent data object.
expression : string
Axis expression.
units : string (optional)
Axis units. Default is None.
"""
self.parent = parent
self.expression = expression
if units is None:
self.units = self.variables[0].units
else:
self.units = units
def __getitem__(self, index):
vs = {}
for variable in self.variables:
arr = variable[index]
vs[variable.natural_name] = wt_units.converter(arr, variable.units, self.units)
return numexpr.evaluate(self.expression.split("=")[0], local_dict=vs)
def __repr__(self) -> str:
return "<WrightTools.Axis {0} ({1}) at {2}>".format(
self.expression, str(self.units), id(self)
)
@property
def _leaf(self):
out = self.expression
if self.units is not None:
out += " ({0})".format(self.units)
out += " {0}".format(self.shape)
return out
@property
def full(self) -> np.ndarray:
"""Axis expression evaluated and repeated to match the shape of the parent data object."""
arr = self[:]
for i in range(arr.ndim):
if arr.shape[i] == 1:
arr = np.repeat(arr, self.parent.shape[i], axis=i)
return arr
@property
def identity(self) -> str:
"""Complete identifier written to disk in data.attrs['axes']."""
return self.expression + " {%s}" % self.units
@property
def label(self) -> str:
"""A latex formatted label representing axis expression."""
label = self.expression.replace("_", "\\;")
if self.units_kind:
symbol = wt_units.get_symbol(self.units)
if symbol is not None:
for v in self.variables:
vl = "%s_{%s}" % (symbol, v.label)
vl = vl.replace("_{}", "") # label can be empty, no empty subscripts
label = label.replace(v.natural_name, vl)
label += rf"\,\left({wt_units.ureg.Unit(self.units):~}\right)"
label = r"$\mathsf{%s}$" % label
return label
@property
def natural_name(self) -> str:
"""Valid python identifier representation of the expession."""
name = self.expression.strip()
for op in operators:
name = name.replace(op, operator_to_identifier[op])
return wt_kit.string2identifier(name)
@property
def ndim(self) -> int:
"""Get number of dimensions."""
try:
assert self._ndim is not None
except (AssertionError, AttributeError):
self._ndim = self.variables[0].ndim
finally:
return self._ndim
@property
def points(self) -> np.ndarray:
"""Squeezed array."""
return np.squeeze(self[:])
@property
def shape(self) -> tuple:
"""Shape."""
return wt_kit.joint_shape(*self.variables)
@property
def size(self) -> int:
"""Size."""
return functools.reduce(operator.mul, self.shape)
@property
def units(self):
return self._units
@units.setter
def units(self, value):
if value == "None":
value = None
if value is not None and value not in wt_units.ureg:
raise ValueError(f"'{value}' is not in the unit registry")
self._units = value
@property
def units_kind(self) -> str:
"""Units kind."""
return wt_units.kind(self.units)
@property
def variables(self) -> list:
"""Variables."""
try:
assert self._variables is not None
except (AssertionError, AttributeError):
pattern = "|".join(map(re.escape, operators))
keys = re.split(pattern, self.expression)
indices = []
for key in keys:
if key in self.parent.variable_names:
indices.append(self.parent.variable_names.index(key))
self._variables = [self.parent.variables[i] for i in indices]
finally:
return self._variables
@property
def masked(self) -> np.ndarray:
"""Axis expression evaluated, and masked with NaN shared from data channels."""
arr = self[:]
arr.shape = self.shape
arr = wt_kit.share_nans(arr, *self.parent.channels)[0]
return np.nanmean(
arr, keepdims=True, axis=tuple(i for i in range(self.ndim) if self.shape[i] == 1)
)
def convert(self, destination_units, *, convert_variables=False):
"""Convert axis to destination_units.
Parameters
----------
destination_units : string
Destination units.
convert_variables : boolean (optional)
Toggle conversion of stored arrays. Default is False.
"""
if self.units is None and (destination_units is None or destination_units == "None"):
return
if not wt_units.is_valid_conversion(self.units, destination_units):
valid = wt_units.get_valid_conversions(self.units)
raise wt_exceptions.UnitsError(valid, destination_units)
if convert_variables:
for v in self.variables:
v.convert(destination_units)
self.units = destination_units
self.parent._on_axes_updated()
def max(self):
"""Axis max."""
return np.nanmax(self[:])
def min(self):
"""Axis min."""
return np.nanmin(self[:])
| wright-group/WrightTools | WrightTools/data/_axis.py | Python | mit | 6,659 |
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
def default(session):
"""Run the unit test suite.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
Python corresponding to the ``nox`` binary on the ``PATH`` can
run the tests.
"""
session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.dlp_v2',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
os.path.join('tests', 'unit'),
*session.posargs
)
@nox.session(python=['2.7', '3.5', '3.6', '3.7'])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=['2.7', '3.6'])
def system(session):
"""Run the system test suite."""
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Use pre-release gRPC for system tests.
session.install('--pre', 'grpcio')
session.install('pytest')
session.install('-e', '.')
session.run('py.test', '--quiet',
os.path.join('tests', 'system'), *session.posargs)
@nox.session(python='3.6')
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session(python='3.6')
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install('docutils', 'pygments')
session.run('python', 'setup.py', 'check', '--restructuredtext',
'--strict')
@nox.session(python='3.6')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.chdir(os.path.dirname(__file__))
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
| jonparrott/gcloud-python | dlp/noxfile.py | Python | apache-2.0 | 3,018 |
# encoding: utf-8
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
A framework to build command line utilities in GenroPy.
Argument parsing inspired by Michele Simionato's plac.
::
Three scripts for the genro-kings under softwell sky,
Seven for goodsoftware-lords in their halls of stone,
Nine for mortal customers with money they will buy,
One for the Dark Lord Gio on his dark throne,
In the Land of GenroPy where the Commands lie.
One Script to rule them all, One Script to find them,
One Script to bring them all and in the darkness bind them
In the Land of GenroPy where the Commands lie.
"""
import sys
import os.path
import argparse
import inspect
import imp
from gnr.devel.utils import AutoDiscovery
# --------------------------------------------------------------------------------------------- Globals
command_registry = {}
# --------------------------------------------------------------------------------------------- Constants
_COMMAND_ARGUMENTS = "__gnrcmd__arguments__"
def command(name=None, description=None, *args, **kwargs):
"""A decorator to define new 'gnr' commands.
See ``ArgumentParser`` constructor in the ``argparse`` module for help on args.
:param name: add???. Default value is ``None``
:param description: add???. Default value is ``None``
:returns: the new command
"""
def decorator(cmd):
global command_registry
if command_registry.get(name, None) is not None:
raise KeyError, "Command '%(name)s' already defined in %(file)s" % dict(name=name, file=command_registry[
name].filename)
desc = description if description else cmd.__doc__
cmd = GnrCommand(main=cmd, name=name or cmd.__name__.lower(), description=desc, *args, **kwargs)
command_registry[name] = cmd
return cmd
return decorator
def argument(dest, *args, **kwargs):
"""A decorator to describe arguments to a 'gnr' command.
See ``add_argument`` in the ``argparse`` module for help on args.
:param dest: add???
:returns: the command
"""
args = list(args)
def decorator(cmd):
argspec = vars(cmd).setdefault(_COMMAND_ARGUMENTS, {})
func_args, _, _, func_defaults = inspect.getargspec(cmd)
if not func_defaults:
func_defaults = ()
idx = func_args.index(dest) - (len(func_args) - len(func_defaults))
if 0 <= idx < len(func_defaults):
default = func_defaults[idx]
has_default = True
else:
has_default = False
if not args:
if has_default:
args.append('--%s' % dest)
kwargs['dest'] = dest
kwargs['default'] = default
else:
args.append(dest)
# some magic for special cases
if has_default:
if default is True:
kwargs['action'] = 'store_false'
kwargs['default'] = True
elif default is False:
kwargs['action'] = 'store_true'
kwargs['default'] = False
kwargs['help'] = "%s (default: %s)" % (kwargs.get('help', ''), repr(default))
argspec[dest] = (args, kwargs)
return cmd
return decorator
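# Illustrative sketch of a hypothetical command, mirroring the decorator usage
# found at the bottom of this module:
#
# @command('greet', help="Greet someone")
# @argument('name', help="who to greet")
# @argument('shout', '-s', '--shout', help="print the greeting in uppercase")
# def greet(name, shout=False):
#     msg = "Hello %s" % name
#     print msg.upper() if shout else msg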
class GnrCommand(object):
"""A command line utility."""
def __init__(self, main=None, name=None, help=None, *args, **kwargs):
super(GnrCommand, self).__init__()
self.name = name
self.help = help
self.parser_args = args
self.parser_kwargs = kwargs
self.main = main
@property
def filename(self):
"""File where main is implemented
:returns: add???
"""
try:
return self.main.func_code.co_filename
except:
return "(unknown)"
@property
def lineno(self):
"""Line where main is implemented
:returns: add???
"""
try:
return self.main.func_code.co_firstlineno
except:
return "(unknown)"
@property
def description(self):
"""Return the command description
:returns: the command description
"""
return self.parser_kwargs.get('description', '')
def run(self, *args, **kwargs):
"""Run this command.
:returns: add???
"""
parser = self.init_parser()
if kwargs:
parser.set_defaults(**kwargs)
arguments = parser.parse_args(args or None)
return self.main(**vars(arguments))
def main(self):
"""add???
"""
raise NotImplementedError("Do not use GnrCommand directly, apply @command to a callable.")
def __call__(self, *args, **kwargs):
return self.main(*args, **kwargs)
def init_parser(self, subparsers=None):
"""Initialize this command's arguments.
:returns: add???
"""
if not subparsers:
parser = argparse.ArgumentParser(*self.parser_args, **self.parser_kwargs)
else:
parser = subparsers.add_parser(self.name, help=self.help, *self.parser_args, **self.parser_kwargs)
parser.set_defaults(main=self.main)
arguments = self.auto_arguments()
custom_arguments = getattr(self.main, _COMMAND_ARGUMENTS, {})
arguments.update(custom_arguments)
func_args, _, _, _ = inspect.getargspec(self.main)
for name in func_args:
args, kwargs = arguments[name]
parser.add_argument(*args, **kwargs)
return parser
def auto_arguments(self):
"""Auto-generate a standard argument configuration from __call__'s python arguments
:returns: add???
"""
args, _, _, defaults = inspect.getargspec(self.main)
if not defaults:
defaults = ()
required = args[:len(args) - len(defaults)]
optional = args[len(args) - len(defaults):]
auto = {}
for name in required:
auto[name] = ((name,), {}) # arguments for add_argument
for name, default in zip(optional, defaults):
arg_type = type(default) if default is not None else str
kwargs = dict(dest=name, type=arg_type, default=default, help="(default: %s)" % repr(default))
if arg_type is bool:
kwargs['action'] = 'store_false' if (default == True) else 'store_true'
del kwargs['type']
elif arg_type is type: # a class
arg_type = default.__class__
kwargs['type'] = arg_type
kwargs['metavar'] = arg_type.__name__
auto[name] = (("--%s" % name,), kwargs)
return auto
class CmdRunner(object):
"""Run GenroPy commands.
This class implements the 'gnr' command.
"""
def __init__(self):
self._discover_commands()
def _discover_commands(self):
sys.modules['gnr.commands'] = imp.new_module('gnr.commands')
ad = AutoDiscovery()
for name, cmd in ad.all_commands.items():
imp.load_source('gnr.commands.%s' % name, cmd.path)
def main(self):
"""Parse command line and execute 'gnr' commands."""
parser = self.setup_parser()
args = parser.parse_args()
assert args.main, "No command specified"
main = args.main
del args.main
main(**vars(args))
def setup_parser(self):
"""add???
"""
global command_registry
parser = argparse.ArgumentParser(description="Run Genro commands")
subparsers = parser.add_subparsers(title="commands")
for cmd in command_registry.values():
cmd.init_parser(subparsers)
return parser
@command('commands', help="List commands and where they are implemented")
@argument('verbose', '-v', '--verbose', help="Show command description")
def commands(verbose=False):
"""add???
:param verbose: add???. Default value is ``False``
"""
global command_registry
for name, cmd in command_registry.items():
print "%(name)-20s %(filename)s:%(lineno)s" % dict(name=name, filename=cmd.filename, lineno=cmd.lineno)
if verbose and cmd.help:
print "%(space)20s %(help)s" % dict(space=" ", help=cmd.help)
@command('adreport', help="Print AutoDiscovery report")
@argument('full', '-f', '--full', help="Show full report")
def info(full=False):
"""add???
:param full: add???. Default value is ``False``
"""
ad = AutoDiscovery()
ad.report(full)
@command('adenv', help="Print current project/instance/package/site as environment variables")
@argument('dirs', '-d', '--dirs', help="print directories too")
def info(dirs=False):
"""add???
:param dirs: add???. Default value is ``False``
"""
ad = AutoDiscovery()
print "CURRENT_PROJECT=%s" % (ad.current_project.name if ad.current_project else '')
print "CURRENT_INSTANCE=%s" % (ad.current_instance.name if ad.current_instance else '')
print "CURRENT_PACKAGE=%s" % (ad.current_package.name if ad.current_package else '')
print "CURRENT_SITE=%s" % (ad.current_site.name if ad.current_site else '')
if dirs:
print "CURRENT_PROJECT_DIR=%s" % (ad.current_project.path if ad.current_project else '')
print "CURRENT_INSTANCE_DIR=%s" % (ad.current_instance.path if ad.current_instance else '')
print "CURRENT_PACKAGE_DIR=%s" % (ad.current_package.path if ad.current_package else '')
print "CURRENT_SITE_DIR=%s" % (ad.current_site.path if ad.current_site else '') | poppogbr/genropy | gnrpy/gnr/devel/commands.py | Python | lgpl-2.1 | 10,715 |
name = 'STEM'
identifier = 'edu.cornell.birds.stem'
version = '0.0.2'
def package_dependencies():
return ['edu.utah.sci.vistrails.rpy']
| VisTrails/vistrails-contrib-legacy | stem/__init__.py | Python | bsd-3-clause | 141 |
"""
https://codelab.interviewbit.com/problems/listcycle/
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param A : head node of linked list
# @return the first node in the cycle in the linked list
def detectCycle(self, A):
if A is None or A.next is None:
return None
i = A
j = A
while i is not None and j is not None:
try:
i = i.next
j = j.next.next
except AttributeError:
return None
if i == j:
break
# still not guaranteed to have cycle; could have failed the while loop check
if i is None or j is None:
return None
# found cycle, now find the start
i = A
while i != j:
i = i.next
j = j.next
return i
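# Small self-check sketch: build the list 1 -> 2 -> 3 -> 4 with 4 linking back
# to node 2 and verify that detectCycle returns that node.
if __name__ == "__main__":
    nodes = [ListNode(v) for v in (1, 2, 3, 4)]
    for a, b in zip(nodes, nodes[1:]):
        a.next = b
    nodes[-1].next = nodes[1]  # close the cycle back onto node 2
    assert Solution().detectCycle(nodes[0]) is nodes[1]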
| JuanCTorres/interview-prep-solutions | codelab/listcycle.py | Python | mit | 1,048 |
from django.utils.translation import ugettext as _, ugettext_lazy as _lazy
from django.core import urlresolvers
from gasistafelice.rest.views.blocks.base import BlockSSDataTables, ResourceBlockAction, CREATE_PDF, SENDME_PDF
from gasistafelice.consts import EDIT, CONFIRM
from gasistafelice.lib.shortcuts import render_to_response, render_to_context_response
from gasistafelice.lib.http import HttpResponse
from gasistafelice.gas.models import GASMember
from gasistafelice.gas.forms.order.gmo import BasketGASMemberOrderForm
from gasistafelice.lib.formsets import BaseFormSetWithRequest
from django.forms.formsets import formset_factory
import cgi, os
import logging
log = logging.getLogger(__name__)
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
class Block(BlockSSDataTables):
BLOCK_NAME = "basket"
BLOCK_DESCRIPTION = _("Basket")
BLOCK_VALID_RESOURCE_TYPES = ["gasmember"]
#3: 'ordered_product__stock__supplier_stock__product', gasstock
COLUMN_INDEX_NAME_MAP = {
0: 'pk',
1: 'ordered_product__order__pk',
2: 'ordered_product__gasstock__stock__supplier__name',
3: 'ordered_product__gasstock__stock__product__name',
4: 'ordered_price',
5: '' ,
6: 'ordered_amount',
7: 'tot_price',
8: 'enabled',
9: ''
}
#,
# 10: '' --> order_urn
def _get_user_actions(self, request):
user_actions = []
if not request.resource.gas.config.gasmember_auto_confirm_order:
#TODO seldon: does this work for a GASMember?
#if request.user.has_perm(EDIT, obj=request.resource):
if request.user == request.resource.person.user:
user_actions += [
ResourceBlockAction(
block_name = self.BLOCK_NAME,
resource = request.resource,
name=CONFIRM, verbose_name=_("Confirm all"),
popup_form=False,
),
]
if request.user == request.resource.person.user:
user_actions += [
ResourceBlockAction(
block_name = self.BLOCK_NAME,
resource = request.resource,
name=CREATE_PDF, verbose_name=_("Create PDF"),
popup_form=False,
method="OPENURL"
),
ResourceBlockAction(
block_name = self.BLOCK_NAME,
resource = request.resource,
name=SENDME_PDF, verbose_name=_("Send email PDF gasmember"),
popup_form=False,
)
]
return user_actions
def _get_resource_list(self, request):
#qs = request.resource.basket | request.resource.basket_to_be_delivered
qs = request.resource.basket
return qs
def _get_edit_multiple_form_class(self):
qs = self._get_resource_list(self.request)
return formset_factory(
form=BasketGASMemberOrderForm,
formset=BaseFormSetWithRequest,
extra=qs.count()
)
def _get_records(self, request, querySet):
"""Return records of rendered table fields."""
gmos = querySet
data = {}
# data2 = {}
i = 0
c = gmos.count()
# Store mapping between GSSOP-id and needed info: formset_index and ordered_total
map_info = { }
gmo = self.resource #GASMemberOrder()
av = False
for i,el in enumerate(querySet):
key_prefix = 'form-%d' % i
data.update({
'%s-id' % key_prefix : el.pk, #gmo.pk,
'%s-ordered_amount' % key_prefix : el.ordered_amount or 0,
'%s-ordered_price' % key_prefix : el.ordered_product.order_price, # displayed as hidden field
'%s-gm_id' % key_prefix : gmo.pk, # displayed as hidden field! Note: this is gmo_id
'%s-gsop_id' % key_prefix : el.ordered_product.pk,
'%s-enabled' % key_prefix : bool(av),
})
map_info[el.pk] = {
'formset_index' : i,
'ordered_total' : el.price_expected, # This is the total computed NOW (with ordered_product.price)
}
data['form-TOTAL_FORMS'] = c
data['form-INITIAL_FORMS'] = c
data['form-MAX_NUM_FORMS'] = 0
formset = self._get_edit_multiple_form_class()(request, data)
records = []
for i,el in enumerate(querySet):
form = formset[map_info[el.pk]['formset_index']]
total = map_info[el.pk]['ordered_total']
form.fields['ordered_amount'].widget.attrs = {
'class' : 'amount',
'step' : el.ordered_product.gasstock.step or 1,
'minimum_amount' : el.ordered_product.gasstock.minimum_amount or 1,
'eur_chan' : ["", "alert"][bool(el.has_changed)],
'req_conf' : ["alert", ""][bool(el.is_confirmed)],
's_url' : el.supplier.urn,
'p_url' : el.stock.urn,
}
#'p_url' : el.product.urn,
records.append({
'id' : "%s %s %s %s %s" % (el.pk, form['id'], form['gm_id'], form['gsop_id'], form['ordered_price']),
'order' : el.order.pk,
'supplier' : el.supplier,
'product' : el.product,
'price' : el.ordered_product.order_price,
'price_changed' : not el.has_changed,
'ordered_amount' : form['ordered_amount'], # field initialized with the minimum amount and carrying the step attribute
'ordered_total' : total,
'field_enabled' : form['enabled'],
'order_confirmed' : el.is_confirmed,
'order_urn' : el.order.urn,
})
#'description' : el.product.description,
#return records, records, {}
return formset, records, {}
def _set_records(self, request, records):
pass
def get_response(self, request, resource_type, resource_id, args):
try:
rv = super(Block, self).get_response(request, resource_type, resource_id, args)
except NotImplementedError:
# Not implemented args are implemented in this method
pass
if args == CONFIRM:
for gmo in self.resource.basket:
log.debug(u"Sto confermando un ordine gasista(%s)" % gmo)
gmo.confirm()
gmo.save()
#IMPORTANT: unset args to compute table results!
args = self.KW_DATA
elif args == CREATE_PDF:
rv = self._create_pdf()
elif args == SENDME_PDF:
rv = self._send_email_logged()
#TODO FIXME: ugly patch to fix AFTERrecords.append( 6
if args == self.KW_DATA:
from gasistafelice.lib.views_support import prepare_datatables_queryset, render_datatables
querySet = self._get_resource_list(request)
#columnIndexNameMap is required for correct sorting behavior
columnIndexNameMap = self.COLUMN_INDEX_NAME_MAP
#path to template used to generate json (optional)
jsonTemplatePath = 'blocks/%s/data.json' % self.BLOCK_NAME
querySet, dt_params = prepare_datatables_queryset(request, querySet, columnIndexNameMap)
#TODO FIXME: AFTER 6
formset, records, moreData = self._get_records(request, querySet)
rv = render_datatables(request, records, dt_params, jsonTemplatePath)
return rv
def _send_email_logged(self):
try:
#WAS: to = self.request.user.email
#WAS: self.resource.send_email([to],None, 'Order Email me', self.request.user)
self.resource.send_email_to_gasmember(None, 'Order Email me', self.request.user)
return self.response_success()
except Exception, e:
return self.response_error(_('We had some errors<pre>%s</pre>') % cgi.escape(str(e)))
def _create_pdf(self):
pdf_data = self.resource.get_pdf_data(requested_by=self.request.user)
if not pdf_data:
rv = self.response_error(_('Report not generated'))
else:
response = HttpResponse(pdf_data, mimetype='application/pdf')
response['Content-Disposition'] = "attachment; filename=" + self.resource.get_valid_name() + ".pdf"
rv = response
return rv
| matteo88/gasistafelice | gasistafelice/rest/views/blocks/basket.py | Python | agpl-3.0 | 8,830 |
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf', # Open file 'inventory.hdf'
HC.WRITE|HC.CREATE) # creating it if it does not exist
vs = f.vstart() # init vdata interface
vd = vs.attach('INVENTORY', 1) # attach vdata 'INVENTORY' in write mode
# Update the `status' vdata attribute. The attribute length must not
# change. We call the attribute info() method, which returns a list where
# number of values (eg string length) is stored at index 2.
# We then assign a left justified string of exactly that length.
len = vd.attr('status').info()[2]
vd.status = '%-*s' % (len, 'phase 2 done')
vd[vd._nrecs:] = ( # append 2 records
('A4321', 'axe', 5, 1.5, 25), # first record
('C3214', 'cup', 100, 0.1, 3.25) # second record
)
vd.detach() # "close" the vdata
vs.end() # terminate the vdata interface
f.close() # close the HDF file
| fhs/python-hdf4 | examples/inventory/inventory_1-2.py | Python | mit | 1,007 |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from ..classes import HnsccDashboard
@login_required
def hnscc_dashboard(request, **kwargs):
dashboard = HnsccDashboard(
dashboard_type=kwargs.get('dashboard_type'),
dashboard_id=kwargs.get('dashboard_id'),
dashboard_model=kwargs.get('dashboard_model'),
dashboard_category=kwargs.get('dashboard_category'),
registered_subject=kwargs.get('registered_subject'),
show=kwargs.get('show'),
dashboard_type_list=['subject'], )
dashboard.set_context()
return render_to_response(
'hnscc_dashboard.html',
dashboard.context.get(),
context_instance=RequestContext(request))
| botswana-harvard/bhp065_project | bhp065/apps/hnscc_dashboard/views/hnscc_dashboard.py | Python | gpl-2.0 | 806 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class ActionRouting(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The routing types that are supported for action requests.
"""
PROXY = "Proxy"
class ProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The provisioning state of the resource provider.
"""
ACCEPTED = "Accepted"
DELETING = "Deleting"
RUNNING = "Running"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class ResourceTypeRouting(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The routing types that are supported for resource requests.
"""
PROXY = "Proxy"
PROXY_CACHE = "Proxy,Cache"
class ValidationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of validation to run against a matching request.
"""
SWAGGER = "Swagger"
| Azure/azure-sdk-for-python | sdk/customproviders/azure-mgmt-customproviders/azure/mgmt/customproviders/models/_customproviders_enums.py | Python | mit | 1,961 |
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
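# For illustration: a getwork call POSTs a JSON-RPC 1.1 body roughly like
#   {"version": "1.1", "method": "getwork", "id": 2, "params": []}
# with HTTP Basic auth, and hands back the "result" member of the JSON response.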
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
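# Worked example for clarity: for the 8-byte buffer 01 02 03 04 05 06 07 08,
#   bufreverse  -> 04 03 02 01 08 07 06 05  (byte-swap inside each 32-bit word)
#   wordreverse -> 05 06 07 08 01 02 03 04  (reverse the order of the 32-bit words)
# Applying both, as done to the final hash in Miner.work below, reverses the
# whole byte order of the buffer.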
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 10222
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| GoobyCoin/GoobyCoin | contrib/pyminer/pyminer.py | Python | mit | 6,435 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from lxml import etree
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import Text
from tempest.services.compute.xml.common import xml_to_json
from tempest.services.compute.xml.common import XMLNS_11
XMLNS_OS_FLV_EXT_DATA = \
"http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1"
XMLNS_OS_FLV_ACCESS = \
"http://docs.openstack.org/compute/ext/flavor_access/api/v2"
class FlavorsClientXML(RestClientXML):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(FlavorsClientXML, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.compute.catalog_type
def _format_flavor(self, f):
flavor = {'links': []}
for k, v in f.items():
if k == 'link':
flavor['links'].append(v)
continue
if k == '{%s}ephemeral' % XMLNS_OS_FLV_EXT_DATA:
k = 'OS-FLV-EXT-DATA:ephemeral'
if k == '{%s}is_public' % XMLNS_OS_FLV_ACCESS:
k = 'os-flavor-access:is_public'
v = True if v == 'True' else False
if k == 'extra_specs':
k = 'OS-FLV-WITH-EXT-SPECS:extra_specs'
flavor[k] = dict(v)
continue
try:
v = int(v)
except ValueError:
try:
v = float(v)
except ValueError:
pass
flavor[k] = v
return flavor
def _parse_array(self, node):
return [self._format_flavor(xml_to_json(x)) for x in node]
def _list_flavors(self, url, params):
if params:
url += "?%s" % urllib.urlencode(params)
resp, body = self.get(url, self.headers)
flavors = self._parse_array(etree.fromstring(body))
return resp, flavors
def list_flavors(self, params=None):
url = 'flavors'
return self._list_flavors(url, params)
def list_flavors_with_detail(self, params=None):
url = 'flavors/detail'
return self._list_flavors(url, params)
def get_flavor_details(self, flavor_id):
resp, body = self.get("flavors/%s" % str(flavor_id), self.headers)
body = xml_to_json(etree.fromstring(body))
flavor = self._format_flavor(body)
return resp, flavor
def create_flavor(self, name, ram, vcpus, disk, flavor_id, **kwargs):
"""Creates a new flavor or instance type."""
flavor = Element("flavor",
xmlns=XMLNS_11,
ram=ram,
vcpus=vcpus,
disk=disk,
id=flavor_id,
name=name)
if kwargs.get('rxtx'):
flavor.add_attr('rxtx_factor', kwargs.get('rxtx'))
if kwargs.get('swap'):
flavor.add_attr('swap', kwargs.get('swap'))
if kwargs.get('ephemeral'):
flavor.add_attr('OS-FLV-EXT-DATA:ephemeral',
kwargs.get('ephemeral'))
if kwargs.get('is_public'):
flavor.add_attr('os-flavor-access:is_public',
kwargs.get('is_public'))
flavor.add_attr('xmlns:OS-FLV-EXT-DATA', XMLNS_OS_FLV_EXT_DATA)
flavor.add_attr('xmlns:os-flavor-access', XMLNS_OS_FLV_ACCESS)
resp, body = self.post('flavors', str(Document(flavor)), self.headers)
body = xml_to_json(etree.fromstring(body))
flavor = self._format_flavor(body)
return resp, flavor
def delete_flavor(self, flavor_id):
"""Deletes the given flavor."""
return self.delete("flavors/%s" % str(flavor_id), self.headers)
def is_resource_deleted(self, id):
# Did not use get_flavor_details(id) for verification as it gives
# 200 ok even for deleted id. LP #981263
# We can remove the loop here and use get by ID when the bug gets sorted out
resp, flavors = self.list_flavors_with_detail()
for flavor in flavors:
if flavor['id'] == id:
return False
return True
def set_flavor_extra_spec(self, flavor_id, specs):
"""Sets extra Specs to the mentioned flavor."""
extra_specs = Element("extra_specs")
for key in specs.keys():
extra_specs.add_attr(key, specs[key])
resp, body = self.post('flavors/%s/os-extra_specs' % flavor_id,
str(Document(extra_specs)), self.headers)
body = xml_to_json(etree.fromstring(body))
return resp, body
def get_flavor_extra_spec(self, flavor_id):
"""Gets extra Specs of the mentioned flavor."""
resp, body = self.get('flavors/%s/os-extra_specs' % flavor_id,
self.headers)
body = xml_to_json(etree.fromstring(body))
return resp, body
def get_flavor_extra_spec_with_key(self, flavor_id, key):
"""Gets extra Specs key-value of the mentioned flavor and key."""
resp, xml_body = self.get('flavors/%s/os-extra_specs/%s' %
(str(flavor_id), key), self.headers)
body = {}
element = etree.fromstring(xml_body)
key = element.get('key')
body[key] = xml_to_json(element)
return resp, body
def update_flavor_extra_spec(self, flavor_id, key, **kwargs):
"""Update extra Specs details of the mentioned flavor and key."""
doc = Document()
for (k, v) in kwargs.items():
element = Element(k)
doc.append(element)
value = Text(v)
element.append(value)
resp, body = self.put('flavors/%s/os-extra_specs/%s' %
(flavor_id, key),
str(doc), self.headers)
body = xml_to_json(etree.fromstring(body))
return resp, {key: body}
def unset_flavor_extra_spec(self, flavor_id, key):
"""Unsets an extra spec based on the mentioned flavor and key."""
return self.delete('flavors/%s/os-extra_specs/%s' % (str(flavor_id),
key))
def _parse_array_access(self, node):
return [xml_to_json(x) for x in node]
def list_flavor_access(self, flavor_id):
"""Gets flavor access information given the flavor id."""
resp, body = self.get('flavors/%s/os-flavor-access' % str(flavor_id),
self.headers)
body = self._parse_array(etree.fromstring(body))
return resp, body
def add_flavor_access(self, flavor_id, tenant_id):
"""Add flavor access for the specified tenant."""
doc = Document()
server = Element("addTenantAccess")
doc.append(server)
server.add_attr("tenant", tenant_id)
resp, body = self.post('flavors/%s/action' % str(flavor_id),
str(doc), self.headers)
body = self._parse_array_access(etree.fromstring(body))
return resp, body
def remove_flavor_access(self, flavor_id, tenant_id):
"""Remove flavor access from the specified tenant."""
doc = Document()
server = Element("removeTenantAccess")
doc.append(server)
server.add_attr("tenant", tenant_id)
resp, body = self.post('flavors/%s/action' % str(flavor_id),
str(doc), self.headers)
body = self._parse_array_access(etree.fromstring(body))
return resp, body
| eltonkevani/tempest_el_env | tempest/services/compute/xml/flavors_client.py | Python | apache-2.0 | 8,407 |
# -*- coding: UTF-8 -*-
import haystack
from django.core.management.base import BaseCommand, CommandError
from conference import models
from collections import defaultdict
from optparse import make_option
class Command(BaseCommand):
"""
"""
option_list = BaseCommand.option_list + (
make_option('--by-ticket',
action='store_true',
dest='by_ticket',
default=False,
help='list by ticket instead of person',
),
make_option('--no-staff',
action='store_true',
dest='no_staff',
default=False,
help='exclude staff tickets',
),
)
def handle(self, *args, **options):
qs = models.Ticket.objects\
.filter(orderitem__order___complete=True)\
.exclude(fare__ticket_type='partner')\
.select_related('user', 'fare', 'p3_conference')
if options['no_staff']:
qs = qs.exclude(ticket_type='staff')
buyers = defaultdict(list)
names = defaultdict(list)
non_conference_tickets = defaultdict(list)
conference_tickets = []
alien_tickets = []
for t in qs.filter(fare__ticket_type='conference'):
name = t.name or '%s %s' % (t.user.first_name, t.user.last_name)
data = {
'name': name,
'ticket': t,
'additional': [],
}
conference_tickets.append(data)
buyers[t.user_id].append(data)
names[name].append(data)
for t in qs.exclude(fare__ticket_type='conference'):
if t.name:
if t.name in names:
founds = names[t.name]
if len(founds) == 1:
ix = 0
maybe = False
else:
maybe = True
for ix, tdata in enumerate(founds):
if tdata['ticket'].user_id == t.user_id:
maybe = False
break
else:
ix = 0
founds[ix]['additional'].append({
'ticket': t,
'maybe': maybe,
})
non_conference_tickets[t.fare].append({
'ticket': t,
'maybe': maybe,
'conference': founds[ix],
})
continue
if t.user_id in buyers:
buyers[t.user_id][0]['additional'].append({
'ticket': t,
'maybe': False,
})
non_conference_tickets[t.fare].append({
'ticket': t,
'maybe': False,
'conference': buyers[t.user_id][0],
})
continue
name = t.name or '%s %s' % (t.user.first_name, t.user.last_name)
alien_tickets.append({
'name': name,
'ticket': t,
})
non_conference_tickets[t.fare].append({
'ticket': t,
'maybe': False,
'conference': None,
})
conference_tickets.sort(key=lambda x: x['name'].upper())
alien_tickets.sort(key=lambda x: x['name'].upper())
if not options['by_ticket']:
letter = None
for t in conference_tickets:
row = [
t['name'].encode('utf-8'),
'STAFF' if t['ticket'].ticket_type == 'staff' else t['ticket'].fare.name,
t['ticket'].p3_conference.days if t['ticket'].p3_conference and t['ticket'].fare.code[2] == 'D' else '',
]
if row[0][0].upper() != letter:
letter = row[0][0].upper()
print '\n\n'
print '\t\t\t', letter
print '-' * 80
print '\n\n'
print '\t'.join(map(str, row))
for linked in t['additional']:
row = [
'%s%s' % ('(*) ' if linked['maybe'] else '', linked['ticket'].name.encode('utf-8')),
linked['ticket'].fare.code,
linked['ticket'].fare.name,
]
print '\t', '\t'.join(map(str, row))
if alien_tickets:
print '\n\n'
print '\t\t\t', 'ALIEN'
print '-' * 80
print '\n\n'
for t in alien_tickets:
row = [
t['name'].encode('utf-8'),
'STAFF' if t['ticket'].ticket_type == 'staff' else t['ticket'].fare.name,
t['ticket'].p3_conference.days if t['ticket'].p3_conference and t['ticket'].fare.code[2] == 'D' else '',
]
print '\t'.join(map(str, row))
else:
for fare, items in non_conference_tickets.items():
print '\n\n'
print '\t\t\t', fare.code, fare.name.encode('utf-8')
print '-' * 80
print '\n\n'
def k(x):
t = x['ticket']
return t.name or '%s %s' % (t.user.first_name, t.user.last_name)
for t in sorted(items, key=k):
if t['maybe']:
print '(*)',
print k(t).encode('utf-8'), '->',
if t['conference']:
print t['conference']['name'].encode('utf-8')
else:
print ''
| pythonitalia/pycon_site | p3/management/commands/ticket_list.py | Python | bsd-2-clause | 5,861 |
import pytest
from plenum.common.config_helper import PNodeConfigHelper
from plenum.common.startable import Mode
from plenum.server.replica_validator_enums import INCORRECT_PP_SEQ_NO, OLD_VIEW, ALREADY_ORDERED, STASH_WATERMARKS, \
STASH_CATCH_UP, STASH_VIEW_3PC
from plenum.test.helper import checkDiscardMsg, generate_state_root, create_prepare
from plenum.test.test_node import TestNode
from plenum.test.testing_utils import FakeSomething
@pytest.fixture(scope='function')
def test_node(
tdirWithPoolTxns,
tdirWithDomainTxns,
poolTxnNodeNames,
tdirWithNodeKeepInited,
tdir,
tconf,
allPluginsPath):
node_name = poolTxnNodeNames[0]
config_helper = PNodeConfigHelper(node_name, tconf, chroot=tdir)
node = TestNode(
node_name,
config_helper=config_helper,
config=tconf,
pluginPaths=allPluginsPath)
view_no = 1
node.view_changer = FakeSomething(view_no=view_no,
view_change_in_progress=False)
for r in node.replicas.values():
r._consensus_data.view_no = view_no
node.mode = Mode.participating
yield node
node.onStopping() # TODO stop won't call onStopping as we are in Stopped state
def test_discard_process_three_phase_msg(test_node, looper):
sender = "NodeSender"
inst_id = 0
replica = test_node.replicas[inst_id]
view_no = test_node.viewNo
pp_seq_no = 0 # should start with 1
msg = create_prepare((view_no, pp_seq_no), generate_state_root(), inst_id)
replica._external_bus.process_incoming(msg, sender)
checkDiscardMsg([replica.stasher, ], msg, INCORRECT_PP_SEQ_NO)
def test_discard_process_three_phase_msg_for_old_view(test_node, looper):
sender = "NodeSender"
inst_id = 0
replica = test_node.replicas[inst_id]
view_no = test_node.viewNo - 1
pp_seq_no = replica.last_ordered_3pc[1] + 1
msg = create_prepare((view_no, pp_seq_no), generate_state_root(), inst_id)
replica._external_bus.process_incoming(msg, sender)
checkDiscardMsg([replica.stasher, ], msg, OLD_VIEW)
def test_discard_process_three_phase_already_ordered_msg(test_node, looper):
sender = "NodeSender"
inst_id = 0
replica = test_node.replicas[inst_id]
replica.last_ordered_3pc = (test_node.viewNo, 100)
replica._checkpointer.update_watermark_from_3pc()
view_no = test_node.viewNo
pp_seq_no = replica.h
msg = create_prepare((view_no, pp_seq_no), generate_state_root(), inst_id)
replica._external_bus.process_incoming(msg, sender)
checkDiscardMsg([replica.stasher, ], msg, ALREADY_ORDERED)
def test_process_three_phase_msg_with_catchup_stash(test_node, looper):
sender = "NodeSender"
inst_id = 0
replica = test_node.replicas[inst_id]
old_catchup_stashed_msgs = replica.stasher.stash_size(STASH_CATCH_UP)
test_node.mode = Mode.syncing # catchup in process
view_no = test_node.viewNo
pp_seq_no = replica.last_ordered_3pc[1] + 1
msg = create_prepare((view_no, pp_seq_no), generate_state_root(), inst_id)
replica._external_bus.process_incoming(msg, sender)
assert old_catchup_stashed_msgs + 1 == replica.stasher.stash_size(STASH_CATCH_UP)
def test_process_three_phase_msg_and_stashed_future_view(test_node, looper):
sender = "NodeSender"
inst_id = 0
replica = test_node.replicas[inst_id]
old_stashed_future_view_msgs = replica.stasher.stash_size(STASH_VIEW_3PC)
view_no = test_node.viewNo + 1
pp_seq_no = replica.last_ordered_3pc[1] + 1
msg = create_prepare((view_no, pp_seq_no), generate_state_root(), inst_id)
replica._external_bus.process_incoming(msg, sender)
assert old_stashed_future_view_msgs + 1 == replica.stasher.stash_size(STASH_VIEW_3PC)
def test_process_three_phase_msg_and_stashed_for_next_checkpoint(test_node, looper):
sender = "NodeSender"
inst_id = 0
replica = test_node.replicas[inst_id]
old_stashed_watermarks_msgs = replica.stasher.stash_size(STASH_WATERMARKS)
view_no = test_node.viewNo
pp_seq_no = replica.H + 1
msg = create_prepare((view_no, pp_seq_no), generate_state_root(), inst_id)
replica._external_bus.process_incoming(msg, sender)
assert old_stashed_watermarks_msgs + 1 == replica.stasher.stash_size(STASH_WATERMARKS)
| evernym/plenum | plenum/test/replica/test_3pc_messages_validation.py | Python | apache-2.0 | 4,314 |
"""Development settings and globals."""
from os.path import join, normpath
from base import *
########## LOCAL DATABASE CONFIGURATION, OVERRIDES BASE
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join(DJANGO_ROOT, 'local_phx.db'),
}
}
########## END DATABASE CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
########## END CACHE CONFIGURATION
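# Illustrative alternative (an assumption, not part of this project): to
# exercise real caching locally, the dummy backend above could be swapped for
# the local-memory backend, e.g.
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
#     }
# }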
########## TOOLBAR CONFIGURATION
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INSTALLED_APPS += (
'debug_toolbar',
'django_extensions',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INTERNAL_IPS = ('127.0.0.1',)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
########## END TOOLBAR CONFIGURATION
| josven/phx | phx/settings/local.py | Python | mit | 1,642 |
"""A formatter which formats phone numbers as they are entered.
An AsYouTypeFormatter can be created by invoking
AsYouTypeFormatter(region_code). After that digits can be added by invoking
input_digit() on the formatter instance, and the partially formatted phone
number will be returned each time a digit is added. clear() should be invoked
before a new number needs to be formatted.
See the unit tests for more details on how the formatter is to be used.
"""
# Based on original Java code:
# java/src/com/google/i18n/phonenumbers/AsYouTypeFormatter.java
# Copyright (C) 2009-2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .util import u, unicod, U_EMPTY_STRING, U_SPACE
from .unicode_util import digit as unicode_digit
from .re_util import fullmatch
from .phonemetadata import PhoneMetadata
from .phonenumberutil import _VALID_PUNCTUATION, REGION_CODE_FOR_NON_GEO_ENTITY
from .phonenumberutil import _PLUS_SIGN, _PLUS_CHARS_PATTERN
from .phonenumberutil import _extract_country_code, region_code_for_country_code
from .phonenumberutil import country_code_for_region
from .phonenumberutil import _formatting_rule_has_first_group_only
# Character used when appropriate to separate a prefix, such as a long NDD or
# a country calling code, from the national number.
_SEPARATOR_BEFORE_NATIONAL_NUMBER = U_SPACE
_EMPTY_METADATA = PhoneMetadata(id=unicod(""),
international_prefix=unicod("NA"),
register=False)
# A pattern that is used to match character classes in regular expressions. An
# example of a character class is [1-4].
_CHARACTER_CLASS_PATTERN = re.compile(unicod("\\[([^\\[\\]])*\\]"))
# Any digit in a regular expression that actually denotes a digit. For
# example, in the regular expression 80[0-2]\d{6,10}, the first 2 digits (8
# and 0) are standalone digits, but the rest are not.
# Two look-aheads are needed because the number following \\d could be a
# two-digit number, since the phone number can be as long as 15 digits.
_STANDALONE_DIGIT_PATTERN = re.compile(unicod("\\d(?=[^,}][^,}])"))
# A set of characters that, if found in a national prefix formatting rule,
# indicates that we should separate the national prefix from the number when
# formatting.
_NATIONAL_PREFIX_SEPARATORS_PATTERN = re.compile("[- ]")
# A pattern that is used to determine if a number_format under
# available_formats is eligible to be used by the AYTF. It is eligible when
# the format element under number_format contains groups of the dollar sign
# followed by a single digit, separated by valid phone number
# punctuation. This prevents invalid punctuation (such as the star sign in
# Israeli star numbers) getting into the output of the AYTF.
_ELIGIBLE_FORMAT_PATTERN = re.compile(unicod("[") + _VALID_PUNCTUATION + unicod("]*") +
unicod("(\\\\\\d") + unicod("[") + _VALID_PUNCTUATION + unicod("]*)+"))
# This is the minimum length of national number accrued that is required to
# trigger the formatter. The first element of the leading_digits_pattern of each
# number_format contains a regular expression that matches up to this number of
# digits.
_MIN_LEADING_DIGITS_LENGTH = 3
# The digits that have not been entered yet will be represented by a \u2008,
# the punctuation space.
_DIGIT_PLACEHOLDER = u("\u2008")
_DIGIT_PATTERN = re.compile(_DIGIT_PLACEHOLDER)
def _get_metadata_for_region(region_code):
"""The metadata needed by this class is the same for all regions
sharing the same country calling code. Therefore, we return the
metadata for "main" region for this country calling code."""
country_calling_code = country_code_for_region(region_code)
main_country = region_code_for_country_code(country_calling_code)
# Set to a default instance of the metadata. This allows us to
# function with an incorrect region code, even if formatting only
# works for numbers specified with "+".
return PhoneMetadata.metadata_for_region(main_country, _EMPTY_METADATA)
class AsYouTypeFormatter(object):
def __init__(self, region_code):
"""Gets an AsYouTypeFormatter for the specific region.
Arguments:
region_code -- The region where the phone number is being entered
Return an AsYouTypeFormatter} object, which could be used to format
phone numbers in the specific region "as you type"
"""
self._clear()
self._default_country = region_code.upper()
self._current_metadata = _get_metadata_for_region(self._default_country)
self._default_metadata = self._current_metadata
def _maybe_create_new_template(self):
"""Returns True if a new template is created as opposed to reusing the existing template.
When there are multiple available formats, the formatter uses the
first format where a formatting template could be created.
"""
ii = 0
while ii < len(self._possible_formats):
number_format = self._possible_formats[ii]
pattern = number_format.pattern
if self._current_formatting_pattern == pattern:
return False
if self._create_formatting_template(number_format):
self._current_formatting_pattern = pattern
if number_format.national_prefix_formatting_rule is None:
self._should_add_space_after_national_prefix = False
else:
self._should_add_space_after_national_prefix = bool(_NATIONAL_PREFIX_SEPARATORS_PATTERN.search(number_format.national_prefix_formatting_rule))
# With a new formatting template, the matched position using
# the old template needs to be reset.
self._last_match_position = 0
return True
else:
# Remove the current number format from _possible_formats
del self._possible_formats[ii]
ii -= 1
ii += 1
self._able_to_format = False
return False
def _get_available_formats(self, leading_digits):
if (self._is_complete_number and
len(self._current_metadata.intl_number_format) > 0):
format_list = self._current_metadata.intl_number_format
else:
format_list = self._current_metadata.number_format
national_prefix_is_used_by_country = (self._current_metadata.national_prefix is not None)
for this_format in format_list:
if (not national_prefix_is_used_by_country or self._is_complete_number or
this_format.national_prefix_optional_when_formatting or
_formatting_rule_has_first_group_only(this_format.national_prefix_formatting_rule)):
if self._is_format_eligible(this_format.format):
self._possible_formats.append(this_format)
self._narrow_down_possible_formats(leading_digits)
def _is_format_eligible(self, format):
return fullmatch(_ELIGIBLE_FORMAT_PATTERN, format)
def _narrow_down_possible_formats(self, leading_digits):
index_of_leading_digits_pattern = len(leading_digits) - _MIN_LEADING_DIGITS_LENGTH
ii = 0
while ii < len(self._possible_formats):
num_format = self._possible_formats[ii]
ii += 1
if len(num_format.leading_digits_pattern) == 0:
# Keep everything that isn't restricted by leading digits.
continue
last_leading_digits_pattern = min(index_of_leading_digits_pattern,
len(num_format.leading_digits_pattern) - 1)
leading_digits_pattern = re.compile(num_format.leading_digits_pattern[last_leading_digits_pattern])
m = leading_digits_pattern.match(leading_digits)
if not m:
# remove the element we've just examined, now at (ii-1)
ii -= 1
self._possible_formats.pop(ii)
def _create_formatting_template(self, num_format):
number_pattern = num_format.pattern
# The formatter doesn't format numbers when number_pattern contains
# "|", e.g. (20|3)\d{4}. In those cases we quickly return.
if number_pattern.find('|') != -1:
return False
# Replace anything in the form of [..] with \d
number_pattern = re.sub(_CHARACTER_CLASS_PATTERN, unicod("\\\\d"), number_pattern)
# Replace any standalone digit (not the one in d{}) with \d
number_pattern = re.sub(_STANDALONE_DIGIT_PATTERN, unicod("\\\\d"), number_pattern)
        self._formatting_template = U_EMPTY_STRING
temp_template = self._get_formatting_template(number_pattern, num_format.format)
if len(temp_template) > 0:
self._formatting_template = temp_template
return True
return False
def _get_formatting_template(self, number_pattern, number_format):
"""Gets a formatting template which can be used to efficiently
format a partial number where digits are added one by one."""
# Create a phone number consisting only of the digit 9 that matches the
# number_pattern by applying the pattern to the longest_phone_number string.
longest_phone_number = unicod("999999999999999")
number_re = re.compile(number_pattern)
m = number_re.search(longest_phone_number) # this will always succeed
a_phone_number = m.group(0)
# No formatting template can be created if the number of digits
# entered so far is longer than the maximum the current formatting
# rule can accommodate.
if len(a_phone_number) < len(self._national_number):
return U_EMPTY_STRING
# Formats the number according to number_format
template = re.sub(number_pattern, number_format, a_phone_number)
# Replaces each digit with character _DIGIT_PLACEHOLDER
template = re.sub("9", _DIGIT_PLACEHOLDER, template)
return template
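    # Worked example (illustrative; the pattern and format below are assumed,
    # not read from real metadata): with
    #   number_pattern = "(\d{3})(\d{3})(\d{4})" and number_format = "(\1) \2-\3",
    # the search against "999999999999999" matches "9999999999", re.sub turns
    # it into "(999) 999-9999", and replacing each 9 with _DIGIT_PLACEHOLDER
    # yields a template of punctuation plus ten placeholder characters which
    # _input_digit_helper() later fills in digit by digit.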
def _clear(self):
"""Clears the internal state of the formatter, so it can be reused."""
self._current_output = U_EMPTY_STRING
self._accrued_input = U_EMPTY_STRING
self._accrued_input_without_formatting = U_EMPTY_STRING
self._formatting_template = U_EMPTY_STRING
self._last_match_position = 0
# The pattern from number_format that is currently used to create
# formatting_template.
self._current_formatting_pattern = U_EMPTY_STRING
# This contains anything that has been entered so far preceding the
# national significant number, and it is formatted (e.g. with space
# inserted). For example, this can contain IDD, country code, and/or
# NDD, etc.
self._prefix_before_national_number = U_EMPTY_STRING
self._should_add_space_after_national_prefix = False
# This contains the national prefix that has been extracted. It
# contains only digits without formatting.
self._extracted_national_prefix = U_EMPTY_STRING
self._national_number = U_EMPTY_STRING
# This indicates whether AsYouTypeFormatter is currently doing the
# formatting.
self._able_to_format = True
# Set to true when users enter their own
# formatting. AsYouTypeFormatter will do no formatting at all when
# this is set to True.
self._input_has_formatting = False
# The position of a digit upon which input_digit(remember_position=True) is
# most recently invoked, as found in accrued_input_without_formatting.
self._position_to_remember = 0
# The position of a digit upon which input_digit(remember_position=True) is
# most recently invoked, as found in the original sequence of
# characters the user entered.
self._original_position = 0
# This is set to true when we know the user is entering a full
# national significant number, since we have either detected a
# national prefix or an international dialing prefix. When this is
# true, we will no longer use local number formatting patterns.
self._is_complete_number = False
self._is_expecting_country_calling_code = False
self._possible_formats = []
def clear(self):
"""Clears the internal state of the formatter, so it can be reused."""
self._clear()
if self._current_metadata != self._default_metadata:
self._current_metadata = _get_metadata_for_region(self._default_country)
def input_digit(self, next_char, remember_position=False):
"""Formats a phone number on-the-fly as each digit is entered.
If remember_position is set, remembers the position where next_char is
inserted, so that it can be retrieved later by using
get_remembered_position. The remembered position will be automatically
adjusted if additional formatting characters are later
inserted/removed in front of next_char.
Arguments:
next_char -- The most recently entered digit of a phone
number. Formatting characters are allowed, but as soon as they
are encountered this method formats the number as entered and
not "as you type" anymore. Full width digits and Arabic-indic
digits are allowed, and will be shown as they are.
remember_position -- Whether to track the position where next_char is
inserted.
Returns the partially formatted phone number.
"""
self._accrued_input += next_char
if remember_position:
self._original_position = len(self._accrued_input)
# We do formatting on-the-fly only when each character entered is
# either a digit, or a plus sign (accepted at the start of the number
# only).
if not self._is_digit_or_leading_plus_sign(next_char):
self._able_to_format = False
self._input_has_formatting = True
else:
next_char = self._normalize_and_accrue_digits_and_plus_sign(next_char, remember_position)
if not self._able_to_format:
# When we are unable to format because of reasons other than that
# formatting chars have been entered, it can be due to really long
# IDDs or NDDs. If that is the case, we might be able to do
# formatting again after extracting them.
if self._input_has_formatting:
self._current_output = self._accrued_input
return self._current_output
elif self._attempt_to_extract_idd():
if self._attempt_to_extract_ccc():
self._current_output = self._attempt_to_choose_pattern_with_prefix_extracted()
return self._current_output
elif self._able_to_extract_longer_ndd():
# Add an additional space to separate long NDD and national
# significant number for readability. We don't set
# should_add_space_after_national_prefix to True, since we don't
# want this to change later when we choose formatting
# templates.
self._prefix_before_national_number += _SEPARATOR_BEFORE_NATIONAL_NUMBER
self._current_output = self._attempt_to_choose_pattern_with_prefix_extracted()
return self._current_output
self._current_output = self._accrued_input
return self._current_output
# We start to attempt to format only when at least
# MIN_LEADING_DIGITS_LENGTH digits (the plus sign is counted as a
# digit as well for this purpose) have been entered.
len_input = len(self._accrued_input_without_formatting)
if len_input >= 0 and len_input <= 2:
self._current_output = self._accrued_input
return self._current_output
elif len_input == 3:
if self._attempt_to_extract_idd():
self._is_expecting_country_calling_code = True
else:
# No IDD or plus sign is found, might be entering in national format.
self._extracted_national_prefix = self._remove_national_prefix_from_national_number()
self._current_output = self._attempt_to_choose_formatting_pattern()
return self._current_output
if self._is_expecting_country_calling_code:
if self._attempt_to_extract_ccc():
self._is_expecting_country_calling_code = False
self._current_output = self._prefix_before_national_number + self._national_number
return self._current_output
if len(self._possible_formats) > 0: # The formatting patterns are already chosen.
temp_national_number = self._input_digit_helper(next_char)
# See if the accrued digits can be formatted properly already. If
# not, use the results from input_digit_helper, which does
# formatting based on the formatting pattern chosen.
formatted_number = self._attempt_to_format_accrued_digits()
if len(formatted_number) > 0:
self._current_output = formatted_number
return self._current_output
self._narrow_down_possible_formats(self._national_number)
if self._maybe_create_new_template():
self._current_output = self._input_accrued_national_number()
return self._current_output
if self._able_to_format:
self._current_output = self._append_national_number(temp_national_number)
return self._current_output
else:
self._current_output = self._accrued_input
return self._current_output
else:
self._current_output = self._attempt_to_choose_formatting_pattern()
return self._current_output
def _attempt_to_choose_pattern_with_prefix_extracted(self):
self._able_to_format = True
self._is_expecting_country_calling_code = False
self._possible_formats = []
return self._attempt_to_choose_formatting_pattern()
# Some national prefixes are a substring of others. If extracting the
# shorter NDD doesn't result in a number we can format, we try to see if
# we can extract a longer version here.
def _able_to_extract_longer_ndd(self):
if len(self._extracted_national_prefix) > 0:
# Put the extracted NDD back to the national number before
# attempting to extract a new NDD.
self._national_number = self._extracted_national_prefix + self._national_number
# Remove the previously extracted NDD from
# prefixBeforeNationalNumber. We cannot simply set it to empty
# string because people sometimes incorrectly enter national
# prefix after the country code, e.g. +44 (0)20-1234-5678.
index_of_previous_ndd = self._prefix_before_national_number.rfind(self._extracted_national_prefix)
self._prefix_before_national_number = self._prefix_before_national_number[:index_of_previous_ndd]
return self._extracted_national_prefix != self._remove_national_prefix_from_national_number()
def _is_digit_or_leading_plus_sign(self, next_char):
return (next_char.isdigit() or
(len(self._accrued_input) == 1 and
fullmatch(_PLUS_CHARS_PATTERN, next_char)))
def _attempt_to_format_accrued_digits(self):
"""Check to see if there is an exact pattern match for these digits. If so, we should use this
instead of any other formatting template whose leadingDigitsPattern also matches the input.
"""
for number_format in self._possible_formats:
num_re = re.compile(number_format.pattern)
if fullmatch(num_re, self._national_number):
if number_format.national_prefix_formatting_rule is None:
self._should_add_space_after_national_prefix = False
else:
self._should_add_space_after_national_prefix = bool(_NATIONAL_PREFIX_SEPARATORS_PATTERN.search(number_format.national_prefix_formatting_rule))
formatted_number = re.sub(num_re, number_format.format, self._national_number)
return self._append_national_number(formatted_number)
return U_EMPTY_STRING
def get_remembered_position(self):
"""Returns the current position in the partially formatted phone
number of the character which was previously passed in as the
parameter of input_digit(remember_position=True)."""
if not self._able_to_format:
return self._original_position
accrued_input_index = 0
current_output_index = 0
while (accrued_input_index < self._position_to_remember and
current_output_index < len(self._current_output)):
if (self._accrued_input_without_formatting[accrued_input_index] ==
self._current_output[current_output_index]):
accrued_input_index += 1
current_output_index += 1
return current_output_index
def _append_national_number(self, national_number):
"""Combines the national number with any prefix (IDD/+ and country
code or national prefix) that was collected. A space will be inserted
between them if the current formatting template indicates this to be
suitable.
"""
prefix_before_nn_len = len(self._prefix_before_national_number)
if (self._should_add_space_after_national_prefix and prefix_before_nn_len > 0 and
self._prefix_before_national_number[-1] != _SEPARATOR_BEFORE_NATIONAL_NUMBER):
# We want to add a space after the national prefix if the national
# prefix formatting rule indicates that this would normally be
# done, with the exception of the case where we already appended a
# space because the NDD was surprisingly long.
return self._prefix_before_national_number + _SEPARATOR_BEFORE_NATIONAL_NUMBER + national_number
else:
return self._prefix_before_national_number + national_number
def _attempt_to_choose_formatting_pattern(self):
"""Attempts to set the formatting template and returns a string which
contains the formatted version of the digits entered so far."""
# We start to attempt to format only when at least MIN_LEADING_DIGITS_LENGTH digits of national
# number (excluding national prefix) have been entered.
if len(self._national_number) >= _MIN_LEADING_DIGITS_LENGTH:
self._get_available_formats(self._national_number)
# See if the accrued digits can be formatted properly already.
formatted_number = self._attempt_to_format_accrued_digits()
if len(formatted_number) > 0:
return formatted_number
if self._maybe_create_new_template():
return self._input_accrued_national_number()
else:
return self._accrued_input
else:
return self._append_national_number(self._national_number)
def _input_accrued_national_number(self):
"""Invokes input_digit_helper on each digit of the national number
accrued, and returns a formatted string in the end."""
length_of_national_number = len(self._national_number)
if length_of_national_number > 0:
temp_national_number = U_EMPTY_STRING
for ii in range(length_of_national_number):
temp_national_number = self._input_digit_helper(self._national_number[ii])
if self._able_to_format:
return self._append_national_number(temp_national_number)
else:
return self._accrued_input
else:
return self._prefix_before_national_number
def _is_nanpa_number_with_national_prefix(self):
"""Returns true if the current country is a NANPA country and the
national number begins with the national prefix.
"""
# For NANPA numbers beginning with 1[2-9], treat the 1 as the national
# prefix. The reason is that national significant numbers in NANPA
# always start with [2-9] after the national prefix. Numbers
# beginning with 1[01] can only be short/emergency numbers, which
# don't need the national prefix.
return (self._current_metadata.country_code == 1 and self._national_number[0] == '1' and
self._national_number[1] != '0' and self._national_number[1] != '1')
def _remove_national_prefix_from_national_number(self):
start_of_national_number = 0
if self._is_nanpa_number_with_national_prefix():
start_of_national_number = 1
self._prefix_before_national_number += unicod("1") + _SEPARATOR_BEFORE_NATIONAL_NUMBER
self._is_complete_number = True
elif self._current_metadata.national_prefix_for_parsing is not None:
npp_re = re.compile(self._current_metadata.national_prefix_for_parsing)
m = npp_re.match(self._national_number)
# Since some national prefix patterns are entirely optional, check
# that a national prefix could actually be extracted.
if m and m.end() > 0:
# When the national prefix is detected, we use international
# formatting rules instead of national ones, because national
# formatting rules could contain local formatting rules for
# numbers entered without area code.
self._is_complete_number = True
start_of_national_number = m.end()
self._prefix_before_national_number += self._national_number[:start_of_national_number]
national_prefix = self._national_number[:start_of_national_number]
self._national_number = self._national_number[start_of_national_number:]
return national_prefix
def _attempt_to_extract_idd(self):
"""Extracts IDD and plus sign to self._prefix_before_national_number
when they are available, and places the remaining input into
_national_number.
Returns True when accrued_input_without_formatting begins with the plus sign or valid IDD for
default_country.
"""
international_prefix = re.compile(unicod("\\") + _PLUS_SIGN + unicod("|") +
(self._current_metadata.international_prefix or U_EMPTY_STRING))
idd_match = international_prefix.match(self._accrued_input_without_formatting)
if idd_match:
self._is_complete_number = True
start_of_country_calling_code = idd_match.end()
self._national_number = self._accrued_input_without_formatting[start_of_country_calling_code:]
self._prefix_before_national_number = self._accrued_input_without_formatting[:start_of_country_calling_code]
if self._accrued_input_without_formatting[0] != _PLUS_SIGN:
self._prefix_before_national_number += _SEPARATOR_BEFORE_NATIONAL_NUMBER
return True
return False
def _attempt_to_extract_ccc(self):
"""Extracts the country calling code from the beginning of
_national_number to _prefix_before_national_number when they are
available, and places the remaining input into _national_number.
Returns True when a valid country calling code can be found.
"""
if len(self._national_number) == 0:
return False
number_without_ccc = U_EMPTY_STRING
country_code, number_without_ccc = _extract_country_code(self._national_number)
if country_code == 0:
return False
self._national_number = number_without_ccc
new_region_code = region_code_for_country_code(country_code)
if new_region_code == REGION_CODE_FOR_NON_GEO_ENTITY:
self._current_metadata = PhoneMetadata.metadata_for_nongeo_region(country_code)
elif new_region_code != self._default_country:
self._current_metadata = _get_metadata_for_region(new_region_code)
self._prefix_before_national_number += str(country_code)
self._prefix_before_national_number += _SEPARATOR_BEFORE_NATIONAL_NUMBER
# When we have successfully extracted the IDD, the previously
# extracted NDD should be cleared because it is no longer valid.
self._extracted_national_prefix = U_EMPTY_STRING
return True
def _normalize_and_accrue_digits_and_plus_sign(self, next_char, remember_position):
"""Accrues digits and the plus sign to
_accrued_input_without_formatting for later use. If next_char contains
a digit in non-ASCII format (e.g. the full-width version of digits),
it is first normalized to the ASCII version. The return value is
next_char itself, or its normalized version, if next_char is a digit
in non-ASCII format. This method assumes its input is either a digit
or the plus sign."""
if next_char == _PLUS_SIGN:
normalized_char = next_char
self._accrued_input_without_formatting += next_char
else:
next_digit = unicode_digit(next_char, -1)
if next_digit != -1:
normalized_char = unicod(next_digit)
else: # pragma no cover
normalized_char = next_char
self._accrued_input_without_formatting += normalized_char
self._national_number += normalized_char
if remember_position:
self._position_to_remember = len(self._accrued_input_without_formatting)
return normalized_char
def _input_digit_helper(self, next_char):
digit_match = _DIGIT_PATTERN.search(self._formatting_template, self._last_match_position)
if digit_match:
# Reset to search for _DIGIT_PLACEHOLDER from start of string
digit_match = _DIGIT_PATTERN.search(self._formatting_template)
temp_template = re.sub(_DIGIT_PATTERN,
next_char,
self._formatting_template,
count=1)
self._formatting_template = temp_template + self._formatting_template[len(temp_template):]
self._last_match_position = digit_match.start()
return self._formatting_template[:self._last_match_position + 1]
else:
if len(self._possible_formats) == 1:
# More digits are entered than we could handle, and there are
# no other valid patterns to try.
self._able_to_format = False
# else, we just reset the formatting pattern.
self._current_formatting_pattern = U_EMPTY_STRING
return self._accrued_input
| WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/phonenumbers/asyoutypeformatter.py | Python | bsd-3-clause | 31,690 |
from SoundManager import *
import time
import sys
sm = SoundManager()
sm.daemon = True
sm.start()
sm.play_sound(sys.argv[1])
sm.play_sound(sys.argv[2])
while True:
time.sleep(1)
| mendrew/questroom-lovecraft | sound_test/sm_test_callback.py | Python | gpl-2.0 | 185 |
# -*- coding: utf-8 -*-
import urllib, urllib2
from datetime import datetime
SPEEDURL = 'http://speed.pypy.org/'
def save(project, revision, results, options, executable, int_options, host, testing=False):
testparams = []
#Parse data
data = {}
current_date = datetime.today()
for b in results:
bench_name = b[0]
res_type = b[1]
results = b[2]
value = 0
if res_type == "SimpleComparisonResult":
value = results['base_time']
elif res_type == "ComparisonResult":
value = results['avg_base']
else:
print("ERROR: result type unknown " + b[1])
return 1
data = {
'commitid': revision,
'project': project,
'executable_name': executable,
'executable_coptions': int_options,
'benchmark': bench_name,
'environment': host,
'result_value': value,
'result_date': current_date,
}
if res_type == "ComparisonResult":
data['std_dev'] = results['std_changed']
if testing: testparams.append(data)
else: send(data)
if testing: return testparams
else: return 0
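# Illustrative input for save() (the tuple/dict shape is inferred from the
# parsing above; benchmark names and numbers are made up):
#   results = [('richards', 'SimpleComparisonResult', {'base_time': 0.42}),
#              ('ai', 'ComparisonResult', {'avg_base': 0.61, 'std_changed': 0.02})]
#   save('pypy', 71212, results, None, 'pypy-c-jit', 'default', 'tannit')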
def send(data):
#save results
params = urllib.urlencode(data)
f = None
response = "None"
info = str(datetime.today()) + ": Saving result for " + data['executable_name'] + " revision "
info += str(data['commitid']) + ", benchmark " + data['benchmark']
print(info)
try:
f = urllib2.urlopen(SPEEDURL + 'result/add/', params)
response = f.read()
f.close()
except urllib2.URLError, e:
if hasattr(e, 'reason'):
response = '\n We failed to reach a server\n'
response += ' Reason: ' + str(e.reason)
elif hasattr(e, 'code'):
response = '\n The server couldn\'t fulfill the request\n'
response += ' Error code: ' + str(e)
print("Server (%s) response: %s\n" % (SPEEDURL, response))
return 1
print "saved correctly!\n"
return 0
| camillobruni/pinocchioCodespeed | tools/pypy/savecpython.py | Python | gpl-3.0 | 2,103 |
import numpy as np
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
def main():
"""
Van der Pol's oscillator reference solution
"""
# set time parameters
t0 = 0.0
Tend = 10.0
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-12
level_params['dt'] = (Tend - t0) / 2000.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = 5
sweeper_params['QI'] = 'IE'
# initialize problem parameters
problem_params = dict()
problem_params['newton_tol'] = 1E-14
problem_params['newton_maxiter'] = 50
problem_params['mu'] = 10
problem_params['u0'] = (2.0, 0)
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# instantiate the controller
controller_ref = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller_ref.MS[0].levels[0].prob
uinit = P.u_exact(t0)
uend_ref, stats_ref = controller_ref.run(u0=uinit, t0=t0, Tend=Tend)
np.save('data/vdp_ref.npy', uend_ref)
if __name__ == "__main__":
main()
| Parallel-in-Time/pySDC | pySDC/projects/RDC/vanderpol_reference.py | Python | bsd-2-clause | 2,054 |
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import pkg_resources as res
from pyi_importers import FrozenImporter
# To make pkg_resources work with froze moduels we need to set the 'Provider'
# class for FrozenImporter. This class decides where to look for resources
# and other stuff. 'pkg_resources.NullProvider' is dedicated to PEP302
# import hooks like FrozenImporter is. It uses method __loader__.get_data() in
# methods pkg_resources.resource_string() and pkg_resources.resource_stream()
res.register_loader_type(FrozenImporter, res.NullProvider)
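# Illustrative effect (a sketch of the intended behaviour, not code from this
# hook; 'mypkg' and the resource path are hypothetical):
#   pkg_resources.resource_string('mypkg', 'data/config.txt')
# now resolves the resource through FrozenImporter's __loader__.get_data()
# instead of looking on the filesystem.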
| timeyyy/PyUpdater | pyupdater/vendor/PyInstaller/loader/rthooks/pyi_rth_pkgres.py | Python | bsd-2-clause | 916 |
import scrapy
from scrapy.crawler import CrawlerProcess
import json
import mysql.connector
import logging
import datetime
from w3lib.html import remove_tags
from urllib.parse import urlparse
class Spider(scrapy.Spider):
name = 'Spider'
def __init__(self, p_cnx, p_sources, p_keywords):
self.cnx = p_cnx
self.cursor = self.cnx.cursor()
self.sources = p_sources
self.keywords = p_keywords
self.data = []
def start_requests(self):
for source in self.sources:
yield scrapy.Request(url=source['url'], callback=self.parse)
def check_article(self, p_article_title):
check_article_query = 'SELECT count(id) FROM articles WHERE title=%s'
try:
self.cursor.execute(check_article_query, (p_article_title,))
nb_articles = self.cursor.fetchone()
if nb_articles[0] >= 1:
return 1
else:
return 0
except Exception as e:
logger.error("check_article" + str(e.args))
return None
def check_source(self, p_source_url):
query = 'SELECT id FROM sources WHERE url = %s'
try:
self.cursor.execute(query, (p_source_url,))
source_id_row = self.cursor.fetchone()
if source_id_row is None:
# add source to db
insert_src_query = 'INSERT INTO sources SET name=%s, url=%s, description="New site..."'
self.cursor.execute(insert_src_query, (p_source_url.split('/')[-2], p_source_url))
self.cnx.commit()
source_id = self.cursor.lastrowid
else:
source_id = source_id_row[0]
return source_id
except Exception as e:
logger.error("check_source" + str(e.args))
return None
def parse(self, response):
logger.info('accessed %s' % response.url)
for source in self.sources:
if source['url'] == response.url:
posts_tag = source['tags']['posts']
titles_tag = source['tags']['title']
content_tag = source['tags']['content']
link_tag = source['tags']['link']
for post in response.css(posts_tag):
try:
post_title = post.css(titles_tag).extract_first()
if post_title is not None:
post_title = remove_tags(post_title)
post_content = remove_tags(post.css(content_tag).extract_first())
post_link = remove_tags(post.css(link_tag).extract_first())
title_lower_case = post_title.lower()
if post_link[0] == '/':
url_parsed = urlparse(response.url)
url = url_parsed[0] + '://' + url_parsed[1]
post_link = url + post_link
collect = 0
for keyword in self.keywords:
                            if title_lower_case.find(keyword) != -1:  # find() returns -1 when absent; 0 is a match at the start
collect = 1
if collect == 1:
self.data.append(
dict(src=response.url, title=post_title, content=post_content, link=post_link))
except Exception as e:
logger.error("parse" + str(e.args))
def flush_data(self):
for datum in self.data:
try:
source_id = self.check_source(datum['src'])
if self.check_article(datum['title']) == 0:
insert_article_query = 'INSERT INTO articles SET title=%s, summary=%s, crawl_date= %s, src= %s, url=%s'
self.cursor.execute(insert_article_query,
(datum['title'], datum['content'], datetime.datetime.now(), source_id,
datum['link'],))
self.cnx.commit()
except Exception as e:
logger.error("flush_data: " + str(e.args))
def __del__(self):
self.flush_data()
self.cursor.close()
self.cnx.close()
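# Illustrative lib/config.json layout assumed by the code below (the field
# names come from the accesses in this file; the concrete values are made up):
# {
#   "database": {"username": "crawler", "password": "secret",
#                "host": "localhost", "database": "news"},
#   "keywords": ["security", "python"],
#   "sources": [{"url": "https://example.com/blog/",
#                "tags": {"posts": "article", "title": "h2 a::text",
#                         "content": "p::text", "link": "h2 a::attr(href)"}}]
# }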
def main():
process = CrawlerProcess({
'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
})
logger.info('main: Launched crawler')
process.crawl(Spider, cnx, sources, keywords)
process.start() # the script will block here until the crawling is finished
logger.info('main: Finished execution')
if __name__ == '__main__':
# logging configuration
logger = logging.getLogger('crawler')
hdlr = logging.FileHandler('lib/crawler.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
logger.info('Started')
# sources & db configuration
sources = None
cnx = None
keywords = None
try:
with open('lib/config.json') as json_data:
# opening database connection
data = json.load(json_data)
database_config = data['database']
try:
cnx = mysql.connector.connect(user=database_config['username'],
password=database_config['password'],
host=database_config['host'],
database=database_config['database'])
logger.info('Connected to database')
except Exception as e:
logger.error(e.args)
# getting the sources from the config file
sources = data['sources']
keywords = data['keywords']
except Exception as e:
logger.error(e.args)
main()
| Xaess/crawler | main.py | Python | mit | 5,944 |
"""
Module used only to launch the game
"""
from jeu import jouer
if __name__ == "__main__":
jouer()
| UnderXirox/Python-3_Des-fichiers-complementaires | Guide/20_Module_2/lanceur.py | Python | gpl-3.0 | 115 |
from scipy.optimize import curve_fit
from numpy import *
import matplotlib.pyplot as plt
# Create a function
# ==> First encounter with *whitespace* in Python <==
def gaussian(x, a, b, c):
val = a * exp(-(x - b)**2 / c**2)
return val
# Generate fake data.
# Note: functions in random package, array arithmetic (exp)
n = 100
x = random.uniform(-10., 10., n)
y = exp(-(x - 3.)**2 / 4) * 10. + random.normal(0., 2., n)
e = random.uniform(0.1, 1., n)
# Note: these error bars don't reflect the distribution from which
# they were drawn! Chi^2 of the fit will be poor.
# Fit
popt, pcov = curve_fit(gaussian, x, y, sigma=e)
# Print results
print("Scale = %.3f +/- %.3f" % (popt[0], sqrt(pcov[0, 0])))
print("Offset = %.3f +/- %.3f" % (popt[1], sqrt(pcov[1, 1])))
print("Sigma = %.3f +/- %.3f" % (popt[2], sqrt(pcov[2, 2])))
# Plot data
plt.errorbar(x, y, yerr=e, linewidth=1, color='black', fmt=".")
# Plot model
xm = linspace(-10., 10., 100) # 100 evenly spaced points
plt.plot(xm, gaussian(xm, popt[0], popt[1], popt[2]))
plt.show() | azariven/BioSig_SEAS | bin/dev/random_code.py | Python | gpl-3.0 | 1,048 |
#!/usr/bin/env python
from distutils.core import setup
import sys, shutil
def do(cmd):
if len(sys.argv)<2: sys.argv.append(cmd)
else: sys.argv[1]=cmd
setup(
name='SAM',
version='1.0',
description='Cyberoam Account Manager',
author='Viranch Mehta / Mohit Kothari',
author_email='viranch.mehta@gmail.com / mohitrajkothari@gmail.com',
url='http://www.butbucket.org/viranch/sam',
packages=['sam'],
scripts=['scripts/sam'],
)
do('build')
do('install')
shutil.rmtree('build')
| viranch/sam | setup.py | Python | bsd-2-clause | 495 |
#!/usr/bin/python
import yaml
config = yaml.safe_load(open("config.yml"))
twilio_account_sid = config["twilio"]["account_sid"]
twilio_auth_token = config["twilio"]["auth_token"]
twilio_from_number = config["twilio"]["from_number"]
from twilio.rest import TwilioRestClient
twilio_client = TwilioRestClient(twilio_account_sid, twilio_auth_token)
from contacts import Contacts, Contact
c = Contacts()
# syntax: text.py <contact> <message>
import sys
script_name = sys.argv.pop(0)
name = sys.argv.pop(0)
msg = " ".join([str(x) for x in sys.argv])
contact = c.find_contact_by_name(name)
if contact and msg:
print("from " + str(twilio_from_number))
message = twilio_client.messages.create(
body=msg,
from_=twilio_from_number,
to=contact.number
)
print("message is " + message.sid)
else:
print("couldn't find contact '" + name + "' or empty message")
| alexshepard/aledison | text.py | Python | mit | 877 |
# ext/preprocessors.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""preprocessing functions, used with the 'preprocessor'
argument on Template, TemplateLookup"""
import re
def convert_comments(text):
"""preprocess old style comments.
example:
from mako.ext.preprocessors import convert_comments
t = Template(..., preprocessor=convert_comments)"""
return re.sub(r"(?<=\n)\s*#[^#]", "##", text)
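# Illustrative behaviour (hypothetical input): for
#   text = "<p>hi</p>\n # old style comment\n"
# convert_comments(text) returns "<p>hi</p>\n##old style comment\n" -- a
# line-leading '#' that is not already doubled becomes Mako's '##' comment
# marker (the regex also consumes the leading whitespace and the single
# character following the '#').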
| scheib/chromium | third_party/mako/mako/ext/preprocessors.py | Python | bsd-3-clause | 576 |
import math
import random
def gcd(_a, _b):
"""Returns the Greatest Common Divisor from two integers"""
while _b:
_t, _a = _a, _b
_b = _t % _b
return _a
if __name__ == "__main__":
assert gcd(60, 96) == 12
assert gcd(20, 8) == 4
for _i in range(20):
_a, _b = random.randint(10, 100), random.randint(10, 100)
assert gcd(_a, _b) == math.gcd(_a, _b)
| Moggi/python-playground | HRank/gcd.py | Python | gpl-2.0 | 405 |
# -*- coding: utf-8 -*-
__all__ = ["cc"]
| MarieVdS/ComboCode | __init__.py | Python | gpl-3.0 | 42 |
'''Testing numerical differentiation
Still some problems, with API (args tuple versus *args)
finite difference Hessian has some problems that I didn't look at yet
Should Hessian also work per observation, if fun returns 2d
'''
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import statsmodels.api as sm
from statsmodels.tools import numdiff
from statsmodels.tools.numdiff import (approx_fprime, approx_fprime_cs,
approx_hess_cs)
DEC3 = 3
DEC4 = 4
DEC5 = 5
DEC6 = 6
DEC8 = 8
DEC13 = 13
DEC14 = 14
def maxabs(x,y):
return np.abs(x-y).max()
def fun(beta, x):
return np.dot(x, beta).sum(0)
def fun1(beta, y, x):
#print beta.shape, x.shape
xb = np.dot(x, beta)
return (y-xb)**2 #(xb-xb.mean(0))**2
def fun2(beta, y, x):
#print beta.shape, x.shape
return fun1(beta, y, x).sum(0)
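# Illustrative note (inferred from the definitions above): fun1 returns the
# per-observation squared residuals (y - x.dot(beta))**2 while fun2 sums them,
# so e.g.
#   numdiff.approx_fprime(beta, fun2, args=(y, x))
# approximates the gradient of the residual sum of squares, whereas passing
# fun1 yields one gradient row per observation.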
#ravel() added because of MNLogit 2d params
class CheckGradLoglikeMixin(object):
def test_score(self):
for test_params in self.params:
sc = self.mod.score(test_params)
scfd = numdiff.approx_fprime(test_params.ravel(),
self.mod.loglike)
assert_almost_equal(sc, scfd, decimal=1)
sccs = numdiff.approx_fprime_cs(test_params.ravel(),
self.mod.loglike)
assert_almost_equal(sc, sccs, decimal=11)
def test_hess(self):
for test_params in self.params:
he = self.mod.hessian(test_params)
hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
assert_almost_equal(he, hefd, decimal=DEC8)
#NOTE: notice the accuracy below
assert_almost_equal(he, hefd, decimal=7)
hefd = numdiff.approx_fprime(test_params, self.mod.score,
centered=True)
assert_allclose(he, hefd, rtol=5e-10)
hefd = numdiff.approx_fprime(test_params, self.mod.score,
centered=False)
assert_almost_equal(he, hefd, decimal=4)
hescs = numdiff.approx_fprime_cs(test_params.ravel(),
self.mod.score)
assert_allclose(he, hescs, rtol=1e-13)
hecs = numdiff.approx_hess_cs(test_params.ravel(),
self.mod.loglike)
assert_allclose(he, hecs, rtol=1e-9)
#NOTE: Look at the lack of precision - default epsilon not always
#best
grad = self.mod.score(test_params)
hecs, gradcs = numdiff.approx_hess1(test_params, self.mod.loglike,
1e-6, return_grad=True)
assert_almost_equal(he, hecs, decimal=1)
assert_almost_equal(grad, gradcs, decimal=1)
hecs, gradcs = numdiff.approx_hess2(test_params, self.mod.loglike,
1e-4, return_grad=True)
assert_almost_equal(he, hecs, decimal=3)
assert_almost_equal(grad, gradcs, decimal=1)
hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-5)
assert_almost_equal(he, hecs, decimal=4)
class TestGradMNLogit(CheckGradLoglikeMixin):
def __init__(self):
#from results.results_discrete import Anes
data = sm.datasets.anes96.load()
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
self.mod = sm.MNLogit(data.endog, exog)
#def loglikeflat(self, params):
#reshapes flattened params
# return self.loglike(params.reshape(6,6))
#self.mod.loglike = loglikeflat #need instance method
#self.params = [np.ones((6,6)).ravel()]
res = self.mod.fit(disp=0)
self.params = [res.params.ravel('F')]
def test_hess(self):
#NOTE: I had to overwrite this to lessen the tolerance
for test_params in self.params:
he = self.mod.hessian(test_params)
hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
assert_almost_equal(he, hefd, decimal=DEC8)
#NOTE: notice the accuracy below and the epsilon changes
# this doesn't work well for score -> hessian with non-cs step
# it's a little better around the optimum
assert_almost_equal(he, hefd, decimal=7)
hefd = numdiff.approx_fprime(test_params, self.mod.score,
centered=True)
assert_almost_equal(he, hefd, decimal=4)
hefd = numdiff.approx_fprime(test_params, self.mod.score, 1e-9,
centered=False)
assert_almost_equal(he, hefd, decimal=2)
hescs = numdiff.approx_fprime_cs(test_params, self.mod.score)
assert_almost_equal(he, hescs, decimal=DEC8)
hecs = numdiff.approx_hess_cs(test_params, self.mod.loglike)
assert_almost_equal(he, hecs, decimal=5)
#NOTE: these just don't work well
#hecs = numdiff.approx_hess1(test_params, self.mod.loglike, 1e-3)
#assert_almost_equal(he, hecs, decimal=1)
#hecs = numdiff.approx_hess2(test_params, self.mod.loglike, 1e-4)
#assert_almost_equal(he, hecs, decimal=0)
hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-4)
assert_almost_equal(he, hecs, decimal=0)
class TestGradLogit(CheckGradLoglikeMixin):
def __init__(self):
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
#mod = sm.Probit(data.endog, data.exog)
self.mod = sm.Logit(data.endog, data.exog)
#res = mod.fit(method="newton")
self.params = [np.array([1,0.25,1.4,-7])]
##loglike = mod.loglike
##score = mod.score
##hess = mod.hessian
class CheckDerivativeMixin(object):
def __init__(self):
nobs = 200
#x = np.arange(nobs*3).reshape(nobs,-1)
np.random.seed(187678)
x = np.random.randn(nobs,3)
xk = np.array([1,2,3])
xk = np.array([1.,1.,1.])
#xk = np.zeros(3)
beta = xk
y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
xkols = np.dot(np.linalg.pinv(x),y)
self.x = x
self.y = y
self.params = [np.array([1.,1.,1.]), xkols]
self.init()
def init(self):
pass
def test_grad_fun1_fd(self):
for test_params in self.params:
#gtrue = self.x.sum(0)
gtrue = self.gradtrue(test_params)
fun = self.fun()
epsilon = 1e-6
gfd = numdiff.approx_fprime(test_params, fun, epsilon=epsilon,
args=self.args)
gfd += numdiff.approx_fprime(test_params, fun, epsilon=-epsilon,
args=self.args)
gfd /= 2.
assert_almost_equal(gtrue, gfd, decimal=DEC6)
def test_grad_fun1_fdc(self):
for test_params in self.params:
#gtrue = self.x.sum(0)
gtrue = self.gradtrue(test_params)
fun = self.fun()
epsilon = 1e-6 #default epsilon 1e-6 is not precise enough
gfd = numdiff.approx_fprime(test_params, fun, epsilon=1e-8,
args=self.args, centered=True)
assert_almost_equal(gtrue, gfd, decimal=DEC5)
def test_grad_fun1_cs(self):
for test_params in self.params:
#gtrue = self.x.sum(0)
gtrue = self.gradtrue(test_params)
fun = self.fun()
gcs = numdiff.approx_fprime_cs(test_params, fun, args=self.args)
assert_almost_equal(gtrue, gcs, decimal=DEC13)
def test_hess_fun1_fd(self):
for test_params in self.params:
#hetrue = 0
hetrue = self.hesstrue(test_params)
if not hetrue is None: #Hessian doesn't work for 2d return of fun
fun = self.fun()
#default works, epsilon 1e-6 or 1e-8 is not precise enough
hefd = numdiff.approx_hess1(test_params, fun, #epsilon=1e-8,
args=self.args)
#TODO:should be kwds
assert_almost_equal(hetrue, hefd, decimal=DEC3)
#TODO: I reduced precision to DEC3 from DEC4 because of
# TestDerivativeFun
hefd = numdiff.approx_hess2(test_params, fun, #epsilon=1e-8,
args=self.args)
#TODO:should be kwds
assert_almost_equal(hetrue, hefd, decimal=DEC3)
hefd = numdiff.approx_hess3(test_params, fun, #epsilon=1e-8,
args=self.args)
#TODO:should be kwds
assert_almost_equal(hetrue, hefd, decimal=DEC3)
def test_hess_fun1_cs(self):
for test_params in self.params:
#hetrue = 0
hetrue = self.hesstrue(test_params)
if not hetrue is None: #Hessian doesn't work for 2d return of fun
fun = self.fun()
hecs = numdiff.approx_hess_cs(test_params, fun, args=self.args)
assert_almost_equal(hetrue, hecs, decimal=DEC6)
class TestDerivativeFun(CheckDerivativeMixin):
def init(self):
xkols = np.dot(np.linalg.pinv(self.x), self.y)
self.params = [np.array([1.,1.,1.]), xkols]
self.args = (self.x,)
def fun(self):
return fun
def gradtrue(self, params):
return self.x.sum(0)
def hesstrue(self, params):
return np.zeros((3,3)) #make it (3,3), because test fails with scalar 0
#why is precision only DEC3
class TestDerivativeFun2(CheckDerivativeMixin):
def init(self):
xkols = np.dot(np.linalg.pinv(self.x), self.y)
self.params = [np.array([1.,1.,1.]), xkols]
self.args = (self.y, self.x)
def fun(self):
return fun2
def gradtrue(self, params):
y, x = self.y, self.x
return (-x*2*(y-np.dot(x, params))[:,None]).sum(0)
#2*(y-np.dot(x, params)).sum(0)
def hesstrue(self, params):
x = self.x
return 2*np.dot(x.T, x)
class TestDerivativeFun1(CheckDerivativeMixin):
def init(self):
xkols = np.dot(np.linalg.pinv(self.x), self.y)
self.params = [np.array([1.,1.,1.]), xkols]
self.args = (self.y, self.x)
def fun(self):
return fun1
def gradtrue(self, params):
y, x = self.y, self.x
return (-x*2*(y-np.dot(x, params))[:,None])
def hesstrue(self, params):
return None
y, x = self.y, self.x
return (-x*2*(y-np.dot(x, params))[:,None]) #TODO: check shape
if __name__ == '__main__':
epsilon = 1e-6
nobs = 200
x = np.arange(nobs*3).reshape(nobs,-1)
x = np.random.randn(nobs,3)
xk = np.array([1,2,3])
xk = np.array([1.,1.,1.])
#xk = np.zeros(3)
beta = xk
y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
xkols = np.dot(np.linalg.pinv(x),y)
print approx_fprime((1,2,3),fun,epsilon,x)
gradtrue = x.sum(0)
print x.sum(0)
gradcs = approx_fprime_cs((1,2,3), fun, (x,), h=1.0e-20)
print gradcs, maxabs(gradcs, gradtrue)
print approx_hess_cs((1,2,3), fun, (x,), h=1.0e-20) #this is correctly zero
print approx_hess_cs((1,2,3), fun2, (y,x), h=1.0e-20)-2*np.dot(x.T, x)
print numdiff.approx_hess(xk,fun2,1e-3, (y,x))[0] - 2*np.dot(x.T, x)
gt = (-x*2*(y-np.dot(x, [1,2,3]))[:,None])
g = approx_fprime_cs((1,2,3), fun1, (y,x), h=1.0e-20)#.T #this shouldn't be transposed
gd = numdiff.approx_fprime((1,2,3),fun1,epsilon,(y,x))
print maxabs(g, gt)
print maxabs(gd, gt)
import statsmodels.api as sm
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
#mod = sm.Probit(data.endog, data.exog)
mod = sm.Logit(data.endog, data.exog)
#res = mod.fit(method="newton")
test_params = [1,0.25,1.4,-7]
loglike = mod.loglike
score = mod.score
hess = mod.hessian
#cs doesn't work for Probit because special.ndtr doesn't support complex
#maybe calculating ndtr for real and imag parts separately, if we need it
#and if it still works in this case
print 'sm', score(test_params)
print 'fd', numdiff.approx_fprime(test_params,loglike,epsilon)
print 'cs', numdiff.approx_fprime_cs(test_params,loglike)
print 'sm', hess(test_params)
print 'fd', numdiff.approx_fprime(test_params,score,epsilon)
print 'cs', numdiff.approx_fprime_cs(test_params, score)
#print 'fd', numdiff.approx_hess(test_params, loglike, epsilon) #TODO: bug
'''
Traceback (most recent call last):
File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\test_numdiff.py", line 74, in <module>
print 'fd', numdiff.approx_hess(test_params, loglike, epsilon)
File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\regression\numdiff.py", line 118, in approx_hess
xh = x + h
TypeError: can only concatenate list (not "float") to list
'''
hesscs = numdiff.approx_hess_cs(test_params, loglike)
print 'cs', hesscs
print maxabs(hess(test_params), hesscs)
data = sm.datasets.anes96.load()
exog = data.exog
exog = sm.add_constant(exog, prepend=False)
res1 = sm.MNLogit(data.endog, exog).fit(method="newton", disp=0)
datap = sm.datasets.randhie.load()
nobs = len(datap.endog)
exogp = sm.add_constant(datap.exog.view(float).reshape(nobs,-1),
prepend=False)
modp = sm.Poisson(datap.endog, exogp)
resp = modp.fit(method='newton', disp=0)
| yarikoptic/pystatsmodels | statsmodels/tools/tests/test_numdiff.py | Python | bsd-3-clause | 14,062 |
#!/usr/bin/env python
"""
deps:
*) pip install --upgrade pip wheel setuptools twine
publish:
1) #> python setup.py sdist bdist_wheel
2) #> twine upload dist/* #<specific specific wheel if needed>; --repository <testpypi> or --repository-url <testpypi-url>
"""
import os
from setuptools import setup, find_packages
def read(fname):
fpath = os.path.join(os.path.dirname(__file__), fname)
fd = open(fpath, 'r')
data = fd.read()
fd.close()
return data
version = '0.3.1' # keep this in sync with the last release tag!
setup(name='Evmlab',
version=version,
description='Ethereum EVM utilities',
author='Martin Holst Swende',
author_email='martin.swende@ethereum.org',
license="GPLv3",
keywords=["ethereum", "transaction", "debugger"],
url="https://github.com/ethereum/evmlab/",
download_url="https://github.com/ethereum/evmlab/tarball/v%s" % version,
# generate rst from .md: pandoc --from=markdown --to=rst README.md -o README.rst (fix diff section and footer)
long_description=read("README.md") if os.path.isfile("README.md") else "",
      # for pypi.org; left empty because README.md is not shipped with the package - install may otherwise fail
long_description_content_type='text/markdown', # requires twine and recent setuptools
packages=find_packages(),
package_data={'evmlab.tools.reproducer': ["templates/*"]},
install_requires=["requests",
"web3",
"eth-hash[pycryptodome]",
"rlp>=1.0",
"evmdasm"],
extras_require={"consolegui": ["urwid"],
"abidecoder": ["ethereum-input-decoder"],
"docker": ["docker==3.0.0"],
"fuzztests": ["docker==3.0.0", "evmcodegen"],
}
)
| holiman/evmlab | setup.py | Python | gpl-3.0 | 1,872 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contributions', '0062_auto_20170330_1530'),
]
operations = [
migrations.CreateModel(
name='Source',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Name')),
],
),
]
| phani00/tovp | tovp/contributions/migrations/0063_source.py | Python | mit | 606 |
"""
Measure management functions.
"""
import binascii
import errno
import glob
import os
import shutil
import xdrlib
from datetime import datetime
from pas import conf
from pas import tshark
from pas import shell
from pas import xml
from pas.conf import settings
from pas.conf import map_interfaces
from pas.conf import role
from pas.conf import stylesheet
from pas.parser import errors
from pas.parser import registry
from pas.parser import protocol
from lxml import etree
def select(name=None, basedir=None):
"""
Scans the basedir (or the shared-measures directory defined in the
settings) for directories and returns a choice based on different
criteria:
1. No directories are found; raise a RuntimeError
2. The name is set; check if it was found and if so return it
3. Only one directory is found; return the found directory
4. Multiple directories are found; ask the user to pick one
"""
name = name.rsplit('_', 2) if name else ()
if not basedir:
basedir = settings.PATHS['shared-measures'][0]
# Get all files in the directory
paths = os.listdir(basedir)
# Rebuild the full path name
paths = [(os.path.join(basedir, p), p.rsplit('_', 2)) for p in paths]
# Filter out non-directories
paths = [p for p in paths if os.path.isdir(p[0])]
# If no entries remained, there are no test cases which can be run
if not paths:
raise RuntimeError("No test cases found.")
# Check to see if chosen value exists in the available paths
if name:
for path in paths:
if path[1] == name:
return path[0], '_'.join(path[1])
else:
# Continue with selecting phase
# @TODO: log
print "The chosen path is not available."
# There is not much to choose here
if len(paths) == 1:
# @TODO: log
print "\nOnly one measure found: {0} ({1} at {2}).".format(*paths[0][1])
path = paths[0]
return path[0], '_'.join(path[1])
# Present a list of choices to the user (paths must now contain more than
# one item)
print "\nMultiple measures found:\n"
for i, (path, name) in enumerate(paths):
index = '[{}]'.format(i)
print '{0:>8s}: {1} ({2} at {3})'.format(index, *name)
def valid(index):
"""
Returns the correct entry in the paths list or asks for a correct
value if the index is outside the boundaries.
"""
try:
path = paths[int(index)]
return path[0], '_'.join(path[1])
except (IndexError, ValueError):
raise Exception("Enter an integer between 0 " \
"and {0}.".format(len(paths)-1))
print
return shell.prompt("Select a measure:", validate=valid)
def start(name):
"""
Start a new named measure session in background on all interested hosts.
The hosts are retrieved from the ROLES setting directive and a
measure is started for each one.
"""
dest = settings.PATHS['local-measures'][1]
fltr = settings.CAPTURE_FILTER
for host, interfaces in map_interfaces():
with shell.workon(host):
shell.remote('rm -rf {0} ; mkdir {0}'.format(dest), sudo=True)
for i in interfaces:
mname = '{0}.{1}'.format(name, i)
tshark.start(mname, i, '{0}/{1}.raw'.format(dest, mname), fltr)
def stop(name):
"""
    Stop the named measure session running in background on all interested hosts.
As for the start function, the hosts are retrieved from the interfaces
setting directive and the stop command issued on each one.
"""
for host, interfaces in map_interfaces():
with shell.workon(host):
for i in interfaces:
tshark.stop(name, i)
def kill():
"""
Alias for tshark.kill
"""
return tshark.kill()
def collect(name, overwrite=False):
"""
Moves the relevant files to the shared directory by asking to empty the
destination directory if needed.
"""
ipaddr = '$(getip eth1)'
name = "{0}_{1}".format(name, datetime.now().strftime('%Y-%m-%d_%H:%M'))
guest_local = settings.PATHS['local-measures'][1]
host_shared, guest_shared = settings.PATHS['shared-measures']
destination = os.path.join(guest_shared, name, ipaddr)
local = os.path.realpath(os.path.join(host_shared, name))
try:
if os.listdir(local):
print "A directory with the same name ({0}) already " \
"exists.".format(name)
if overwrite or shell.confirm("Would you like to replace it?"):
shell.local('rm -rf {0}/*'.format(local))
else:
raise OSError(errno.ENOTEMPTY, "Directory not empty")
except OSError as e:
# If the file or directory don't exist, consume the exception
if e.errno != errno.ENOENT:
raise
shell.remote('chown -R {0}:{0} {1}'.format(settings.VM_USER, guest_local),
sudo=True)
shell.remote('mkdir -p "{0}/logs"'.format(destination))
shell.remote('cp {0}/* "{1}"'.format(guest_local, destination))
# Copy log files
for logfile in settings.LOG_FILES:
shell.remote('chown {0}:{0} "{1}" || true'.format(settings.VM_USER,
logfile), sudo=True)
shell.remote('cp "{0}" "{1}/logs" || true'.format(logfile,
destination))
def toxml(name):
"""
Converts all raw measure files for the given measure to xml using a remote
tshark command.
This will overwrite all already converted files with matching names.
"""
host_shared, guest_shared = settings.PATHS['shared-measures']
pattern = os.path.join(host_shared, name, "*", "*.raw")
paths = glob.glob(pattern)
paths = (guest_shared + path[len(host_shared):] for path in paths)
with shell.workon(role('client')):
for path in paths:
tshark.pcaptoxml(path, path.replace('.raw', '.xml'),
settings.DISPLAY_FILTER)
def simplify(name, prettyprint=True):
"""
Simplifies all the measure files in pdxml format of the given measure,
converting them using the simplify XSL stylesheet. Old simplifications
will be overwritten.
If the prettyprint optional argument is True, the result will be formatted
using the xmllint tool.
"""
host_shared = settings.PATHS['shared-measures'][0]
pattern = os.path.join(host_shared, name, "*", "*.xml")
simplifier = xml.Transformation(stylesheet('simplify.xsl'))
for source in glob.glob(pattern):
if len(os.path.basename(source).split('.')) == 3:
dest = source.replace('.xml', '.simple.xml')
simplifier.parameters['loopback'] = str(int(source.endswith(
'.lo.xml')))
simplifier.transform(source, dest)
if prettyprint:
xml.prettyprint(dest)
def decode(name, measure_case, prettyprint=False):
"""
Decodes the simplified XML representation of the given measure by adding
a "decoded" element to each packet containing a payload.
The decoding is done using an XSL transformation coupled with an xslt
python extension function which provides the "decoded" element given a
payload text string.
"""
host_shared = settings.PATHS['shared-measures'][0]
types = os.path.join(measure_case, "types.py")
types_registry = registry.TypesRegistry()
types_registry.load('pas.conf.basetypes')
try:
types_registry.parse(types)
except IOError:
pass
proto = protocol.MappingProtocol(types_registry)
trans = xml.Transformation(stylesheet('decode.xsl'))
def _decode(context, payload):
"""
Decoding callback
"""
# Convert the ascii representation back to binary data
bin_payload = binascii.a2b_hex(''.join(payload))
# Create an xdr stream with the payload
stream = xdrlib.Unpacker(bin_payload)
# Read the full frame length, it is not needed here
_ = stream.unpack_uint()
try:
# Decode the remaining data as a full frame...
# ...hoping that tcp hasn't split the message in more frames
message = proto.decode_full_frame(stream)
# @TODO: Logging, output and error management
except EOFError as e:
print "-" * 80
print context, "Not enough data:", e
print repr(stream.get_buffer())
print "-" * 80
return
except errors.UnknownClass as e:
print "-" * 80
print context.context_node.attrib['timestamp'],
print "Error while decoding packet:", e
print binascii.b2a_hex(stream.get_buffer())
print "-" * 80
return
except errors.UnknownMethod as e:
print "-" * 80
print context.context_node.attrib['timestamp'],
print "Error while decoding packet:", e
print binascii.b2a_hex(stream.get_buffer())
print "-" * 80
return
except xdrlib.Error as e:
print "-" * 80
print context.context_node.attrib['timestamp'], e
print repr(e.message)
rest = stream.get_buffer()
rem = stream.get_position()
print binascii.b2a_hex(rest[rem:])
print
print repr(rest[rem:])
print
print str(rem) + "/" + str(_)
print "*" * 80
return
# Convert the message to xml and send it back to the XSL template
return message.toxml()
trans.register_function('http://gridgroup.eia-fr.ch/popc',
_decode, 'decode')
# Apply transformation to all simplified xml files
pattern = os.path.join(host_shared, name, "*", "*.simple.xml")
for source in glob.glob(pattern):
dest = source.replace('.simple.xml', '.decoded.xml')
trans.transform(source, dest)
if prettyprint:
xml.prettyprint(dest)
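# Illustrative sketch (not called anywhere in this module): with plain lxml, a
# Python extension function such as the _decode callback above can be exposed to
# an XSL stylesheet by registering it in a FunctionNamespace; the
# xml.Transformation.register_function wrapper used here presumably does the
# equivalent internally.
def _register_decode_with_lxml(decode_callback):
    namespace = etree.FunctionNamespace('http://gridgroup.eia-fr.ch/popc')
    namespace['decode'] = decode_callback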
def report(name, measure_case):
"""
Assembles all the acquired resources (such as source code, measures and
log files) and generates an html page suitable for human interaction and
analysis.
"""
host_shared = settings.PATHS['shared-measures'][0]
trans = xml.Transformation(stylesheet('report.xsl'))
def sources(_):
els = etree.Element('files')
base = len(measure_case)+1
for root, dirs, files in os.walk(measure_case):
print root
for f in files:
if f.endswith(('.pyc', '.DS_Store', '.o')):
continue
path = os.path.join(root, f)
name = path[base:]
if name.startswith('build/'):
continue
element = etree.SubElement(els, 'file')
element.attrib['path'] = path
element.attrib['name'] = name
return els
trans.register_function('http://gridgroup.eia-fr.ch/popc', sources)
def logs(_):
els = etree.Element('files')
basel = len(os.path.join(settings.ENV_BASE, host_shared, name))
base = os.path.join(settings.ENV_BASE, host_shared, name, '*.*.*.*', 'logs', '*')
for log in glob.glob(base):
element = etree.SubElement(els, 'file')
element.attrib['path'] = log
element.attrib['name'] = log[basel+1:]
return els
trans.register_function('http://gridgroup.eia-fr.ch/popc', logs)
def format_stream(_, payload):
"""
Stream formatting xslt callback
"""
payload = ''.join(payload)
        def chunks(seq, n):
            """ Yield successive n-sized chunks from seq.
"""
for i in xrange(0, len(seq), n):
yield seq[i:i+n]
element = etree.Element('pre')
payload = ' '.join(chunks(payload, 2))
payload = ' '.join(chunks(payload, 12))
payload = '\n'.join(chunks(payload, 104))
for chunk in chunks(payload, 420):
etree.SubElement(element, 'span').text = chunk
return element
trans.register_function('http://gridgroup.eia-fr.ch/popc', format_stream)
class Highlighter(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
from pygments import highlight
from pygments import lexers
from pygments.formatters import HtmlFormatter
# Highlight source text with pygments
source = input_node.attrib['path']
with open(source) as fh:
code = fh.read()
# Chose a lexer
name = os.path.split(source)[1]
if name == 'Makefile':
lexer = lexers.BaseMakefileLexer()
elif name.endswith('.py'):
lexer = lexers.PythonLexer()
elif name.endswith(('.cc', '.ph', '.h')):
lexer = lexers.CppLexer()
elif name.endswith(('.c',)):
lexer = lexers.CLexer()
else:
lexer = lexers.TextLexer()
# Highlight code
highlighted = highlight(
code, lexer, HtmlFormatter(cssclass="codehilite", style="pastie", linenos='table')
)
# Convert to xml
root = etree.fromstring(highlighted)
# Add to parent
output_parent.extend(root)
trans.register_element('http://gridgroup.eia-fr.ch/popc', 'highlighted', Highlighter())
destination = os.path.join(host_shared, name, 'report')
shutil.rmtree(destination, True)
shell.local("mkdir -p {0}".format(destination))
pattern = os.path.join(host_shared, name, "*", "*.decoded.xml")
for source in glob.glob(pattern):
base, measure = os.path.split(source)
interface = measure.rsplit('.', 3)[1]
ip = os.path.basename(base).replace('.', '-')
dest = os.path.join(destination, '{0}_{1}.html'.format(ip, interface))
trans.transform(source, dest)
# Tidy
tconf = "conf/tidy/tidy.conf"
shell.local('tidy -config {1} -o {0} {0} || true'.format(dest, tconf))
# Copy resources
htdocs = os.path.join(os.path.dirname(conf.__file__), 'htdocs')
#shell.local("ln -s {0} {1}".format(os.path.join(htdocs, 'styles'),
# os.path.join(destination, 'styles')))
#shell.local("ln -s {0} {1}".format(os.path.join(htdocs, 'images'),
# os.path.join(destination, 'images')))
#shell.local("ln -s {0} {1}".format(os.path.join(htdocs, 'scripts'),
# os.path.join(destination, 'scripts')))
shutil.copytree(
os.path.join(htdocs, 'styles'),
os.path.join(destination, 'styles')
)
shutil.copytree(
os.path.join(htdocs, 'images'),
os.path.join(destination, 'images')
)
shutil.copytree(
os.path.join(htdocs, 'scripts'),
os.path.join(destination, 'scripts')
)
| GaretJax/pop-analysis-suite | pas/measure.py | Python | mit | 15,485 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from pymongo import MongoClient, DESCENDING
from secret import server
from json import loads
def fast():
db = MongoClient(server.MONGODB_DEV_URI)["mirrors"]
find_law = {
"hits": { "$gt": 0 },
"pages": { "$gt": 0 },
"bandwidth": { "$gt": 0}
}
inven_cursor = db["ip1_5"].find(find_law)
re_currsor = inven_cursor.sort("bandwidth", DESCENDING).limit(2000)
pcoll = db["ip1_5_s"]
for e in re_currsor:
pcoll.insert_one(e)
def fetch():
qurl = "http://freeapi.ipip.net"
db = MongoClient(server.MONGODB_DEV_URI)["mirrors"]
cursor = db["ip1_5_s"].find().sort("bandwidth", DESCENDING)
for e in cursor:
if "location" in e.keys(): continue
re = requests.get('/'.join([qurl, e["host"]]))
if re.status_code != 200:
raise Exception(' '.join(["Fetch ip location error", str(re.status_code)]))
e["location"] = loads(re.text)
db["ip1_5_s"].save(e)
def figure():
db = MongoClient(server.MONGODB_DEV_URI)["mirrors"]
# db = MongoClient("localhost", 27017)["mirrors"]
cursor = db["ip1_5_s"].find()
re = {u"重庆大学": 0, u"教育网": 0}
for e in cursor:
location = e["location"]
if location[0] not in re.keys():
re[location[0]] = 0
if location[1] not in re.keys():
re[location[1]] = 0
if location[0] == location[1]:
re[location[0]] += 1
else:
re[location[0]] += 1
re[location[1]] += 1
if location[3] == u"重庆大学":
re[location[3]] += 1
if location[4] == u"教育网":
re[location[4]] += 1
rank = sorted(re.items(), key=lambda x: x[1], reverse=True)
with open("rank.txt", 'w') as outfile:
for e in rank:
outfile.write(e[0].encode("utf-8"))
outfile.write(''.join([' ', str(e[1]), '\n']))
def main():
    # fast()
    # fetch()
    # figure()
    pass
if __name__ == "__main__":
main()
| ccreimondo/visual-ip | fetch_location.py | Python | mit | 2,063 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the S3 backend store"""
import StringIO
import hashlib
import httplib
import sys
import unittest
import urlparse
import stubout
import boto.s3.connection
from glance.common import exception
from glance.common import utils
from glance.store import BackendException, UnsupportedBackend
from glance.store.location import get_location_from_uri
from glance.store.s3 import Store
from glance.tests import utils as test_utils
FAKE_UUID = utils.generate_uuid()
FIVE_KB = (5 * 1024)
S3_CONF = {'verbose': True,
'debug': True,
's3_store_access_key': 'user',
's3_store_secret_key': 'key',
's3_store_host': 'localhost:8080',
's3_store_bucket': 'glance'}
# We stub out as little as possible to ensure that the code paths
# between glance.store.s3 and boto.s3.connection are tested
# thoroughly
def stub_out_s3(stubs):
class FakeKey:
"""
Acts like a ``boto.s3.key.Key``
"""
def __init__(self, bucket, name):
self.bucket = bucket
self.name = name
self.data = None
self.size = 0
self.BufferSize = 1024
def close(self):
pass
def exists(self):
return self.bucket.exists(self.name)
def delete(self):
self.bucket.delete(self.name)
def compute_md5(self, data):
chunk = data.read(self.BufferSize)
checksum = hashlib.md5()
while chunk:
checksum.update(chunk)
chunk = data.read(self.BufferSize)
checksum_hex = checksum.hexdigest()
return checksum_hex, None
def set_contents_from_file(self, fp, replace=False, **kwargs):
self.data = StringIO.StringIO()
for bytes in fp:
self.data.write(bytes)
self.size = self.data.len
# Reset the buffer to start
self.data.seek(0)
self.read = self.data.read
def get_file(self):
return self.data
class FakeBucket:
"""
Acts like a ``boto.s3.bucket.Bucket``
"""
def __init__(self, name, keys=None):
self.name = name
self.keys = keys or {}
def __str__(self):
return self.name
def exists(self, key):
return key in self.keys
def delete(self, key):
del self.keys[key]
def get_key(self, key_name, **kwargs):
key = self.keys.get(key_name)
if not key:
return FakeKey(self, key_name)
return key
def new_key(self, key_name):
new_key = FakeKey(self, key_name)
self.keys[key_name] = new_key
return new_key
fixture_buckets = {'glance': FakeBucket('glance')}
b = fixture_buckets['glance']
k = b.new_key(FAKE_UUID)
k.set_contents_from_file(StringIO.StringIO("*" * FIVE_KB))
def fake_connection_constructor(self, *args, **kwargs):
host = kwargs.get('host')
if host.startswith('http://') or host.startswith('https://'):
raise UnsupportedBackend(host)
def fake_get_bucket(conn, bucket_id):
bucket = fixture_buckets.get(bucket_id)
if not bucket:
bucket = FakeBucket(bucket_id)
return bucket
stubs.Set(boto.s3.connection.S3Connection,
'__init__', fake_connection_constructor)
stubs.Set(boto.s3.connection.S3Connection,
'get_bucket', fake_get_bucket)
def format_s3_location(user, key, authurl, bucket, obj):
"""
Helper method that returns a S3 store URI given
the component pieces.
"""
scheme = 's3'
if authurl.startswith('https://'):
scheme = 's3+https'
authurl = authurl[8:]
elif authurl.startswith('http://'):
authurl = authurl[7:]
authurl = authurl.strip('/')
return "%s://%s:%s@%s/%s/%s" % (scheme, user, key, authurl,
bucket, obj)
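# For example (illustrative values only):
#   format_s3_location('user', 'key', 'https://s3.example.com', 'glance', 'uuid')
# returns 's3+https://user:key@s3.example.com/glance/uuid'.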
class TestStore(unittest.TestCase):
def setUp(self):
"""Establish a clean test environment"""
self.stubs = stubout.StubOutForTesting()
stub_out_s3(self.stubs)
self.store = Store(test_utils.TestConfigOpts(S3_CONF))
def tearDown(self):
"""Clear the test environment"""
self.stubs.UnsetAll()
def test_get(self):
"""Test a "normal" retrieval of an image in chunks"""
loc = get_location_from_uri(
"s3://user:key@auth_address/glance/%s" % FAKE_UUID)
(image_s3, image_size) = self.store.get(loc)
self.assertEqual(image_size, FIVE_KB)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_s3:
data += chunk
self.assertEqual(expected_data, data)
def test_get_non_existing(self):
"""
        Test that trying to retrieve an s3 object that doesn't exist
raises an error
"""
uri = "s3://user:key@auth_address/badbucket/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
self.assertRaises(exception.NotFound, self.store.get, loc)
uri = "s3://user:key@auth_address/glance/noexist"
loc = get_location_from_uri(uri)
self.assertRaises(exception.NotFound, self.store.get, loc)
def test_add(self):
"""Test that we can add an image via the s3 backend"""
expected_image_id = utils.generate_uuid()
expected_s3_size = FIVE_KB
expected_s3_contents = "*" * expected_s3_size
expected_checksum = hashlib.md5(expected_s3_contents).hexdigest()
expected_location = format_s3_location(
S3_CONF['s3_store_access_key'],
S3_CONF['s3_store_secret_key'],
S3_CONF['s3_store_host'],
S3_CONF['s3_store_bucket'],
expected_image_id)
image_s3 = StringIO.StringIO(expected_s3_contents)
location, size, checksum = self.store.add(expected_image_id,
image_s3,
expected_s3_size)
self.assertEquals(expected_location, location)
self.assertEquals(expected_s3_size, size)
self.assertEquals(expected_checksum, checksum)
loc = get_location_from_uri(expected_location)
(new_image_s3, new_image_size) = self.store.get(loc)
new_image_contents = StringIO.StringIO()
for chunk in new_image_s3:
new_image_contents.write(chunk)
new_image_s3_size = new_image_contents.len
self.assertEquals(expected_s3_contents, new_image_contents.getvalue())
self.assertEquals(expected_s3_size, new_image_s3_size)
def test_add_host_variations(self):
"""
        Test that having http(s):// in the s3_store_host config
        option works as expected.
"""
variations = ['http://localhost:80',
'http://localhost',
'http://localhost/v1',
'http://localhost/v1/',
'https://localhost',
'https://localhost:8080',
'https://localhost/v1',
'https://localhost/v1/',
'localhost',
'localhost:8080/v1']
for variation in variations:
expected_image_id = utils.generate_uuid()
expected_s3_size = FIVE_KB
expected_s3_contents = "*" * expected_s3_size
expected_checksum = \
hashlib.md5(expected_s3_contents).hexdigest()
new_conf = S3_CONF.copy()
new_conf['s3_store_host'] = variation
expected_location = format_s3_location(
new_conf['s3_store_access_key'],
new_conf['s3_store_secret_key'],
new_conf['s3_store_host'],
new_conf['s3_store_bucket'],
expected_image_id)
image_s3 = StringIO.StringIO(expected_s3_contents)
self.store = Store(test_utils.TestConfigOpts(new_conf))
location, size, checksum = self.store.add(expected_image_id,
image_s3,
expected_s3_size)
self.assertEquals(expected_location, location)
self.assertEquals(expected_s3_size, size)
self.assertEquals(expected_checksum, checksum)
loc = get_location_from_uri(expected_location)
(new_image_s3, new_image_size) = self.store.get(loc)
new_image_contents = new_image_s3.getvalue()
new_image_s3_size = new_image_s3.len
self.assertEquals(expected_s3_contents, new_image_contents)
self.assertEquals(expected_s3_size, new_image_s3_size)
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
image_s3 = StringIO.StringIO("nevergonnamakeit")
self.assertRaises(exception.Duplicate,
self.store.add,
FAKE_UUID, image_s3, 0)
def _option_required(self, key):
conf = S3_CONF.copy()
del conf[key]
try:
self.store = Store(test_utils.TestConfigOpts(conf))
return self.store.add == self.store.add_disabled
except:
return False
return False
def test_no_access_key(self):
"""
Tests that options without access key disables the add method
"""
self.assertTrue(self._option_required('s3_store_access_key'))
def test_no_secret_key(self):
"""
Tests that options without secret key disables the add method
"""
self.assertTrue(self._option_required('s3_store_secret_key'))
def test_no_host(self):
"""
Tests that options without host disables the add method
"""
self.assertTrue(self._option_required('s3_store_host'))
def test_delete(self):
"""
Test we can delete an existing image in the s3 store
"""
uri = "s3://user:key@auth_address/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
self.store.delete(loc)
self.assertRaises(exception.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
        Test that trying to delete an s3 object that doesn't exist
raises an error
"""
uri = "s3://user:key@auth_address/glance/noexist"
loc = get_location_from_uri(uri)
self.assertRaises(exception.NotFound, self.store.delete, loc)
| rcbops/glance-buildpackage | glance/tests/unit/test_s3_store.py | Python | apache-2.0 | 11,404 |
import re
from lxml import etree
from pyramid.settings import asbool
from .exception import ConfigurationError
def clean_oai_settings(settings):
"""Parse and validate OAI app settings in a dictionary.
Check that the settings required by the OAI app are in the settings
dictionary and have valid values. Convert them to correct types.
Required settings are:
admin_emails
deleted_records
item_list_limit
logging_config
repository_descriptions
repository_name
sqlalchemy.url
Parameters
----------
settings: dict from str to str
The settings dictionary.
Raises
------
ConfigurationError:
If some setting is missing or has an invalid value.
"""
cleaners = {
'admin_emails': _clean_admin_emails,
'deleted_records': _clean_deleted_records,
'item_list_limit': _clean_item_list_limit,
'logging_config': _clean_unicode,
'repository_descriptions': _load_repository_descriptions,
'repository_name': _clean_unicode,
'sqlalchemy.url': _clean_unicode,
}
_clean_settings(settings, cleaners)
def clean_importer_settings(settings):
"""Parse and validate metadata importer settings in a dictionary.
Check that the settings required by the metadata importer are in the
settings dictionary and have valid values. Convert them to correct
types. Required settings are:
deleted_records
dry_run
force_update
logging_config
sqlalchemy.url
timestamp_file
metadata_provider_class
metadata_provider_args
Parameters
----------
settings: dict from str to str
The settings dictionary.
Raises
------
ConfigurationError:
If some setting is missing or has an invalid value.
"""
cleaners = {
'deleted_records': _clean_deleted_records,
'dry_run': _clean_boolean,
'force_update': _clean_boolean,
'logging_config': _clean_unicode,
'sqlalchemy.url': _clean_unicode,
'timestamp_file': _clean_unicode,
'metadata_provider_args': _clean_unicode,
'metadata_provider_class': _clean_provider_class,
}
return _clean_settings(settings, cleaners)
def _clean_settings(settings, cleaners):
"""Check that settings are ok.
The parameter `cleaners` is a dict from setting names to functions.
Each cleaner function is called with the value of the corresponding
setting. The cleaners should raise an exception if the value is invalid
and otherwise return a cleaned value. The old value gets replaced by
the cleaned value.
Parameters
----------
settings: dict from str to str
The settings dictionary.
cleaners: dict from str to callable
Mapping from setting names to cleaner functions.
Raises
------
ConfigurationError:
If any setting is missing or invalid.
"""
for name, func in cleaners.iteritems():
if name not in settings:
raise ConfigurationError('missing setting {0}'.format(name))
try:
cleaned = func(settings[name])
settings[name] = cleaned
except Exception as error:
raise ConfigurationError(
'invalid {0} setting: {1}'.format(name, error)
)
def _clean_admin_emails(value):
"""Check that the value is a list of valid email addresses."""
# email regex pattern defined in the OAI-PMH XML schema
pattern = re.compile(r'^\S+@(\S+\.)+\S+$', flags=re.UNICODE)
emails = _clean_unicode(value).split()
if not emails:
raise ValueError('no emails')
for email in emails:
if re.match(pattern, email) is None:
raise ValueError(
'invalid email address: {0}'
''.format(repr(email))
)
return emails
def _clean_deleted_records(value):
"""Check that value is one of "no", "transient", "persistent"."""
allowed_values = ['no', 'transient', 'persistent']
if value not in allowed_values:
raise ValueError('deleted_records must be one of {0}'.format(
allowed_values
))
return unicode(value)
def _clean_boolean(value):
"""Return the value as a bool."""
return asbool(value)
def _clean_item_list_limit(value):
"""Check that value is a positive integer."""
int_value = int(value)
if int_value <= 0:
raise ValueError('item_list_limit must be positive')
return int_value
def _clean_unicode(value):
"""Return the value as a unicode."""
if isinstance(value, str):
return value.decode('utf-8')
else:
return unicode(value)
def _clean_provider_class(value):
"""Split the value to module name and classname."""
modulename, classname = value.split(':')
if len(modulename) == 0:
raise ValueError('empty module name')
if len(classname) == 0:
raise ValueError('empty class name')
return (modulename, classname)
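# For example, a setting value of 'my_package.providers:MyProvider' is split into
# ('my_package.providers', 'MyProvider'); the importer presumably uses the pair to
# import the module and look the class up by name.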
def _load_repository_descriptions(value):
"""Load XML fragments from files."""
def load_description(path):
"""Load a single description."""
with open(path, 'r') as file_:
contents = file_.read()
try:
doc = etree.fromstring(contents.encode('utf-8'))
except Exception as error:
raise ValueError(
'ill-formed XML in repository description {0}: '
'{1}'.format(repr(path), error)
)
xsi_ns = 'http://www.w3.org/2001/XMLSchema-instance'
if doc.get('{{{0}}}schemaLocation'.format(xsi_ns)) is None:
raise ValueError('no schema location')
return contents
paths = value.split()
return map(load_description, paths)
| Tietoarkisto/kuha | kuha/config.py | Python | bsd-3-clause | 6,033 |
#!/usr/bin/env python
import uvloop
import asyncio
import time
from pprint import pprint
from owllook.fetcher.parse import novels_search
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
def novel_task(url):
loop = asyncio.get_event_loop()
task = asyncio.ensure_future(novels_search(url))
loop.run_until_complete(task)
return task.result()
start = time.time()
#result = novel_task('http://www.shuge.net/html/98/98044')
result = novel_task('www.dingdianzw.com')
pprint(result)
print(time.time() - start)
| zhiyue/owllook | tests/parse_test.py | Python | apache-2.0 | 530 |
# -*- coding: utf-8 -*-
"""
Foundation Images
=================
This plugin is mostly for the foundation theme I'm building
which allows one to easily insert centered images with captions
into posts. It's extremely basic and niche for now, but I might
make it more fully-featured later.
"""
from pelican import signals, contents
import re
replacementString = r'''<div class="row image-container">
<div class="small-12 columns text-center">
<div class="row">
<div class="small-12 columns text-center image-image">
<img src="\1"/>
</div>
</div>
<div class="row image-caption">
\2
</div>
</div>
</div>'''
def processImages(content):
if isinstance(content, contents.Static):
return
text = re.sub(r"\{image-caption\[(?P<url>.*?)\]\[(?P<caption>.*?)\]\}", replacementString, content._content)
content._content = text
def register():
signals.content_object_init.connect(processImages)
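# Rough demonstration of the substitution performed above (hypothetical input;
# the pelican import at the top must still succeed for this module to load):
if __name__ == "__main__":
    sample = "Some text {image-caption[/images/cat.jpg][A cat]} more text"
    print(re.sub(r"\{image-caption\[(?P<url>.*?)\]\[(?P<caption>.*?)\]\}",
                 replacementString, sample))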
| FuzzyWuzzie/foundation_images | foundation_images.py | Python | mit | 914 |
import array
import numpy as np
import tensorflow as tf
from collections import defaultdict
def load_vocab(filename):
vocab = None
with open(filename) as f:
vocab = f.read().splitlines()
dct = defaultdict(int)
for idx, word in enumerate(vocab):
dct[word] = idx
return [vocab, dct]
def load_glove_vectors(filename, vocab):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
tf.logging.info("Found {} out of {} vectors in Glove".format(num_vectors, len(vocab)))
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors, embedding_dim):
initial_embeddings = np.random.uniform(-0.25, 0.25, (len(vocab_dict), embedding_dim)).astype("float32")
for word, glove_word_idx in glove_dict.items():
word_idx = vocab_dict.get(word)
initial_embeddings[word_idx, :] = glove_vectors[glove_word_idx]
return initial_embeddings
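# Illustrative glue code (the file paths and dimension are placeholders, not part
# of this repository): load a vocabulary, intersect it with a GloVe file and build
# the matrix used to seed an embedding variable.
def example_initial_embeddings(vocab_path, glove_path, embedding_dim):
    vocab, vocab_dict = load_vocab(vocab_path)
    glove_vectors, glove_dict = load_glove_vectors(glove_path, set(vocab))
    return build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors,
                                          embedding_dim)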
| baeriivan/OpensubtitleBot | models/helpers.py | Python | mit | 1,428 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the ChannelMerger class.
"""
from harpia.GUI.fieldtypes import *
from harpia.model.plugin import Plugin
class ChannelMerger(Plugin):
# -------------------------------------------------------------------------
def __init__(self):
Plugin.__init__(self)
self.language = "javascript"
self.framework = "webaudio"
self.help = "Channel Merger"
self.label = "Channel Merger"
self.color = "50:150:250:150"
self.in_ports = [{"type":"harpia.extensions.javascript.ports.sound",
"label":"Sound Input 1",
"name":"sound_input_1"},
{"type":"harpia.extensions.javascript.ports.sound",
"label":"Sound Input 2",
"name":"sound_input_2"}
]
self.out_ports = [{"type":"harpia.extensions.javascript.ports.sound",
"label":"Sound Output",
"name":"sound_output"}
]
self.group = "Sound"
self.codes[0] = """
Merger = function(context) {
var that = this;
this.x = 0; // Initial sample number
this.context = context;
this.node = context.createScriptProcessor(1024, 1, 1);
this.node.onaudioprocess = function(e) { that.process(e) };
}
Merger.prototype.process = function(e) {
var in1 = e.inputBuffer.getChannelData(0);
var out = e.outputBuffer.getChannelData(0);
for (var i = 0; i < in1.length; ++i) {
out[i] = in1[i];
}
}
"""
self.codes[1] = """
// block_$id$ = Channel Merger
var block_$id$_obj = new Merger(context);
var $out_ports[sound_output]$ = block_$id$_obj.node;
var $in_ports[sound_input_1]$ = block_$id$_obj.node;
var $in_ports[sound_input_2]$ = block_$id$_obj.node;
"""
| llgoncalves/harpia | harpia/extensions/javascript/webaudio/channelmerger.py | Python | gpl-2.0 | 1,789 |
# coding:utf-8
from flask import Flask
def create_app():
app = Flask(__name__)
app.config.from_object("application.config.DevelopmentConfig")
return app
app = create_app()
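# views is imported at the bottom, after app exists, so that the views module can
# import app from this package without a circular import (the usual reason for
# this late-import pattern in Flask applications).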
from application import views
| shiraco/gourmetgrapher | application/__init__.py | Python | apache-2.0 | 219 |
"""Support for Tahoma scenes."""
import logging
from homeassistant.components.scene import Scene
from . import DOMAIN as TAHOMA_DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tahoma scenes."""
controller = hass.data[TAHOMA_DOMAIN]['controller']
scenes = []
for scene in hass.data[TAHOMA_DOMAIN]['scenes']:
scenes.append(TahomaScene(scene, controller))
add_entities(scenes, True)
class TahomaScene(Scene):
"""Representation of a Tahoma scene entity."""
def __init__(self, tahoma_scene, controller):
"""Initialize the scene."""
self.tahoma_scene = tahoma_scene
self.controller = controller
self._name = self.tahoma_scene.name
def activate(self):
"""Activate the scene."""
self.controller.launch_action_group(self.tahoma_scene.oid)
@property
def name(self):
"""Return the name of the scene."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the scene."""
return {'tahoma_scene_oid': self.tahoma_scene.oid}
| MartinHjelmare/home-assistant | homeassistant/components/tahoma/scene.py | Python | apache-2.0 | 1,181 |
from django.contrib import admin
# Register your models here.
from app.models import Crags
admin.site.register(Crags)
| agronauts/rockandrice | rocknrice/app/admin.py | Python | gpl-3.0 | 120 |
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with child processes."""
import fcntl
import os
import subprocess
import select
class _LineAccumulator(object):
"""Accumulates data and calls a callback for every line."""
def __init__(self, lineCallback):
self.__buffer = []
self.__cb = lineCallback
def add(self, data):
# If there is a newline, call the callback on the data we've got so
# far plus the newline.
newline_pos = data.find('\n') + 1
while newline_pos > 0:
line = ''.join(self.__buffer) + data[:newline_pos]
self.__buffer = []
self.__cb(line)
data = data[newline_pos:]
newline_pos = data.find('\n') + 1
# add whatever's leftover to the buffer.
if data:
self.__buffer.append(data)
def finish(self):
self.__cb(''.join(self.__buffer))
self.__buffer = []
def _set_nonblocking(f):
fd = f.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def run(*args, **kwargs):
stdout_callback = kwargs.get('stdout_callback')
stderr_callback = kwargs.get('stderr_callback')
pipe_error = kwargs.get('pipe_error_callback')
stdin = kwargs.get('stdin')
# Define the accumulators to help us manage the process output.
stdout_accumulator = _LineAccumulator(stdout_callback)
stderr_accumulator = _LineAccumulator(stderr_callback)
proc = subprocess.Popen(args,
stdout=subprocess.PIPE if stdout_callback else None,
stderr=subprocess.PIPE if stderr_callback else
None,
stdin=subprocess.PIPE if stdin else None)
inputs = [proc.stdout, proc.stderr]
outputs = [proc.stdin] if stdin else []
# Make the inputs non-blocking
if stdout_callback:
_set_nonblocking(proc.stdout)
if stderr_callback:
_set_nonblocking(proc.stderr)
while inputs or outputs:
rdx, wrx, erx = select.select(inputs, outputs, inputs + outputs)
# Remove any error pipes from consideration.
for p in erx:
if pipe_error:
if p is proc.stderr:
name = 'stderr'
elif p is proc.stdout:
name = 'stdout'
elif p is proc.stdin:
name = 'stdin'
pipe_error(name)
inputs.remove(p)
# Feed in the next chunk of standard input.
if wrx:
if stdin:
wrx[0].write(stdin[:1024])
wrx[0].flush()
stdin = stdin[1024:]
else:
wrx[0].close()
outputs = []
# Read everything from the process.
for p in rdx:
data = os.read(p.fileno(), 1024)
if p is proc.stdout:
stdout_accumulator.add(data)
else:
stderr_accumulator.add(data)
if not data:
inputs.remove(p)
stdout_accumulator.finish()
stderr_accumulator.finish()
return proc.wait()
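# Minimal usage sketch (assumes a POSIX system with an `echo` binary on the PATH;
# both callbacks are supplied because run() puts both of the child's pipes into
# its select() loop):
if __name__ == '__main__':
    collected = []
    status = run('echo', 'hello',
                 stdout_callback=collected.append,
                 stderr_callback=collected.append)
    assert status == 0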
| GoogleCloudPlatform/SourceXCloud | lib/sxc/proclib.py | Python | apache-2.0 | 3,746 |
# occiput
# Stefano Pedemonte
# Aalto University, School of Science, Helsinki
# Oct 2013, Helsinki
# Harvard University, Martinos Center for Biomedical Imaging
# Boston, MA, USA
from occiput.Visualization.Visualization import ProgressBar
from occiput.DataSources.FileSources.LookupTable import load_freesurfer_lut_file
import subprocess
import logging
import sys
import inspect
import os
class Downloader_HTTP():
def __init__(self):
self._filename = 'unknown'
self._output = None
self._progress_bar = ProgressBar()
self._progress_bar.set_percentage(0)
self._verbose = False
def set_verbose(self, verbose):
self._verbose = verbose
def _set_percentage(self,percentage):
self._progress_bar.set_percentage(percentage)
def download(self, url, output=None, overwrite=False):
# Check if output exists:
if not overwrite:
if output!=None:
if os.path.exists(output):
if self._verbose:
                        print "File",output,"exists, not downloading."
self._set_percentage(100)
return output
self._set_percentage(0)
self._output = output
if output==None:
args = ['wget',url,'--continue']
else:
args = ['wget',url,'--continue','-O',output]
try:
pipe = subprocess.Popen(args, bufsize = 0,
shell = False,
                               stdout = None, # no redirection, the child uses the parent's stdout
                               stderr = subprocess.PIPE) # redirect stderr to a new pipe that we read from below
except Exception as e:
#inspect.stack()[1][3] will get caller function name
logging.error(inspect.stack()[1][3] + ' error: ' + str(e))
return False
while 1:
s = pipe.stderr.readline()
if s:
p = self._strip_percentage(s)
if p!=None:
self._set_percentage(p)
if self._verbose:
print s
name = self._strip_filename(s)
if name!=None:
self._set_filename(name)
#sys.stdout.write(s)
if pipe.returncode is None:
code = pipe.poll()
else:
break
if not 0 == pipe.returncode:
self._set_percentage(0)
return False
self._set_percentage(100)
if output:
return output
else:
return self._filename
def _strip_percentage(self,s):
p = s[s.find('%')-2:s.find('%')].strip(' ')
try:
percentage = int(p)
except:
return None
else:
return percentage
def _strip_filename(self,s):
if s.find("Saving to")!=-1:
name = s.strip("Saving to").strip("\n").strip("“").strip("'").strip("`")
if self._verbose:
print "Detected name: ",name
return name
def _set_filename(self,name):
self._filename = name
class Dropbox(Downloader_HTTP):
pass
def download_Dropbox(url, output=None, overwrite=False, verbose=False):
D = Dropbox()
D.set_verbose(verbose)
return D.download(url, output, overwrite)
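# Usage sketch (the URL and output name are placeholders; the download is
# delegated to wget, so wget must be available on the PATH):
#
#   download_Dropbox('https://www.dropbox.com/s/abcdef/atlas.nii?dl=1',
#                    output='atlas.nii', overwrite=False, verbose=True)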
| kastman/occiput | occiput/DataSources/FileSources/Web.py | Python | apache-2.0 | 3,397 |
import scipy.misc
import random
xs = []
ys = []
#points to the end of the last batch
train_batch_pointer = 0
val_batch_pointer = 0
#read data.txt
with open("/usr/local/google/home/limeng/Downloads/nvida/data.txt") as f:
for line in f:
xs.append("/usr/local/google/home/limeng/Downloads/nvida/driving_dataset/" + line.split()[0])
#the paper by Nvidia uses the inverse of the turning radius,
#but steering wheel angle is proportional to the inverse of turning radius
#so the steering wheel angle in radians is used as the output
ys.append(float(line.split()[1]) * scipy.pi / 180)
#get number of images
num_images = len(xs)
#shuffle list of images
c = list(zip(xs, ys))
random.shuffle(c)
xs, ys = zip(*c)
train_xs = xs[:int(len(xs) * 0.8)]
train_ys = ys[:int(len(xs) * 0.8)]
val_xs = xs[-int(len(xs) * 0.2):]
val_ys = ys[-int(len(xs) * 0.2):]
num_train_images = len(train_xs)
num_val_images = len(val_xs)
def load_train_batch(batch_size):
global train_batch_pointer
x_out = []
y_out = []
for i in range(0, batch_size):
x_out.append(
scipy.misc.imresize(
scipy.misc.imread(
train_xs[(train_batch_pointer + i) % num_train_images]), [66, 200]) / 255.0)
y_out.append([train_ys[(train_batch_pointer + i) % num_train_images]])
train_batch_pointer += batch_size
return x_out, y_out
def load_val_batch(batch_size):
global val_batch_pointer
x_out = []
y_out = []
for i in range(0, batch_size):
x_out.append(
scipy.misc.imresize(
scipy.misc.imread(
val_xs[(val_batch_pointer + i) % num_val_images]), [66, 200]) / 255.0)
y_out.append([val_ys[(val_batch_pointer + i) % num_val_images]])
val_batch_pointer += batch_size
return x_out, y_out
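# Usage sketch (requires data.txt and the driving_dataset images referenced above
# to be present on disk):
#
#   batch_x, batch_y = load_train_batch(100)  # 100 images, 66x200, scaled to [0, 1]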
| mengli/MachineLearning | self_driving/steering/driving_data.py | Python | apache-2.0 | 1,856 |
import threading
from yandextank.core.tankcore import TankCore
from yandextank.plugins.Telegraf import Plugin as TelegrafPlugin
class TestTelegrafPlugin(object):
def test_plugin_configuration(self):
""" testing telegraf plugin configuration """
cfg = {
'core': {},
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 'yandextank/plugins/Telegraf/tests/telegraf_mon.xml'
}
}
core = TankCore(cfg, threading.Event())
telegraf_plugin = core.get_plugin_of_type(TelegrafPlugin)
telegraf_plugin.configure()
assert telegraf_plugin.detected_conf == 'telegraf'
def test_legacy_plugin_configuration(self):
""" testing legacy plugin configuration, old-style monitoring """
cfg = {
'core': {},
'monitoring': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 'yandextank/plugins/Telegraf/tests/old_mon.xml'
}
}
core = TankCore(cfg, threading.Event())
telegraf_plugin = core.get_plugin_of_type(TelegrafPlugin)
telegraf_plugin.configure()
assert telegraf_plugin.detected_conf == 'monitoring'
| f2nd/yandex-tank | yandextank/plugins/Telegraf/tests/test_plugin.py | Python | lgpl-2.1 | 1,320 |
# Copyright (c) Mathias Kaerlev 2011-2012.
# This file is part of pyspades.
# pyspades is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyspades is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyspades. If not, see <http://www.gnu.org/licenses/>.
import math
import inspect
from random import choice
from pyspades.constants import *
from pyspades.common import prettify_timespan
from pyspades.server import parse_command
import pyspades.server as server
from twisted.internet import reactor
from map import check_rotation
commands = {}
aliases = {}
rights = {}
class InvalidPlayer(Exception):
pass
class InvalidSpectator(InvalidPlayer):
pass
class InvalidTeam(Exception):
pass
def add_rights(func_name, *user_types):
for user_type in user_types:
if user_type in rights:
rights[user_type].add(func_name)
else:
rights[user_type] = set([func_name])
def restrict(func, *user_types):
def new_func(connection, *arg, **kw):
return func(connection, *arg, **kw)
new_func.func_name = func.func_name
new_func.user_types = user_types
new_func.argspec = inspect.getargspec(func)
return new_func
def has_rights(f, connection):
return not hasattr(f, 'user_types') or f.func_name in connection.rights
def admin(func):
return restrict(func, 'admin')
def name(name):
def dec(func):
func.func_name = name
return func
return dec
def alias(name):
def dec(func):
try:
func.aliases.append(name)
except AttributeError:
func.aliases = [name]
return func
return dec
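# Illustrative only - a hypothetical command showing how the decorators above
# combine (admin restriction, a primary name, an alias); the real commands of
# this module are defined further down:
#
#   @alias('gm')
#   @name('greetme')
#   @admin
#   def greet_me(connection):
#       return 'Hello, %s' % connection.name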
def get_player(protocol, value, spectators = True):
ret = None
try:
if value.startswith('#'):
value = int(value[1:])
ret = protocol.players[value]
else:
players = protocol.players
try:
ret = players[value]
except KeyError:
value = value.lower()
for player in players.values():
name = player.name.lower()
if name == value:
return player
if name.count(value):
ret = player
except (KeyError, IndexError, ValueError):
pass
if ret is None:
raise InvalidPlayer()
elif not spectators and ret.world_object is None:
raise InvalidSpectator()
return ret
def get_team(connection, value):
value = value.lower()
if value == 'blue':
return connection.protocol.blue_team
elif value == 'green':
return connection.protocol.green_team
elif value == 'spectator':
return connection.protocol.spectator_team
raise InvalidTeam()
def join_arguments(arg, default = None):
if not arg:
return default
return ' '.join(arg)
def parse_maps(pre_maps):
maps = []
for n in pre_maps:
if n[0]=="#" and len(maps)>0:
maps[-1] += " "+n
else:
maps.append(n)
return maps, ', '.join(maps)
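# For example, parse_maps(['pyramid', '#1', 'classicgen']) returns
# (['pyramid #1', 'classicgen'], 'pyramid #1, classicgen'): an entry starting
# with '#' continues the previous map name instead of starting a new one.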
@admin
def kick(connection, value, *arg):
reason = join_arguments(arg)
player = get_player(connection.protocol, value)
player.kick(reason)
def get_ban_arguments(connection, arg):
duration = None
if len(arg):
try:
duration = int(arg[0])
arg = arg[1:]
except (IndexError, ValueError):
pass
if duration is None:
if len(arg)>0 and arg[0] == "perma":
arg = arg[1:]
else:
duration = connection.protocol.default_ban_time
reason = join_arguments(arg)
return duration, reason
@admin
def ban(connection, value, *arg):
duration, reason = get_ban_arguments(connection, arg)
player = get_player(connection.protocol, value)
player.ban(reason, duration)
@admin
def hban(connection, value, *arg):
duration = int(60)
reason = join_arguments(arg)
player = get_player(connection.protocol, value)
player.ban(reason, duration)
@admin
def dban(connection, value, *arg):
duration = int(1440)
reason = join_arguments(arg)
player = get_player(connection.protocol, value)
player.ban(reason, duration)
@admin
def wban(connection, value, *arg):
duration = int(10080)
reason = join_arguments(arg)
player = get_player(connection.protocol, value)
player.ban(reason, duration)
@admin
def banip(connection, ip, *arg):
duration, reason = get_ban_arguments(connection, arg)
try:
connection.protocol.add_ban(ip, reason, duration)
except ValueError:
return 'Invalid IP address/network'
reason = ': ' + reason if reason is not None else ''
duration = duration or None
if duration is None:
return 'IP/network %s permabanned%s' % (ip, reason)
else:
return 'IP/network %s banned for %s%s' % (ip,
prettify_timespan(duration * 60), reason)
@admin
def unban(connection, ip):
try:
connection.protocol.remove_ban(ip)
return 'IP unbanned'
except KeyError:
return 'IP not found in ban list'
@name('undoban')
@admin
def undo_ban(connection, *arg):
if len(connection.protocol.bans)>0:
result = connection.protocol.undo_last_ban()
return ('Ban for %s undone' % result[0])
else:
return 'No bans to undo!'
@admin
def say(connection, *arg):
value = ' '.join(arg)
connection.protocol.send_chat(value)
connection.protocol.irc_say(value)
@admin
def csay(connection, color, *arg):
value = ' '.join(arg)
color = int(color)
connection.protocol.send_chat(value, color = color)
connection.protocol.irc_say(value)
add_rights('kill', 'admin')
def kill(connection, value = None):
if value is None:
player = connection
else:
if not connection.rights.kill:
return "You can't use this command"
player = get_player(connection.protocol, value, False)
player.kill()
if connection is not player:
message = '%s killed %s' % (connection.name, player.name)
connection.protocol.send_chat(message, irc = True)
@admin
def sanick(connection, player, nick):
if not connection.protocol.powerthirst:
return "This command is not supported in vanilla mode"
player = get_player(connection.protocol, player, False)
nick = nick[:connection.protocol.max_name_length]
s = "%s #%i is now known as %s" % (player.name, player.player_id, nick)
player.name = nick
connection.protocol.call_ascript("void set_name(int, string &in)", [(ASP_INT, player.player_id), (ASP_PSTRING, player.name)])
connection.protocol.send_chat(s, irc = True)
@admin
def heal(connection, player = None):
if player is not None:
player = get_player(connection.protocol, player, False)
message = '%s was healed by %s' % (player.name, connection.name)
else:
if connection not in connection.protocol.players:
raise ValueError()
player = connection
message = '%s was healed' % (connection.name)
player.refill()
connection.protocol.send_chat(message, irc = True)
def rules(connection):
if connection not in connection.protocol.players:
raise KeyError()
lines = connection.protocol.rules
if lines is None:
return
connection.send_lines(lines)
def help(connection):
"""
This help
"""
if connection.protocol.help is not None and not connection.admin:
connection.send_lines(connection.protocol.help)
else:
names = [command.func_name for command in command_list
if command.func_name in connection.rights]
return 'Available commands: %s' % (', '.join(names))
def login(connection, password):
"""
Login as a user type
"""
if connection not in connection.protocol.players:
raise KeyError()
for user_type, passwords in connection.protocol.passwords.iteritems():
if password in passwords:
if user_type in connection.user_types:
return "You're already logged in as %s" % user_type
return connection.on_user_login(user_type, True)
if connection.login_retries is None:
connection.login_retries = connection.protocol.login_retries - 1
else:
connection.login_retries -= 1
if not connection.login_retries:
connection.kick('Ran out of login attempts')
return
return 'Invalid password - you have %s tries left' % (
connection.login_retries)
def pm(connection, value, *arg):
player = get_player(connection.protocol, value)
message = join_arguments(arg)
player.send_chat('PM from %s: %s' % (connection.name, message))
return 'PM sent to %s' % player.name
@name('admin')
def to_admin(connection, *arg):
protocol = connection.protocol
message = join_arguments(arg)
if not message:
return "Enter a message you want to send, like /admin I'm stuck"
prefix = '(TO ADMINS)'
irc_relay = protocol.irc_relay
if irc_relay:
if irc_relay.factory.bot and irc_relay.factory.bot.colors:
prefix = '\x0304' + prefix + '\x0f'
irc_relay.send(prefix + ' <%s> %s' % (connection.name, message))
for player in protocol.players.values():
if player.admin and player is not connection:
player.send_chat('To ADMINS from %s: %s' %
(connection.name, message))
return 'Message sent to admins'
def streak(connection):
if connection not in connection.protocol.players:
raise KeyError()
return ('Your current kill streak is %s. Best is %s kills' %
(connection.streak, connection.best_streak))
@admin
def lock(connection, value):
team = get_team(connection, value)
team.locked = True
connection.protocol.send_chat('%s team is now locked' % team.name)
connection.protocol.irc_say('* %s locked %s team' % (connection.name,
team.name))
@admin
def unlock(connection, value):
team = get_team(connection, value)
team.locked = False
connection.protocol.send_chat('%s team is now unlocked' % team.name)
connection.protocol.irc_say('* %s unlocked %s team' % (connection.name,
team.name))
@admin
def switch(connection, player = None, team = None):
protocol = connection.protocol
if player is not None:
player = get_player(protocol, player)
elif connection in protocol.players:
player = connection
else:
raise ValueError()
if player.team.spectator:
player.send_chat("The switch command can't be used on a spectating player.")
return
if team is None:
new_team = player.team.other
else:
new_team = get_team(connection, team)
if player.invisible:
old_team = player.team
player.team = new_team
player.on_team_changed(old_team)
player.spawn(player.world_object.position.get())
player.send_chat('Switched to %s team' % player.team.name)
if connection is not player and connection in protocol.players:
connection.send_chat('Switched %s to %s team' % (player.name,
player.team.name))
protocol.irc_say('* %s silently switched teams' % player.name)
else:
player.respawn_time = protocol.respawn_time
player.set_team(new_team)
protocol.send_chat('%s switched teams' % player.name, irc = True)
@name('setbalance')
@admin
def set_balance(connection, value):
try:
value = int(value)
except ValueError:
return 'Invalid value %r. Use 0 for off, 1 and up for on' % value
protocol = connection.protocol
protocol.balanced_teams = value
protocol.send_chat('Balanced teams set to %s' % value)
connection.protocol.irc_say('* %s set balanced teams to %s' % (
connection.name, value))
@name('togglebuild')
@alias('tb')
@admin
def toggle_build(connection, player = None):
if player is not None:
player = get_player(connection.protocol, player)
value = not player.building
player.building = value
msg = '%s can build again' if value else '%s is disabled from building'
connection.protocol.send_chat(msg % player.name)
connection.protocol.irc_say('* %s %s building for %s' % (connection.name,
['disabled', 'enabled'][int(value)], player.name))
return
value = not connection.protocol.building
connection.protocol.building = value
on_off = ['OFF', 'ON'][int(value)]
connection.protocol.send_chat('Building has been toggled %s!' % on_off)
connection.protocol.irc_say('* %s toggled building %s' % (connection.name,
on_off))
@name('togglekill')
@alias('tk')
@admin
def toggle_kill(connection, player = None):
if player is not None:
player = get_player(connection.protocol, player)
value = not player.killing
player.killing = value
msg = '%s can kill again' if value else '%s is disabled from killing'
connection.protocol.send_chat(msg % player.name)
connection.protocol.irc_say('* %s %s killing for %s' % (connection.name,
['disabled', 'enabled'][int(value)], player.name))
return
value = not connection.protocol.killing
connection.protocol.killing = value
on_off = ['OFF', 'ON'][int(value)]
connection.protocol.send_chat('Killing has been toggled %s!' % on_off)
connection.protocol.irc_say('* %s toggled killing %s' % (connection.name,
on_off))
@name('toggleteamkill')
@admin
def toggle_teamkill(connection):
value = not connection.protocol.friendly_fire
connection.protocol.friendly_fire = value
on_off = ['OFF', 'ON'][int(value)]
connection.protocol.send_chat('Friendly fire has been toggled %s!' % on_off)
connection.protocol.irc_say('* %s toggled friendly fire %s' % (
connection.name, on_off))
@admin
def mute(connection, value):
player = get_player(connection.protocol, value)
if player.mute:
return '%s is already muted' % player.name
player.mute = True
message = '%s has been muted by %s' % (player.name, connection.name)
connection.protocol.send_chat(message, irc = True)
@admin
def unmute(connection, value):
player = get_player(connection.protocol, value)
if not player.mute:
return '%s is not muted' % player.name
player.mute = False
message = '%s has been unmuted by %s' % (player.name, connection.name)
connection.protocol.send_chat(message, irc = True)
def deaf(connection, value = None):
if value is not None:
if not connection.admin and not connection.rights.deaf:
return 'No administrator rights!'
connection = get_player(connection.protocol, value)
message = '%s deaf' % ('now' if not connection.deaf else 'no longer')
connection.protocol.irc_say('%s is %s' % (connection.name, message))
message = "You're " + message
if connection.deaf:
connection.deaf = False
connection.send_chat(message)
else:
connection.send_chat(message)
connection.deaf = True
@name('globalchat')
@admin
def global_chat(connection):
connection.protocol.global_chat = not connection.protocol.global_chat
connection.protocol.send_chat('Global chat %s' % ('enabled' if
connection.protocol.global_chat else 'disabled'), irc = True)
@alias('tp')
@admin
def teleport(connection, player1, player2 = None, silent = False):
player1 = get_player(connection.protocol, player1)
if player2 is not None:
if connection.admin or connection.rights.teleport_other:
player, target = player1, get_player(connection.protocol, player2)
silent = silent or player.invisible
message = ('%s ' + ('silently ' if silent else '') + 'teleported '
'%s to %s')
message = message % (connection.name, player.name, target.name)
else:
return 'No administrator rights!'
else:
if connection not in connection.protocol.players:
raise ValueError()
player, target = connection, player1
silent = silent or player.invisible
message = '%s ' + ('silently ' if silent else '') + 'teleported to %s'
message = message % (player.name, target.name)
x, y, z = target.get_location()
player.set_location(((x-0.5), (y-0.5), (z+0.5)))
if silent:
connection.protocol.irc_say('* ' + message)
else:
connection.protocol.send_chat(message, irc = True)
@admin
def unstick(connection, player = None):
if player is not None:
player = get_player(connection.protocol, player)
else:
player = connection
connection.protocol.send_chat("%s unstuck %s" %
(connection.name, player.name), irc = True)
player.set_location_safe(player.get_location())
@alias('tps')
@admin
def tpsilent(connection, player1, player2 = None):
teleport(connection, player1, player2, silent = True)
from pyspades.common import coordinates, to_coordinates
@name('goto')
@admin
def go_to(connection, value):
if connection not in connection.protocol.players:
raise KeyError()
move(connection, connection.name, value, silent = connection.invisible)
@name('gotos')
@admin
def go_to_silent(connection, value):
if connection not in connection.protocol.players:
raise KeyError()
move(connection, connection.name, value, True)
@admin
def move(connection, player, value, silent = False):
player = get_player(connection.protocol, player)
x, y = coordinates(value)
x += 32
y += 32
player.set_location((x, y, connection.protocol.map.get_height(x, y) - 2))
if connection is player:
message = ('%s ' + ('silently ' if silent else '') + 'teleported to '
'location %s')
message = message % (player.name, value.upper())
else:
message = ('%s ' + ('silently ' if silent else '') + 'teleported %s '
'to location %s')
message = message % (connection.name, player.name, value.upper())
if silent:
connection.protocol.irc_say('* ' + message)
else:
connection.protocol.send_chat(message, irc = True)
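# Illustrative usage (player and sector names assumed): '/move Deuce B4'
# resolves the grid reference with coordinates('B4'), offsets by 32 to aim
# roughly at the middle of that sector, and places the player two blocks
# above the terrain height returned by map.get_height().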
@admin
def where(connection, value = None):
if value is not None:
connection = get_player(connection.protocol, value)
elif connection not in connection.protocol.players:
raise ValueError()
x, y, z = connection.get_location()
return '%s is in %s (%s, %s, %s)' % (connection.name,
to_coordinates(x, y), int(x), int(y), int(z))
@alias('gods')
@admin
def godsilent(connection, value = None):
if value is not None:
connection = get_player(connection.protocol, value)
elif connection not in connection.protocol.players:
raise ValueError()
connection.god = not connection.god
if connection.protocol.set_god_build:
connection.god_build = connection.god
else:
connection.god_build = False
return 'You have entered god mode.' if connection.god else 'You have left god mode.'
@admin
def god(connection, value = None):
godsilent(connection, value)
if connection.god:
message = '%s entered GOD MODE!' % connection.name
else:
message = '%s returned to being a mere human' % connection.name
connection.protocol.send_chat(message, irc = True)
@name('godbuild')
@admin
def god_build(connection, player = None):
protocol = connection.protocol
if player is not None:
player = get_player(protocol, player)
elif connection in protocol.players:
player = connection
else:
raise ValueError()
if not player.god:
return 'Placing god blocks is only allowed in god mode'
player.god_build = not player.god_build
message = ('now placing god blocks' if player.god_build else
'no longer placing god blocks')
player.send_chat("You're %s" % message)
if connection is not player and connection in protocol.players:
connection.send_chat('%s is %s' % (player.name, message))
protocol.irc_say('* %s is %s' % (player.name, message))
@admin
def fly(connection, player = None):
protocol = connection.protocol
if player is not None:
player = get_player(protocol, player)
elif connection in protocol.players:
player = connection
else:
raise ValueError()
player.fly = not player.fly
message = 'now flying' if player.fly else 'no longer flying'
player.send_chat("You're %s" % message)
if connection is not player and connection in protocol.players:
connection.send_chat('%s is %s' % (player.name, message))
protocol.irc_say('* %s is %s' % (player.name, message))
from pyspades.contained import KillAction
from pyspades.server import create_player, set_tool, set_color, input_data, weapon_input
from pyspades.common import make_color
@alias('invis')
@alias('inv')
@admin
def invisible(connection, player = None):
protocol = connection.protocol
if player is not None:
player = get_player(protocol, player)
elif connection in protocol.players:
player = connection
else:
raise ValueError()
player.invisible = not player.invisible
player.filter_visibility_data = player.invisible
player.god = player.invisible
player.god_build = False
player.killing = not player.invisible
if player.invisible:
player.send_chat("You're now invisible")
protocol.irc_say('* %s became invisible' % player.name)
kill_action = KillAction()
kill_action.kill_type = choice([GRENADE_KILL, FALL_KILL])
kill_action.player_id = kill_action.killer_id = player.player_id
reactor.callLater(1.0 / NETWORK_FPS, protocol.send_contained,
kill_action, sender = player)
else:
player.send_chat("You return to visibility")
protocol.irc_say('* %s became visible' % player.name)
x, y, z = player.world_object.position.get()
create_player.player_id = player.player_id
create_player.name = player.name
create_player.x = x
create_player.y = y
create_player.z = z
create_player.weapon = player.weapon
create_player.team = player.team.id
world_object = player.world_object
input_data.player_id = player.player_id
input_data.up = world_object.up
input_data.down = world_object.down
input_data.left = world_object.left
input_data.right = world_object.right
input_data.jump = world_object.jump
input_data.crouch = world_object.crouch
input_data.sneak = world_object.sneak
input_data.sprint = world_object.sprint
set_tool.player_id = player.player_id
set_tool.value = player.tool
set_color.player_id = player.player_id
set_color.value = make_color(*player.color)
weapon_input.primary = world_object.primary_fire
weapon_input.secondary = world_object.secondary_fire
protocol.send_contained(create_player, sender = player, save = True)
protocol.send_contained(set_tool, sender = player)
protocol.send_contained(set_color, sender = player, save = True)
protocol.send_contained(input_data, sender = player)
protocol.send_contained(weapon_input, sender = player)
if connection is not player and connection in protocol.players:
if player.invisible:
return '%s is now invisible' % player.name
else:
return '%s is now visible' % player.name
@admin
def ip(connection, value = None):
if value is None:
if connection not in connection.protocol.players:
raise ValueError()
player = connection
else:
player = get_player(connection.protocol, value)
return 'The IP of %s is %s' % (player.name, player.address[0])
@name('whowas')
@admin
def who_was(connection, value):
value = value.lower()
ret = None
exact_match = False
for name, ip in connection.protocol.player_memory:
name_lower = name.lower()
if name_lower == value:
ret = (name, ip)
exact_match = True
elif not exact_match and name_lower.count(value):
ret = (name, ip)
if ret is None:
raise InvalidPlayer()
return "%s's most recent IP was %s" % ret
@name('resetgame')
@admin
def reset_game(connection):
resetting_player = connection
# irc compatibility
if resetting_player not in connection.protocol.players:
for player in connection.protocol.players.values():
resetting_player = player
if player.admin:
break
if resetting_player is connection:
return
connection.protocol.reset_game(resetting_player)
connection.protocol.on_game_end()
connection.protocol.send_chat('Game has been reset by %s' % connection.name,
irc = True)
from map import Map
import itertools
@name('map')
@admin
def change_planned_map(connection, *pre_maps):
name = connection.name
protocol = connection.protocol
# parse seed numbering
maps, map_list = parse_maps(pre_maps)
if not maps:
return 'Invalid map name'
map = maps[0]
protocol.planned_map = check_rotation([map])[0]
protocol.send_chat('%s changed next map to %s' % (name, map), irc = True)
@name('rotation')
@admin
def change_rotation(connection, *pre_maps):
name = connection.name
protocol = connection.protocol
maps, map_list = parse_maps(pre_maps)
if len(maps) == 0:
return 'Usage: /rotation <map1> <map2> <map3>...'
ret = protocol.set_map_rotation(maps, False)
if not ret:
return 'Invalid map in map rotation (%s)' % ret.map
protocol.send_chat("%s changed map rotation to %s." %
(name, map_list), irc=True)
@name('rotationadd')
@admin
def rotation_add(connection, *pre_maps):
name = connection.name
protocol = connection.protocol
new_maps, map_list = parse_maps(pre_maps)
maps = connection.protocol.get_map_rotation()
map_list = ", ".join(maps) + map_list
maps.extend(new_maps)
ret = protocol.set_map_rotation(maps, False)
if not ret:
return 'Invalid map in map rotation (%s)' % ret.map
protocol.send_chat("%s added %s to map rotation." %
(name, " ".join(pre_maps)), irc=True)
@name('showrotation')
def show_rotation(connection):
return ", ".join(connection.protocol.get_map_rotation())
@name('revertrotation')
@admin
def revert_rotation(connection):
protocol = connection.protocol
maps = protocol.config['maps']
protocol.set_map_rotation(maps, False)
protocol.irc_say("* %s reverted map rotation to %s" % (name, maps))
def mapname(connection):
return 'Current map: ' + connection.protocol.map_info.name
@admin
def advance(connection):
connection.protocol.advance_rotation('Map advance forced.')
@name('timelimit')
@admin
def set_time_limit(connection, value):
value = float(value)
protocol = connection.protocol
protocol.set_time_limit(value)
protocol.send_chat('Time limit set to %s' % value, irc = True)
@name('time')
def get_time_limit(connection):
advance_call = connection.protocol.advance_call
if advance_call is None:
return 'No time limit set'
left = int(math.ceil((advance_call.getTime() - reactor.seconds()) / 60.0))
return 'There are %s minutes left' % left
@name('servername')
@admin
def server_name(connection, *arg):
name = join_arguments(arg)
protocol = connection.protocol
protocol.config['name'] = name
protocol.update_format()
message = "%s changed servername to to '%s'" % (connection.name, name)
print message
connection.protocol.irc_say("* " + message)
if connection in connection.protocol.players:
return message
@name('master')
@admin
def toggle_master(connection):
protocol = connection.protocol
protocol.set_master_state(not protocol.master)
message = ("toggled master broadcast %s" % ['OFF', 'ON'][
int(protocol.master)])
protocol.irc_say("* %s " % connection.name + message)
if connection in connection.protocol.players:
return ("You " + message)
def ping(connection, value = None):
if value is None:
if connection not in connection.protocol.players:
raise ValueError()
player = connection
else:
player = get_player(connection.protocol, value)
ping = player.latency
if value is None:
return ('Your ping is %s ms. Lower ping is better!' % ping)
return "%s's ping is %s ms" % (player.name, ping)
def intel(connection):
if connection not in connection.protocol.players:
raise KeyError()
flag = connection.team.other.flag
if flag.player is not None:
if flag.player is connection:
return "You have the enemy intel, return to base!"
else:
return "%s has the enemy intel!" % flag.player.name
return "Nobody in your team has the enemy intel"
def version(connection):
return 'Server version is "%s"' % connection.protocol.server_version
@name('server')
def server_info(connection):
protocol = connection.protocol
msg = 'You are playing on "%s"' % protocol.name
if protocol.identifier is not None:
msg += ' at %s' % protocol.identifier
return msg
def scripts(connection):
scripts = connection.protocol.config.get('scripts', [])
return 'Scripts enabled: %s' % (', '.join(scripts))
@admin
def fog(connection, r, g, b):
r = int(r)
g = int(g)
b = int(b)
connection.protocol.set_fog_color((r, g, b))
def weapon(connection, value):
player = get_player(connection.protocol, value)
if player.weapon_object is None:
name = '(unknown)'
else:
name = player.weapon_object.name
return '%s has a %s' % (player.name, name)
command_list = [
help,
pm,
to_admin,
login,
kick,
intel,
ip,
who_was,
fog,
ban,
hban,
dban,
wban,
banip,
unban,
undo_ban,
mute,
unmute,
deaf,
global_chat,
say,
csay,
kill,
sanick,
heal,
lock,
unlock,
switch,
set_balance,
rules,
toggle_build,
toggle_kill,
toggle_teamkill,
teleport,
tpsilent,
go_to,
go_to_silent,
move,
unstick,
where,
god,
godsilent,
god_build,
fly,
invisible,
streak,
reset_game,
toggle_master,
change_planned_map,
change_rotation,
revert_rotation,
show_rotation,
rotation_add,
advance,
set_time_limit,
get_time_limit,
server_name,
ping,
version,
server_info,
scripts,
weapon,
mapname
]
def add(func, name = None):
"""
Function to add a command from scripts
"""
if name is None:
name = func.func_name
name = name.lower()
if not hasattr(func, 'argspec'):
func.argspec = inspect.getargspec(func)
add_rights(name, *getattr(func, 'user_types', ()))
commands[name] = func
try:
for alias in func.aliases:
aliases[alias.lower()] = name
except AttributeError:
pass
for command_func in command_list:
add(command_func)
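# Illustrative example (not part of the original command set): a script can
# register its own command through add(), reusing the decorators above:
#
# @admin
# def hello(connection, value = None):
#     return 'Hello, %s!' % (value or connection.name)
# add(hello)
#
# After this, '/hello' is available to admins and dispatched by
# handle_command() below.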
# optional commands
try:
import pygeoip
database = pygeoip.GeoIP('./data/GeoLiteCity.dat')
@admin
@name('from')
def where_from(connection, value = None):
if value is None:
if connection not in connection.protocol.players:
raise ValueError()
player = connection
else:
player = get_player(connection.protocol, value)
record = database.record_by_addr(player.address[0])
if record is None:
return 'Player location could not be determined.'
items = []
for entry in ('country_name', 'city', 'region_name'):
# sometimes, the record entries are numbers or nonexistent
try:
value = record[entry]
int(value) # if this raises a ValueError, it's not a number
continue
except KeyError:
continue
except ValueError:
pass
items.append(value)
return '%s is from %s' % (player.name, ', '.join(items))
add(where_from)
except ImportError:
print "('from' command disabled - missing pygeoip)"
except (IOError, OSError):
print "('from' command disabled - missing data/GeoLiteCity.dat)"
def handle_command(connection, command, parameters):
command = command.lower()
try:
command = aliases[command]
except KeyError:
pass
try:
command_func = commands[command]
except KeyError:
return # 'Invalid command'
aspec = command_func.argspec
min_params = len(aspec.args) - 1 - len(aspec.defaults or ())
max_params = len(aspec.args) - 1 if aspec.varargs is None else None
len_params = len(parameters)
if (len_params < min_params
or max_params is not None and len_params > max_params):
return 'Invalid number of arguments for %s' % command
try:
if not has_rights(command_func, connection):
return "You can't use this command"
return command_func(connection, *parameters)
except KeyError:
return # 'Invalid command'
except TypeError as t:
print 'Command', command, 'failed with args:', parameters
print t
return 'Command failed'
except InvalidPlayer:
return 'No such player'
except InvalidTeam:
return 'Invalid team specifier'
except ValueError:
return 'Invalid parameters'
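# Illustrative note on the arity check above: for pm(connection, value, *arg)
# the argspec yields min_params = 1 and max_params = None (because of *arg),
# so '/pm' alone returns 'Invalid number of arguments for pm' while
# '/pm Deuce hi there' is accepted.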
def handle_input(connection, input):
# for IRC and console
return handle_command(connection, *parse_command(input))
| iamgreaser/pysnip | feature_server/commands.py | Python | gpl-3.0 | 34,282 |
# Locate and load the lldb python module
import os
import sys
def import_lldb():
""" Find and import the lldb modules. This function tries to find the lldb module by:
1. Simply by doing "import lldb" in case the system python installation is aware of lldb. If that fails,
2. Executes the lldb executable pointed to by the LLDB environment variable (or if unset, the first lldb
on PATH") with the -P flag to determine the PYTHONPATH to set. If the lldb executable returns a valid
path, it is added to sys.path and the import is attempted again. If that fails, 3. On Mac OS X the
default Xcode 4.5 installation path.
"""
# Try simple 'import lldb', in case of a system-wide install or a
# pre-configured PYTHONPATH
try:
import lldb
return True
except ImportError:
pass
# Allow overriding default path to lldb executable with the LLDB
# environment variable
lldb_executable = 'lldb'
if 'LLDB' in os.environ and os.path.exists(os.environ['LLDB']):
lldb_executable = os.environ['LLDB']
# Try using builtin module location support ('lldb -P')
from subprocess import check_output, CalledProcessError
try:
with open(os.devnull, 'w') as fnull:
lldb_minus_p_path = check_output(
"%s -P" %
lldb_executable,
shell=True,
stderr=fnull).strip()
if not os.path.exists(lldb_minus_p_path):
# lldb -P returned invalid path, probably too old
pass
else:
sys.path.append(lldb_minus_p_path)
import lldb
return True
except CalledProcessError:
# Cannot run 'lldb -P' to determine location of lldb python module
pass
except ImportError:
# Unable to import lldb module from path returned by `lldb -P`
pass
# On Mac OS X, try the default path to the Xcode lldb module
if "darwin" in sys.platform:
xcode_python_path = "/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Versions/Current/Resources/Python/"
sys.path.append(xcode_python_path)
try:
import lldb
return True
except ImportError:
# Unable to import lldb module from default Xcode python path
pass
return False
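# Illustrative usage (paths assumed): pointing the LLDB environment variable
# at a specific binary makes step 2 of the search use it, e.g. launching vim
# as 'LLDB=/usr/local/bin/lldb vim main.c' resolves the python module via
# '/usr/local/bin/lldb -P'.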
if not import_lldb():
import vim
vim.command(
'redraw | echo "%s"' %
" Error loading lldb module; vim-lldb will be disabled. Check LLDB installation or set LLDB environment variable.")
| apple/swift-lldb | utils/vim-lldb/python-vim-lldb/import_lldb.py | Python | apache-2.0 | 2,603 |
#
#
# Copyright (C) 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Logical units for cluster verification."""
import itertools
import logging
import operator
import re
import time
import ganeti.masterd.instance
import ganeti.rpc.node as rpc
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import pathutils
from ganeti import utils
from ganeti import vcluster
from ganeti import hypervisor
from ganeti import opcodes
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, ResultWithJobs
from ganeti.cmdlib.common import ShareAll, ComputeAncillaryFiles, \
CheckNodePVs, ComputeIPolicyInstanceViolation, AnnotateDiskParams, \
SupportsOob
def _GetAllHypervisorParameters(cluster, instances):
"""Compute the set of all hypervisor parameters.
@type cluster: L{objects.Cluster}
@param cluster: the cluster object
@type instances: list of L{objects.Instance}
@param instances: additional instances from which to obtain parameters
@rtype: list of (origin, hypervisor, parameters)
@return: a list with all parameters found, indicating the hypervisor they
apply to, and the origin (can be "cluster", "os X", or "instance Y")
"""
hvp_data = []
for hv_name in cluster.enabled_hypervisors:
hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
for os_name, os_hvp in cluster.os_hvp.items():
for hv_name, hv_params in os_hvp.items():
if hv_params:
full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
hvp_data.append(("os %s" % os_name, hv_name, full_params))
# TODO: collapse identical parameter values in a single one
for instance in instances:
if instance.hvparams:
hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
cluster.FillHV(instance)))
return hvp_data
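# Illustrative shape of the returned list (names and values made up):
# [("cluster", "kvm", <cluster-level kvm defaults>),
#  ("os debian-image", "kvm", <defaults merged with os_hvp overrides>),
#  ("instance web1", "kvm", <parameters after cluster.FillHV(instance)>)]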
class _VerifyErrors(object):
"""Mix-in for cluster/group verify LUs.
It provides _Error and _ErrorIf, and updates the self.bad boolean. (Expects
self.op and self._feedback_fn to be available.)
"""
ETYPE_ERROR = constants.CV_ERROR
ETYPE_WARNING = constants.CV_WARNING
def _ErrorMsgList(self, error_descriptor, object_name, message_list,
log_type=ETYPE_ERROR):
"""Format multiple error messages.
Based on the opcode's error_codes parameter, either format a
parseable error code, or a simpler error string.
This must be called only from Exec and functions called from Exec.
@type error_descriptor: tuple (string, string, string)
@param error_descriptor: triplet describing the error (object_type,
code, description)
@type object_name: string
@param object_name: name of object (instance, node ..) the error relates to
@type message_list: list of strings
@param message_list: body of error messages
@type log_type: string
@param log_type: log message type (WARNING, ERROR ..)
"""
# Called with empty list - nothing to do
if not message_list:
return
object_type, error_code, _ = error_descriptor
# If the error code is in the list of ignored errors, demote the error to a
# warning
if error_code in self.op.ignore_errors: # pylint: disable=E1101
log_type = self.ETYPE_WARNING
prefixed_list = []
if self.op.error_codes: # This is a mix-in. pylint: disable=E1101
for msg in message_list:
prefixed_list.append(" - %s:%s:%s:%s:%s" % (
log_type, error_code, object_type, object_name, msg))
else:
if not object_name:
object_name = ""
for msg in message_list:
prefixed_list.append(" - %s: %s %s: %s" % (
log_type, object_type, object_name, msg))
# Report messages via the feedback_fn
# pylint: disable=E1101
self._feedback_fn(constants.ELOG_MESSAGE_LIST, prefixed_list)
# do not mark the operation as failed for WARN cases only
if log_type == self.ETYPE_ERROR:
self.bad = True
def _ErrorMsg(self, error_descriptor, object_name, message,
log_type=ETYPE_ERROR):
"""Log a single error message.
"""
self._ErrorMsgList(error_descriptor, object_name, [message], log_type)
# TODO: Replace this method with a cleaner interface, get rid of the if
# condition as it only rarely saves lines, but makes things less readable.
def _ErrorIf(self, cond, *args, **kwargs):
"""Log an error message if the passed condition is True.
"""
if (bool(cond)
or self.op.debug_simulate_errors): # pylint: disable=E1101
self._Error(*args, **kwargs)
# TODO: Replace this method with a cleaner interface
def _Error(self, ecode, item, message, *args, **kwargs):
"""Log an error message if the passed condition is True.
"""
#TODO: Remove 'code' argument in favour of using log_type
log_type = kwargs.get('code', self.ETYPE_ERROR)
if args:
message = message % args
self._ErrorMsgList(ecode, item, [message], log_type=log_type)
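# Illustrative output of _ErrorMsgList (constant values assumed): with
# op.error_codes set, each reported line is machine-parseable, e.g.
# " - ERROR:ENODEN1:node:node1.example.com:not enough memory ..."
# whereas the default format is the human-readable
# " - ERROR: node node1.example.com: not enough memory ...".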
class LUClusterVerify(NoHooksLU):
"""Submits all jobs necessary to verify the cluster.
"""
REQ_BGL = False
def ExpandNames(self):
self.needed_locks = {}
def Exec(self, feedback_fn):
jobs = []
if self.op.group_name:
groups = [self.op.group_name]
depends_fn = lambda: None
else:
groups = self.cfg.GetNodeGroupList()
# Verify global configuration
jobs.append([
opcodes.OpClusterVerifyConfig(ignore_errors=self.op.ignore_errors),
])
# Always depend on global verification
depends_fn = lambda: [(-len(jobs), [])]
jobs.extend(
[opcodes.OpClusterVerifyGroup(group_name=group,
ignore_errors=self.op.ignore_errors,
depends=depends_fn(),
verify_clutter=self.op.verify_clutter)]
for group in groups)
# Fix up all parameters
for op in itertools.chain(*jobs):
op.debug_simulate_errors = self.op.debug_simulate_errors
op.verbose = self.op.verbose
op.error_codes = self.op.error_codes
try:
op.skip_checks = self.op.skip_checks
except AttributeError:
assert not isinstance(op, opcodes.OpClusterVerifyGroup)
return ResultWithJobs(jobs)
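# Note on depends_fn above (illustrative reading): each OpClusterVerifyGroup
# is submitted with depends=[(-len(jobs), [])], a relative reference counting
# back over the jobs already queued in this submission, which always lands on
# the initial OpClusterVerifyConfig job; group verification therefore only
# starts once the global configuration check has finished.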
class LUClusterVerifyDisks(NoHooksLU):
"""Verifies the cluster disks status.
"""
REQ_BGL = False
def ExpandNames(self):
self.share_locks = ShareAll()
if self.op.group_name:
self.needed_locks = {
locking.LEVEL_NODEGROUP: [self.cfg.LookupNodeGroup(self.op.group_name)]
}
else:
self.needed_locks = {
locking.LEVEL_NODEGROUP: locking.ALL_SET,
}
def Exec(self, feedback_fn):
group_names = self.owned_locks(locking.LEVEL_NODEGROUP)
instances = self.cfg.GetInstanceList()
only_ext = compat.all(
self.cfg.GetInstanceDiskTemplate(i) == constants.DT_EXT
for i in instances)
# We skip current NodeGroup verification if there are only external storage
# devices. Currently we provide an interface for external storage providers
# to supply disk verification implementations; however, the current
# ExtStorageDevice does not provide an API for this yet.
#
# This check needs to be revisited if ES_ACTION_VERIFY on ExtStorageDevice
# is implemented.
if only_ext:
logging.info("All instances have ext storage, skipping verify disks.")
return ResultWithJobs([])
else:
# Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
for group in group_names])
class LUClusterVerifyConfig(NoHooksLU, _VerifyErrors):
"""Verifies the cluster config.
"""
REQ_BGL = False
def _VerifyHVP(self, hvp_data):
"""Verifies locally the syntax of the hypervisor parameters.
"""
for item, hv_name, hv_params in hvp_data:
msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
(item, hv_name))
try:
hv_class = hypervisor.GetHypervisorClass(hv_name)
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class.CheckParameterSyntax(hv_params)
except errors.GenericError, err:
self._ErrorIf(True, constants.CV_ECLUSTERCFG, None, msg % str(err))
def ExpandNames(self):
self.needed_locks = dict.fromkeys(locking.LEVELS, locking.ALL_SET)
self.share_locks = ShareAll()
def CheckPrereq(self):
"""Check prerequisites.
"""
# Retrieve all information
self.all_group_info = self.cfg.GetAllNodeGroupsInfo()
self.all_node_info = self.cfg.GetAllNodesInfo()
self.all_inst_info = self.cfg.GetAllInstancesInfo()
def Exec(self, feedback_fn):
"""Verify integrity of cluster, performing various test on nodes.
"""
self.bad = False
self._feedback_fn = feedback_fn
feedback_fn("* Verifying cluster config")
msg_list = self.cfg.VerifyConfig()
self._ErrorMsgList(constants.CV_ECLUSTERCFG, None, msg_list)
feedback_fn("* Verifying cluster certificate files")
for cert_filename in pathutils.ALL_CERT_FILES:
(errcode, msg) = utils.VerifyCertificate(cert_filename)
self._ErrorIf(errcode, constants.CV_ECLUSTERCERT, None, msg, code=errcode)
self._ErrorIf(not utils.CanRead(constants.LUXID_USER,
pathutils.NODED_CERT_FILE),
constants.CV_ECLUSTERCERT,
None,
pathutils.NODED_CERT_FILE + " must be accessible by the " +
constants.LUXID_USER + " user")
feedback_fn("* Verifying hypervisor parameters")
self._VerifyHVP(_GetAllHypervisorParameters(self.cfg.GetClusterInfo(),
self.all_inst_info.values()))
feedback_fn("* Verifying all nodes belong to an existing group")
# We do this verification here because, should this bogus circumstance
# occur, it would never be caught by VerifyGroup, which only acts on
# nodes/instances reachable from existing node groups.
dangling_nodes = set(node for node in self.all_node_info.values()
if node.group not in self.all_group_info)
dangling_instances = {}
no_node_instances = []
for inst in self.all_inst_info.values():
if inst.primary_node in [node.uuid for node in dangling_nodes]:
dangling_instances.setdefault(inst.primary_node, []).append(inst)
elif inst.primary_node not in self.all_node_info:
no_node_instances.append(inst)
pretty_dangling = [
"%s (%s)" %
(node.name,
utils.CommaJoin(inst.name for
inst in dangling_instances.get(node.uuid, [])))
for node in dangling_nodes]
self._ErrorIf(bool(dangling_nodes), constants.CV_ECLUSTERDANGLINGNODES,
None,
"the following nodes (and their instances) belong to a non"
" existing group: %s", utils.CommaJoin(pretty_dangling))
self._ErrorIf(bool(no_node_instances), constants.CV_ECLUSTERDANGLINGINST,
None,
"the following instances have a non-existing primary-node:"
" %s", utils.CommaJoin(inst.name for
inst in no_node_instances))
return not self.bad
class LUClusterVerifyGroup(LogicalUnit, _VerifyErrors):
"""Verifies the status of a node group.
"""
HPATH = "cluster-verify"
HTYPE = constants.HTYPE_CLUSTER
REQ_BGL = False
_HOOKS_INDENT_RE = re.compile("^", re.M)
class NodeImage(object):
"""A class representing the logical and physical status of a node.
@type uuid: string
@ivar uuid: the node UUID to which this object refers
@ivar volumes: a structure as returned from
L{ganeti.backend.GetVolumeList} (runtime)
@ivar instances: a list of running instances (runtime)
@ivar pinst: list of configured primary instances (config)
@ivar sinst: list of configured secondary instances (config)
@ivar sbp: dictionary of {primary-node: list of instances} for all
instances for which this node is secondary (config)
@ivar mfree: free memory, as reported by hypervisor (runtime)
@ivar dfree: free disk, as reported by the node (runtime)
@ivar offline: the offline status (config)
@type rpc_fail: boolean
@ivar rpc_fail: whether the RPC verify call was successful (overall,
not whether the individual keys were correct) (runtime)
@type lvm_fail: boolean
@ivar lvm_fail: whether the RPC call didn't return valid LVM data
@type hyp_fail: boolean
@ivar hyp_fail: whether the RPC call didn't return the instance list
@type ghost: boolean
@ivar ghost: whether this is a known node or not (config)
@type os_fail: boolean
@ivar os_fail: whether the RPC call didn't return valid OS data
@type oslist: list
@ivar oslist: list of OSes as diagnosed by DiagnoseOS
@type vm_capable: boolean
@ivar vm_capable: whether the node can host instances
@type pv_min: float
@ivar pv_min: size in MiB of the smallest PVs
@type pv_max: float
@ivar pv_max: size in MiB of the biggest PVs
"""
def __init__(self, offline=False, uuid=None, vm_capable=True):
self.uuid = uuid
self.volumes = {}
self.instances = []
self.pinst = []
self.sinst = []
self.sbp = {}
self.mfree = 0
self.dfree = 0
self.offline = offline
self.vm_capable = vm_capable
self.rpc_fail = False
self.lvm_fail = False
self.hyp_fail = False
self.ghost = False
self.os_fail = False
self.oslist = {}
self.pv_min = None
self.pv_max = None
def ExpandNames(self):
# This raises errors.OpPrereqError on its own:
self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
# Get instances in node group; this is unsafe and needs verification later
inst_uuids = \
self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
self.needed_locks = {
locking.LEVEL_INSTANCE: self.cfg.GetInstanceNames(inst_uuids),
locking.LEVEL_NODEGROUP: [self.group_uuid],
locking.LEVEL_NODE: [],
}
self.share_locks = ShareAll()
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE:
# Get members of node group; this is unsafe and needs verification later
nodes = set(self.cfg.GetNodeGroup(self.group_uuid).members)
# In Exec(), we warn about mirrored instances that have primary and
# secondary living in separate node groups. To fully verify that
# volumes for these instances are healthy, we will need to do an
# extra call to their secondaries. We ensure here those nodes will
# be locked.
for inst_name in self.owned_locks(locking.LEVEL_INSTANCE):
# Important: access only the instances whose lock is owned
instance = self.cfg.GetInstanceInfoByName(inst_name)
disks = self.cfg.GetInstanceDisks(instance.uuid)
if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
nodes.update(self.cfg.GetInstanceSecondaryNodes(instance.uuid))
self.needed_locks[locking.LEVEL_NODE] = nodes
def CheckPrereq(self):
assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
group_node_uuids = set(self.group_info.members)
group_inst_uuids = \
self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
unlocked_node_uuids = \
group_node_uuids.difference(self.owned_locks(locking.LEVEL_NODE))
unlocked_inst_uuids = \
group_inst_uuids.difference(
[self.cfg.GetInstanceInfoByName(name).uuid
for name in self.owned_locks(locking.LEVEL_INSTANCE)])
if unlocked_node_uuids:
raise errors.OpPrereqError(
"Missing lock for nodes: %s" %
utils.CommaJoin(self.cfg.GetNodeNames(unlocked_node_uuids)),
errors.ECODE_STATE)
if unlocked_inst_uuids:
raise errors.OpPrereqError(
"Missing lock for instances: %s" %
utils.CommaJoin(self.cfg.GetInstanceNames(unlocked_inst_uuids)),
errors.ECODE_STATE)
self.all_node_info = self.cfg.GetAllNodesInfo()
self.all_inst_info = self.cfg.GetAllInstancesInfo()
self.all_disks_info = self.cfg.GetAllDisksInfo()
self.my_node_uuids = group_node_uuids
self.my_node_info = dict((node_uuid, self.all_node_info[node_uuid])
for node_uuid in group_node_uuids)
self.my_inst_uuids = group_inst_uuids
self.my_inst_info = dict((inst_uuid, self.all_inst_info[inst_uuid])
for inst_uuid in group_inst_uuids)
# We detect here the nodes that will need the extra RPC calls for verifying
# split LV volumes; they should be locked.
extra_lv_nodes = {}
for inst in self.my_inst_info.values():
disks = self.cfg.GetInstanceDisks(inst.uuid)
if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
inst_nodes = self.cfg.GetInstanceNodes(inst.uuid)
for nuuid in inst_nodes:
if self.all_node_info[nuuid].group != self.group_uuid:
if nuuid in extra_lv_nodes:
extra_lv_nodes[nuuid].append(inst.name)
else:
extra_lv_nodes[nuuid] = [inst.name]
extra_lv_nodes_set = set(extra_lv_nodes.iterkeys())
unlocked_lv_nodes = \
extra_lv_nodes_set.difference(self.owned_locks(locking.LEVEL_NODE))
if unlocked_lv_nodes:
node_strings = ['%s: [%s]' % (
self.cfg.GetNodeName(node), utils.CommaJoin(extra_lv_nodes[node]))
for node in unlocked_lv_nodes]
raise errors.OpPrereqError("Missing node locks for LV check: %s" %
utils.CommaJoin(node_strings),
errors.ECODE_STATE)
self.extra_lv_nodes = list(extra_lv_nodes_set)
def _VerifyNode(self, ninfo, nresult):
"""Perform some basic validation on data returned from a node.
- check the result data structure is well formed and has all the
mandatory fields
- check ganeti version
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the results from the node
@rtype: boolean
@return: whether overall this call was successful (and we can expect
reasonable values in the response)
"""
# main result, nresult should be a non-empty dict
test = not nresult or not isinstance(nresult, dict)
self._ErrorIf(test, constants.CV_ENODERPC, ninfo.name,
"unable to verify node: no data returned")
if test:
return False
# compares ganeti version
local_version = constants.PROTOCOL_VERSION
remote_version = nresult.get("version", None)
test = not (remote_version and
isinstance(remote_version, (list, tuple)) and
len(remote_version) == 2)
self._ErrorIf(test, constants.CV_ENODERPC, ninfo.name,
"connection to node returned invalid data")
if test:
return False
test = local_version != remote_version[0]
self._ErrorIf(test, constants.CV_ENODEVERSION, ninfo.name,
"incompatible protocol versions: master %s,"
" node %s", local_version, remote_version[0])
if test:
return False
# node seems compatible, we can actually try to look into its results
# full package version
self._ErrorIf(constants.RELEASE_VERSION != remote_version[1],
constants.CV_ENODEVERSION, ninfo.name,
"software version mismatch: master %s, node %s",
constants.RELEASE_VERSION, remote_version[1],
code=self.ETYPE_WARNING)
hyp_result = nresult.get(constants.NV_HYPERVISOR, None)
if ninfo.vm_capable and isinstance(hyp_result, dict):
for hv_name, hv_result in hyp_result.iteritems():
test = hv_result is not None
self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
"hypervisor %s verify failure: '%s'", hv_name, hv_result)
hvp_result = nresult.get(constants.NV_HVPARAMS, None)
if ninfo.vm_capable and isinstance(hvp_result, list):
for item, hv_name, hv_result in hvp_result:
self._ErrorIf(True, constants.CV_ENODEHV, ninfo.name,
"hypervisor %s parameter verify failure (source %s): %s",
hv_name, item, hv_result)
test = nresult.get(constants.NV_NODESETUP,
["Missing NODESETUP results"])
self._ErrorIf(test, constants.CV_ENODESETUP, ninfo.name,
"node setup error: %s", "; ".join(test))
return True
def _VerifyNodeTime(self, ninfo, nresult,
nvinfo_starttime, nvinfo_endtime):
"""Check the node time.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param nvinfo_starttime: the start time of the RPC call
@param nvinfo_endtime: the end time of the RPC call
"""
ntime = nresult.get(constants.NV_TIME, None)
try:
ntime_merged = utils.MergeTime(ntime)
except (ValueError, TypeError):
self._ErrorIf(True, constants.CV_ENODETIME, ninfo.name,
"Node returned invalid time")
return
if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
else:
ntime_diff = None
self._ErrorIf(ntime_diff is not None, constants.CV_ENODETIME, ninfo.name,
"Node time diverges by at least %s from master node time",
ntime_diff)
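# Illustrative example (numbers assumed): if a node's merged time is 200
# seconds behind nvinfo_starttime and that exceeds NODE_MAX_CLOCK_SKEW,
# ntime_diff becomes "200.0s" and a CV_ENODETIME error is reported; a node
# inside the [start - skew, end + skew] window produces no error.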
def _UpdateVerifyNodeLVM(self, ninfo, nresult, vg_name, nimg):
"""Check the node LVM results and update info for cross-node checks.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param vg_name: the configured VG name
@type nimg: L{NodeImage}
@param nimg: node image
"""
if vg_name is None:
return
# checks vg existence and size > 20G
vglist = nresult.get(constants.NV_VGLIST, None)
test = not vglist
self._ErrorIf(test, constants.CV_ENODELVM, ninfo.name,
"unable to check volume groups")
if not test:
vgstatus = utils.CheckVolumeGroupSize(vglist, vg_name,
constants.MIN_VG_SIZE)
self._ErrorIf(vgstatus, constants.CV_ENODELVM, ninfo.name, vgstatus)
# Check PVs
(errmsgs, pvminmax) = CheckNodePVs(nresult, self._exclusive_storage)
for em in errmsgs:
self._Error(constants.CV_ENODELVM, ninfo.name, em)
if pvminmax is not None:
(nimg.pv_min, nimg.pv_max) = pvminmax
def _VerifyGroupDRBDVersion(self, node_verify_infos):
"""Check cross-node DRBD version consistency.
@type node_verify_infos: dict
@param node_verify_infos: infos about nodes as returned from the
node_verify call.
"""
node_versions = {}
for node_uuid, ndata in node_verify_infos.items():
nresult = ndata.payload
if nresult:
version = nresult.get(constants.NV_DRBDVERSION, None)
if version:
node_versions[node_uuid] = version
if len(set(node_versions.values())) > 1:
for node_uuid, version in sorted(node_versions.items()):
msg = "DRBD version mismatch: %s" % version
self._Error(constants.CV_ENODEDRBDHELPER, node_uuid, msg,
code=self.ETYPE_WARNING)
def _VerifyGroupLVM(self, node_image, vg_name):
"""Check cross-node consistency in LVM.
@type node_image: dict
@param node_image: info about nodes, mapping from node to names to
L{NodeImage} objects
@param vg_name: the configured VG name
"""
if vg_name is None:
return
# Only exclusive storage needs this kind of checks
if not self._exclusive_storage:
return
# exclusive_storage wants all PVs to have the same size (approximately),
# if the smallest and the biggest ones are okay, everything is fine.
# pv_min is None iff pv_max is None
vals = [ni for ni in node_image.values() if ni.pv_min is not None]
if not vals:
return
(pvmin, minnode_uuid) = min((ni.pv_min, ni.uuid) for ni in vals)
(pvmax, maxnode_uuid) = max((ni.pv_max, ni.uuid) for ni in vals)
bad = utils.LvmExclusiveTestBadPvSizes(pvmin, pvmax)
self._ErrorIf(bad, constants.CV_EGROUPDIFFERENTPVSIZE, self.group_info.name,
"PV sizes differ too much in the group; smallest (%s MB) is"
" on %s, biggest (%s MB) is on %s",
pvmin, self.cfg.GetNodeName(minnode_uuid),
pvmax, self.cfg.GetNodeName(maxnode_uuid))
def _VerifyNodeBridges(self, ninfo, nresult, bridges):
"""Check the node bridges.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param bridges: the expected list of bridges
"""
if not bridges:
return
missing = nresult.get(constants.NV_BRIDGES, None)
test = not isinstance(missing, list)
self._ErrorIf(test, constants.CV_ENODENET, ninfo.name,
"did not return valid bridge information")
if not test:
self._ErrorIf(bool(missing), constants.CV_ENODENET, ninfo.name,
"missing bridges: %s" % utils.CommaJoin(sorted(missing)))
def _VerifyNodeUserScripts(self, ninfo, nresult):
"""Check the results of user scripts presence and executability on the node
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
"""
test = not constants.NV_USERSCRIPTS in nresult
self._ErrorIf(test, constants.CV_ENODEUSERSCRIPTS, ninfo.name,
"did not return user scripts information")
broken_scripts = nresult.get(constants.NV_USERSCRIPTS, None)
if not test:
self._ErrorIf(broken_scripts, constants.CV_ENODEUSERSCRIPTS, ninfo.name,
"user scripts not present or not executable: %s" %
utils.CommaJoin(sorted(broken_scripts)))
def _VerifyNodeNetwork(self, ninfo, nresult):
"""Check the node network connectivity results.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
"""
test = constants.NV_NODELIST not in nresult
self._ErrorIf(test, constants.CV_ENODESSH, ninfo.name,
"node hasn't returned node ssh connectivity data")
if not test:
if nresult[constants.NV_NODELIST]:
for a_node, a_msg in nresult[constants.NV_NODELIST].items():
self._ErrorIf(True, constants.CV_ENODESSH, ninfo.name,
"ssh communication with node '%s': %s", a_node, a_msg)
if constants.NV_NODENETTEST not in nresult:
self._ErrorMsg(constants.CV_ENODENET, ninfo.name,
"node hasn't returned node tcp connectivity data")
elif nresult[constants.NV_NODENETTEST]:
nlist = utils.NiceSort(nresult[constants.NV_NODENETTEST].keys())
msglist = []
for node in nlist:
msglist.append("tcp communication with node '%s': %s" %
(node, nresult[constants.NV_NODENETTEST][node]))
self._ErrorMsgList(constants.CV_ENODENET, ninfo.name, msglist)
if constants.NV_MASTERIP not in nresult:
self._ErrorMsg(constants.CV_ENODENET, ninfo.name,
"node hasn't returned node master IP reachability data")
elif nresult[constants.NV_MASTERIP] is False: # be explicit, could be None
if ninfo.uuid == self.master_node:
msg = "the master node cannot reach the master IP (not configured?)"
else:
msg = "cannot reach the master IP"
self._ErrorMsg(constants.CV_ENODENET, ninfo.name, msg)
def _VerifyInstance(self, instance, node_image, diskstatus):
"""Verify an instance.
This function checks to see if the required block devices are
available on the instance's node, and that the nodes are in the correct
state.
"""
pnode_uuid = instance.primary_node
pnode_img = node_image[pnode_uuid]
groupinfo = self.cfg.GetAllNodeGroupsInfo()
node_vol_should = {}
self.cfg.GetInstanceLVsByNode(instance.uuid, lvmap=node_vol_should)
cluster = self.cfg.GetClusterInfo()
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
self.group_info)
err = ComputeIPolicyInstanceViolation(ipolicy, instance, self.cfg)
self._ErrorIf(err, constants.CV_EINSTANCEPOLICY, instance.name,
utils.CommaJoin(err), code=self.ETYPE_WARNING)
for node_uuid in node_vol_should:
n_img = node_image[node_uuid]
if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
# ignore missing volumes on offline or broken nodes
continue
for volume in node_vol_should[node_uuid]:
test = volume not in n_img.volumes
self._ErrorIf(test, constants.CV_EINSTANCEMISSINGDISK, instance.name,
"volume %s missing on node %s", volume,
self.cfg.GetNodeName(node_uuid))
if instance.admin_state == constants.ADMINST_UP:
test = instance.uuid not in pnode_img.instances and not pnode_img.offline
self._ErrorIf(test, constants.CV_EINSTANCEDOWN, instance.name,
"instance not running on its primary node %s",
self.cfg.GetNodeName(pnode_uuid))
self._ErrorIf(pnode_img.offline, constants.CV_EINSTANCEBADNODE,
instance.name, "instance is marked as running and lives on"
" offline node %s", self.cfg.GetNodeName(pnode_uuid))
diskdata = [(nname, success, status, idx)
for (nname, disks) in diskstatus.items()
for idx, (success, status) in enumerate(disks)]
for nname, success, bdev_status, idx in diskdata:
# the 'ghost node' construction in Exec() ensures that we have a
# node here
snode = node_image[nname]
bad_snode = snode.ghost or snode.offline
self._ErrorIf(instance.disks_active and
not success and not bad_snode,
constants.CV_EINSTANCEFAULTYDISK, instance.name,
"couldn't retrieve status for disk/%s on %s: %s",
idx, self.cfg.GetNodeName(nname), bdev_status)
if instance.disks_active and success and bdev_status.is_degraded:
msg = "disk/%s on %s is degraded" % (idx, self.cfg.GetNodeName(nname))
code = self.ETYPE_ERROR
accepted_lds = [constants.LDS_OKAY, constants.LDS_SYNC]
if bdev_status.ldisk_status in accepted_lds:
code = self.ETYPE_WARNING
msg += "; local disk state is '%s'" % \
constants.LDS_NAMES[bdev_status.ldisk_status]
self._Error(constants.CV_EINSTANCEFAULTYDISK, instance.name, msg,
code=code)
self._ErrorIf(pnode_img.rpc_fail and not pnode_img.offline,
constants.CV_ENODERPC, self.cfg.GetNodeName(pnode_uuid),
"instance %s, connection to primary node failed",
instance.name)
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
self._ErrorIf(len(secondary_nodes) > 1,
constants.CV_EINSTANCELAYOUT, instance.name,
"instance has multiple secondary nodes: %s",
utils.CommaJoin(secondary_nodes),
code=self.ETYPE_WARNING)
inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes)
disks = self.cfg.GetInstanceDisks(instance.uuid)
if any(es_flags.values()):
if not utils.AllDiskOfType(disks, constants.DTS_EXCL_STORAGE):
# Disk template not compatible with exclusive_storage: no instance
# node should have the flag set
es_nodes = [n
for (n, es) in es_flags.items()
if es]
unsupported = [d.dev_type for d in disks
if d.dev_type not in constants.DTS_EXCL_STORAGE]
self._Error(constants.CV_EINSTANCEUNSUITABLENODE, instance.name,
"instance uses disk types %s, which are not supported on"
" nodes that have exclusive storage set: %s",
utils.CommaJoin(unsupported),
utils.CommaJoin(self.cfg.GetNodeNames(es_nodes)))
for (idx, disk) in enumerate(disks):
self._ErrorIf(disk.spindles is None,
constants.CV_EINSTANCEMISSINGCFGPARAMETER, instance.name,
"number of spindles not configured for disk %s while"
" exclusive storage is enabled, try running"
" gnt-cluster repair-disk-sizes", idx)
if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
instance_nodes = utils.NiceSort(inst_nodes)
instance_groups = {}
for node_uuid in instance_nodes:
instance_groups.setdefault(self.all_node_info[node_uuid].group,
[]).append(node_uuid)
pretty_list = [
"%s (group %s)" % (utils.CommaJoin(self.cfg.GetNodeNames(nodes)),
groupinfo[group].name)
# Sort so that we always list the primary node first.
for group, nodes in sorted(instance_groups.items(),
key=lambda (_, nodes): pnode_uuid in nodes,
reverse=True)]
self._ErrorIf(len(instance_groups) > 1,
constants.CV_EINSTANCESPLITGROUPS,
instance.name, "instance has primary and secondary nodes in"
" different groups: %s", utils.CommaJoin(pretty_list),
code=self.ETYPE_WARNING)
inst_nodes_offline = []
for snode in secondary_nodes:
s_img = node_image[snode]
self._ErrorIf(s_img.rpc_fail and not s_img.offline, constants.CV_ENODERPC,
self.cfg.GetNodeName(snode),
"instance %s, connection to secondary node failed",
instance.name)
if s_img.offline:
inst_nodes_offline.append(snode)
# warn that the instance lives on offline nodes
self._ErrorIf(inst_nodes_offline, constants.CV_EINSTANCEBADNODE,
instance.name, "instance has offline secondary node(s) %s",
utils.CommaJoin(self.cfg.GetNodeNames(inst_nodes_offline)))
# ... or ghost/non-vm_capable nodes
for node_uuid in inst_nodes:
self._ErrorIf(node_image[node_uuid].ghost, constants.CV_EINSTANCEBADNODE,
instance.name, "instance lives on ghost node %s",
self.cfg.GetNodeName(node_uuid))
self._ErrorIf(not node_image[node_uuid].vm_capable,
constants.CV_EINSTANCEBADNODE, instance.name,
"instance lives on non-vm_capable node %s",
self.cfg.GetNodeName(node_uuid))
def _VerifyOrphanVolumes(self, vg_name, node_vol_should, node_image,
reserved):
"""Verify if there are any unknown volumes in the cluster.
The .os, .swap and backup volumes are ignored. All other volumes are
reported as unknown.
@type vg_name: string
@param vg_name: the name of the Ganeti-administered volume group
@type node_vol_should: dict
@param node_vol_should: mapping of node UUIDs to expected LVs on each node
@type node_image: dict
@param node_image: mapping of node UUIDs to L{NodeImage} objects
@type reserved: L{ganeti.utils.FieldSet}
@param reserved: a FieldSet of reserved volume names
"""
for node_uuid, n_img in node_image.items():
if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
self.all_node_info[node_uuid].group != self.group_uuid):
# skip non-healthy nodes
continue
for volume in n_img.volumes:
# skip volumes not belonging to the ganeti-administered volume group
if volume.split('/')[0] != vg_name:
continue
test = ((node_uuid not in node_vol_should or
volume not in node_vol_should[node_uuid]) and
not reserved.Matches(volume))
self._ErrorIf(test, constants.CV_ENODEORPHANLV,
self.cfg.GetNodeName(node_uuid),
"volume %s is unknown", volume,
code=_VerifyErrors.ETYPE_WARNING)
def _VerifyNPlusOneMemory(self, node_image, all_insts):
"""Verify N+1 Memory Resilience.
Check that if one single node dies we can still start all the
instances it was primary for.
"""
cluster_info = self.cfg.GetClusterInfo()
for node_uuid, n_img in node_image.items():
# This code checks that every node which is now listed as
# secondary has enough memory to host all instances it is
# supposed to, should a single other node in the cluster fail.
# FIXME: not ready for failover to an arbitrary node
# FIXME: does not support file-backed instances
# WARNING: we currently take into account down instances as well
# as up ones, considering that even if they're down someone
# might want to start them even in the event of a node failure.
if n_img.offline or \
self.all_node_info[node_uuid].group != self.group_uuid:
# we're skipping nodes marked offline and nodes in other groups from
# the N+1 warning, since most likely we don't have good memory
# information from them; we already list instances living on such
# nodes, and that's enough warning
continue
#TODO(dynmem): also consider ballooning out other instances
for prinode, inst_uuids in n_img.sbp.items():
needed_mem = 0
for inst_uuid in inst_uuids:
bep = cluster_info.FillBE(all_insts[inst_uuid])
if bep[constants.BE_AUTO_BALANCE]:
needed_mem += bep[constants.BE_MINMEM]
test = n_img.mfree < needed_mem
self._ErrorIf(test, constants.CV_ENODEN1,
self.cfg.GetNodeName(node_uuid),
"not enough memory to accomodate instance failovers"
" should node %s fail (%dMiB needed, %dMiB available)",
self.cfg.GetNodeName(prinode), needed_mem, n_img.mfree)
def _CertError(self, *args):
"""Helper function for _VerifyClientCertificates."""
self._Error(constants.CV_ECLUSTERCLIENTCERT, None, *args)
self._cert_error_found = True
def _VerifyClientCertificates(self, nodes, all_nvinfo):
"""Verifies the consistency of the client certificates.
This includes several aspects:
- the individual validation of all nodes' certificates
- the consistency of the master candidate certificate map
- the consistency of the master candidate certificate map with the
certificates that the master candidates are actually using.
@param nodes: the list of nodes to consider in this verification
@param all_nvinfo: the map of results of the verify_node call to
all nodes
"""
rebuild_certs_msg = (
"To rebuild node certificates, please run"
" 'gnt-cluster renew-crypto --new-node-certificates'.")
self._cert_error_found = False
candidate_certs = self.cfg.GetClusterInfo().candidate_certs
if not candidate_certs:
self._CertError(
"The cluster's list of master candidate certificates is empty."
" This may be because you just updated the cluster. " +
rebuild_certs_msg)
return
if len(candidate_certs) != len(set(candidate_certs.values())):
self._CertError(
"There are at least two master candidates configured to use the same"
" certificate.")
# collect the client certificate
for node in nodes:
if node.offline:
continue
nresult = all_nvinfo[node.uuid]
if nresult.fail_msg or not nresult.payload:
continue
(errcode, msg) = nresult.payload.get(constants.NV_CLIENT_CERT, None)
if errcode is not None:
self._CertError(
"Client certificate of node '%s' failed validation: %s (code '%s')",
node.uuid, msg, errcode)
if not errcode:
digest = msg
if node.master_candidate:
if node.uuid in candidate_certs:
if digest != candidate_certs[node.uuid]:
self._CertError(
"Client certificate digest of master candidate '%s' does not"
" match its entry in the cluster's map of master candidate"
" certificates. Expected: %s Got: %s", node.uuid,
digest, candidate_certs[node.uuid])
else:
self._CertError(
"The master candidate '%s' does not have an entry in the"
" map of candidate certificates.", node.uuid)
if digest in candidate_certs.values():
self._CertError(
"Master candidate '%s' is using a certificate of another node.",
node.uuid)
else:
if node.uuid in candidate_certs:
self._CertError(
"Node '%s' is not a master candidate, but still listed in the"
" map of master candidate certificates.", node.uuid)
if (node.uuid not in candidate_certs and
digest in candidate_certs.values()):
self._CertError(
"Node '%s' is not a master candidate and is incorrectly using a"
" certificate of another node which is master candidate.",
node.uuid)
if self._cert_error_found:
self._CertError(rebuild_certs_msg)
def _VerifySshSetup(self, nodes, all_nvinfo):
"""Evaluates the verification results of the SSH setup and clutter test.
@param nodes: List of L{objects.Node} objects
@param all_nvinfo: RPC results
"""
for node in nodes:
if not node.offline:
nresult = all_nvinfo[node.uuid]
if nresult.fail_msg or not nresult.payload:
self._ErrorIf(True, constants.CV_ENODESSH, node.name,
"Could not verify the SSH setup of this node.")
return
for ssh_test in [constants.NV_SSH_SETUP, constants.NV_SSH_CLUTTER]:
result = nresult.payload.get(ssh_test, None)
error_msg = ""
if isinstance(result, list):
error_msg = " ".join(result)
self._ErrorIf(result,
constants.CV_ENODESSH, None, error_msg)
def _VerifyFiles(self, nodes, master_node_uuid, all_nvinfo,
(files_all, files_opt, files_mc, files_vm)):
"""Verifies file checksums collected from all nodes.
@param nodes: List of L{objects.Node} objects
@param master_node_uuid: UUID of master node
@param all_nvinfo: RPC results
"""
# Define functions determining which nodes to consider for a file
files2nodefn = [
(files_all, None),
(files_mc, lambda node: (node.master_candidate or
node.uuid == master_node_uuid)),
(files_vm, lambda node: node.vm_capable),
]
# Build mapping from filename to list of nodes which should have the file
nodefiles = {}
for (files, fn) in files2nodefn:
if fn is None:
filenodes = nodes
else:
filenodes = filter(fn, nodes)
nodefiles.update((filename, frozenset(fn.uuid for fn in filenodes))
for filename in files)
assert set(nodefiles) == (files_all | files_mc | files_vm)
fileinfo = dict((filename, {}) for filename in nodefiles)
ignore_nodes = set()
for node in nodes:
if node.offline:
ignore_nodes.add(node.uuid)
continue
nresult = all_nvinfo[node.uuid]
if nresult.fail_msg or not nresult.payload:
node_files = None
else:
fingerprints = nresult.payload.get(constants.NV_FILELIST, {})
node_files = dict((vcluster.LocalizeVirtualPath(key), value)
for (key, value) in fingerprints.items())
del fingerprints
test = not (node_files and isinstance(node_files, dict))
self._ErrorIf(test, constants.CV_ENODEFILECHECK, node.name,
"Node did not return file checksum data")
if test:
ignore_nodes.add(node.uuid)
continue
# Build per-checksum mapping from filename to nodes having it
for (filename, checksum) in node_files.items():
assert filename in nodefiles
fileinfo[filename].setdefault(checksum, set()).add(node.uuid)
for (filename, checksums) in fileinfo.items():
assert compat.all(len(i) > 10 for i in checksums), "Invalid checksum"
# Nodes having the file
with_file = frozenset(node_uuid
for node_uuids in fileinfo[filename].values()
for node_uuid in node_uuids) - ignore_nodes
expected_nodes = nodefiles[filename] - ignore_nodes
# Nodes missing file
missing_file = expected_nodes - with_file
if filename in files_opt:
# All or no nodes
self._ErrorIf(missing_file and missing_file != expected_nodes,
constants.CV_ECLUSTERFILECHECK, None,
"File %s is optional, but it must exist on all or no"
" nodes (not found on %s)",
filename,
utils.CommaJoin(utils.NiceSort(
self.cfg.GetNodeName(n) for n in missing_file)))
else:
self._ErrorIf(missing_file, constants.CV_ECLUSTERFILECHECK, None,
"File %s is missing from node(s) %s", filename,
utils.CommaJoin(utils.NiceSort(
self.cfg.GetNodeName(n) for n in missing_file)))
# Warn if a node has a file it shouldn't
unexpected = with_file - expected_nodes
self._ErrorIf(unexpected,
constants.CV_ECLUSTERFILECHECK, None,
"File %s should not exist on node(s) %s",
filename,
utils.CommaJoin(utils.NiceSort(
self.cfg.GetNodeName(n) for n in unexpected)))
# See if there are multiple versions of the file
test = len(checksums) > 1
if test:
variants = ["variant %s on %s" %
(idx + 1,
utils.CommaJoin(utils.NiceSort(
self.cfg.GetNodeName(n) for n in node_uuids)))
for (idx, (checksum, node_uuids)) in
enumerate(sorted(checksums.items()))]
else:
variants = []
self._ErrorIf(test, constants.CV_ECLUSTERFILECHECK, None,
"File %s found with %s different checksums (%s)",
filename, len(checksums), "; ".join(variants))
def _VerifyNodeDrbdHelper(self, ninfo, nresult, drbd_helper):
"""Verify the drbd helper.
"""
if drbd_helper:
helper_result = nresult.get(constants.NV_DRBDHELPER, None)
test = (helper_result is None)
self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
"no drbd usermode helper returned")
if helper_result:
status, payload = helper_result
test = not status
self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
"drbd usermode helper check unsuccessful: %s", payload)
test = status and (payload != drbd_helper)
self._ErrorIf(test, constants.CV_ENODEDRBDHELPER, ninfo.name,
"wrong drbd usermode helper: %s", payload)
@staticmethod
def _ComputeDrbdMinors(ninfo, instanceinfo, disks_info, drbd_map, error_if):
"""Gives the DRBD information in a map for a node.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param instanceinfo: the dict of instances
@param disks_info: the dict of disks
@param drbd_map: the DRBD map as returned by
L{ganeti.config.ConfigWriter.ComputeDRBDMap}
@type error_if: callable like L{_ErrorIf}
@param error_if: The error reporting function
@return: dict from minor number to (disk_uuid, instance_uuid, active)
"""
node_drbd = {}
for minor, disk_uuid in drbd_map[ninfo.uuid].items():
test = disk_uuid not in disks_info
error_if(test, constants.CV_ECLUSTERCFG, None,
"ghost disk '%s' in temporary DRBD map", disk_uuid)
      # A ghost disk should never be active; we still record the minor so
      # that we don't emit double warnings for it (both "ghost disk" and
      # "unallocated minor in use")
if test:
node_drbd[minor] = (disk_uuid, None, False)
else:
disk_active = False
disk_instance = None
for (inst_uuid, inst) in instanceinfo.items():
if disk_uuid in inst.disks:
disk_active = inst.disks_active
disk_instance = inst_uuid
break
node_drbd[minor] = (disk_uuid, disk_instance, disk_active)
return node_drbd
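    # Illustrative shape of the mapping returned above (the UUIDs are made-up
    # placeholders, not values from a real cluster):
    #   {0: ("disk-uuid-a", "inst-uuid-x", True),   # active disk of an instance
    #    1: ("ghost-disk-uuid", None, False)}       # ghost disk, never active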
def _VerifyNodeDrbd(self, ninfo, nresult, instanceinfo, disks_info,
drbd_helper, drbd_map):
"""Verifies and the node DRBD status.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param instanceinfo: the dict of instances
@param disks_info: the dict of disks
@param drbd_helper: the configured DRBD usermode helper
@param drbd_map: the DRBD map as returned by
L{ganeti.config.ConfigWriter.ComputeDRBDMap}
"""
self._VerifyNodeDrbdHelper(ninfo, nresult, drbd_helper)
# compute the DRBD minors
node_drbd = self._ComputeDrbdMinors(ninfo, instanceinfo, disks_info,
drbd_map, self._ErrorIf)
# and now check them
used_minors = nresult.get(constants.NV_DRBDLIST, [])
test = not isinstance(used_minors, (tuple, list))
self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
"cannot parse drbd status file: %s", str(used_minors))
if test:
# we cannot check drbd status
return
for minor, (disk_uuid, inst_uuid, must_exist) in node_drbd.items():
test = minor not in used_minors and must_exist
if inst_uuid is not None:
attached = "(attached in instance '%s')" % \
self.cfg.GetInstanceName(inst_uuid)
else:
attached = "(detached)"
self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
"drbd minor %d of disk %s %s is not active",
minor, disk_uuid, attached)
for minor in used_minors:
test = minor not in node_drbd
self._ErrorIf(test, constants.CV_ENODEDRBD, ninfo.name,
"unallocated drbd minor %d is in use", minor)
def _UpdateNodeOS(self, ninfo, nresult, nimg):
"""Builds the node OS structures.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param nimg: the node image object
"""
remote_os = nresult.get(constants.NV_OSLIST, None)
test = (not isinstance(remote_os, list) or
not compat.all(isinstance(v, list) and len(v) == 8
for v in remote_os))
self._ErrorIf(test, constants.CV_ENODEOS, ninfo.name,
"node hasn't returned valid OS data")
nimg.os_fail = test
if test:
return
os_dict = {}
for (name, os_path, status, diagnose,
variants, parameters, api_ver,
trusted) in nresult[constants.NV_OSLIST]:
if name not in os_dict:
os_dict[name] = []
# parameters is a list of lists instead of list of tuples due to
# JSON lacking a real tuple type, fix it:
parameters = [tuple(v) for v in parameters]
os_dict[name].append((os_path, status, diagnose,
set(variants), set(parameters), set(api_ver),
trusted))
nimg.oslist = os_dict
def _VerifyNodeOS(self, ninfo, nimg, base):
"""Verifies the node OS list.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nimg: the node image object
@param base: the 'template' node we match against (e.g. from the master)
"""
assert not nimg.os_fail, "Entered _VerifyNodeOS with failed OS rpc?"
beautify_params = lambda l: ["%s: %s" % (k, v) for (k, v) in l]
for os_name, os_data in nimg.oslist.items():
assert os_data, "Empty OS status for OS %s?!" % os_name
f_path, f_status, f_diag, f_var, f_param, f_api, f_trusted = os_data[0]
self._ErrorIf(not f_status, constants.CV_ENODEOS, ninfo.name,
"Invalid OS %s (located at %s): %s",
os_name, f_path, f_diag)
self._ErrorIf(len(os_data) > 1, constants.CV_ENODEOS, ninfo.name,
"OS '%s' has multiple entries"
" (first one shadows the rest): %s",
os_name, utils.CommaJoin([v[0] for v in os_data]))
# comparisons with the 'base' image
test = os_name not in base.oslist
self._ErrorIf(test, constants.CV_ENODEOS, ninfo.name,
"Extra OS %s not present on reference node (%s)",
os_name, self.cfg.GetNodeName(base.uuid))
if test:
continue
assert base.oslist[os_name], "Base node has empty OS status?"
_, b_status, _, b_var, b_param, b_api, b_trusted = base.oslist[os_name][0]
if not b_status:
# base OS is invalid, skipping
continue
for kind, a, b in [("API version", f_api, b_api),
("variants list", f_var, b_var),
("parameters", beautify_params(f_param),
beautify_params(b_param))]:
self._ErrorIf(a != b, constants.CV_ENODEOS, ninfo.name,
"OS %s for %s differs from reference node %s:"
" [%s] vs. [%s]", kind, os_name,
self.cfg.GetNodeName(base.uuid),
utils.CommaJoin(sorted(a)), utils.CommaJoin(sorted(b)))
for kind, a, b in [("trusted", f_trusted, b_trusted)]:
self._ErrorIf(a != b, constants.CV_ENODEOS, ninfo.name,
"OS %s for %s differs from reference node %s:"
" %s vs. %s", kind, os_name,
self.cfg.GetNodeName(base.uuid), a, b)
# check any missing OSes
missing = set(base.oslist.keys()).difference(nimg.oslist.keys())
self._ErrorIf(missing, constants.CV_ENODEOS, ninfo.name,
"OSes present on reference node %s"
" but missing on this node: %s",
self.cfg.GetNodeName(base.uuid), utils.CommaJoin(missing))
def _VerifyAcceptedFileStoragePaths(self, ninfo, nresult, is_master):
"""Verifies paths in L{pathutils.FILE_STORAGE_PATHS_FILE}.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@type is_master: bool
@param is_master: Whether node is the master node
"""
cluster = self.cfg.GetClusterInfo()
if (is_master and
(cluster.IsFileStorageEnabled() or
cluster.IsSharedFileStorageEnabled())):
try:
fspaths = nresult[constants.NV_ACCEPTED_STORAGE_PATHS]
except KeyError:
# This should never happen
self._ErrorIf(True, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
"Node did not return forbidden file storage paths")
else:
self._ErrorIf(fspaths, constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
"Found forbidden file storage paths: %s",
utils.CommaJoin(fspaths))
else:
self._ErrorIf(constants.NV_ACCEPTED_STORAGE_PATHS in nresult,
constants.CV_ENODEFILESTORAGEPATHS, ninfo.name,
"Node should not have returned forbidden file storage"
" paths")
def _VerifyStoragePaths(self, ninfo, nresult, file_disk_template,
verify_key, error_key):
"""Verifies (file) storage paths.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@type file_disk_template: string
@param file_disk_template: file-based disk template, whose directory
is supposed to be verified
@type verify_key: string
@param verify_key: key for the verification map of this file
verification step
@param error_key: error key to be added to the verification results
in case something goes wrong in this verification step
"""
assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
))
cluster = self.cfg.GetClusterInfo()
if cluster.IsDiskTemplateEnabled(file_disk_template):
self._ErrorIf(
verify_key in nresult,
error_key, ninfo.name,
"The configured %s storage path is unusable: %s" %
(file_disk_template, nresult.get(verify_key)))
def _VerifyFileStoragePaths(self, ninfo, nresult):
"""Verifies (file) storage paths.
@see: C{_VerifyStoragePaths}
"""
self._VerifyStoragePaths(
ninfo, nresult, constants.DT_FILE,
constants.NV_FILE_STORAGE_PATH,
constants.CV_ENODEFILESTORAGEPATHUNUSABLE)
def _VerifySharedFileStoragePaths(self, ninfo, nresult):
"""Verifies (file) storage paths.
@see: C{_VerifyStoragePaths}
"""
self._VerifyStoragePaths(
ninfo, nresult, constants.DT_SHARED_FILE,
constants.NV_SHARED_FILE_STORAGE_PATH,
constants.CV_ENODESHAREDFILESTORAGEPATHUNUSABLE)
def _VerifyGlusterStoragePaths(self, ninfo, nresult):
"""Verifies (file) storage paths.
@see: C{_VerifyStoragePaths}
"""
self._VerifyStoragePaths(
ninfo, nresult, constants.DT_GLUSTER,
constants.NV_GLUSTER_STORAGE_PATH,
constants.CV_ENODEGLUSTERSTORAGEPATHUNUSABLE)
def _VerifyOob(self, ninfo, nresult):
"""Verifies out of band functionality of a node.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
"""
# We just have to verify the paths on master and/or master candidates
# as the oob helper is invoked on the master
if ((ninfo.master_candidate or ninfo.master_capable) and
constants.NV_OOB_PATHS in nresult):
for path_result in nresult[constants.NV_OOB_PATHS]:
self._ErrorIf(path_result, constants.CV_ENODEOOBPATH,
ninfo.name, path_result)
def _UpdateNodeVolumes(self, ninfo, nresult, nimg, vg_name):
"""Verifies and updates the node volume data.
This function will update a L{NodeImage}'s internal structures
with data from the remote call.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param nimg: the node image object
@param vg_name: the configured VG name
"""
nimg.lvm_fail = True
lvdata = nresult.get(constants.NV_LVLIST, "Missing LV data")
if vg_name is None:
pass
elif isinstance(lvdata, basestring):
self._ErrorIf(True, constants.CV_ENODELVM, ninfo.name,
"LVM problem on node: %s", utils.SafeEncode(lvdata))
elif not isinstance(lvdata, dict):
self._ErrorIf(True, constants.CV_ENODELVM, ninfo.name,
"rpc call to node failed (lvlist)")
else:
nimg.volumes = lvdata
nimg.lvm_fail = False
def _UpdateNodeInstances(self, ninfo, nresult, nimg):
"""Verifies and updates the node instance list.
If the listing was successful, then updates this node's instance
list. Otherwise, it marks the RPC call as failed for the instance
list key.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param nimg: the node image object
"""
idata = nresult.get(constants.NV_INSTANCELIST, None)
test = not isinstance(idata, list)
self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
"rpc call to node failed (instancelist): %s",
utils.SafeEncode(str(idata)))
if test:
nimg.hyp_fail = True
else:
nimg.instances = [uuid for (uuid, _) in
self.cfg.GetMultiInstanceInfoByName(idata)]
def _UpdateNodeInfo(self, ninfo, nresult, nimg, vg_name):
"""Verifies and computes a node information map
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param nimg: the node image object
@param vg_name: the configured VG name
"""
# try to read free memory (from the hypervisor)
hv_info = nresult.get(constants.NV_HVINFO, None)
test = not isinstance(hv_info, dict) or "memory_free" not in hv_info
self._ErrorIf(test, constants.CV_ENODEHV, ninfo.name,
"rpc call to node failed (hvinfo)")
if not test:
try:
nimg.mfree = int(hv_info["memory_free"])
except (ValueError, TypeError):
self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
"node returned invalid nodeinfo, check hypervisor")
# FIXME: devise a free space model for file based instances as well
if vg_name is not None:
test = (constants.NV_VGLIST not in nresult or
vg_name not in nresult[constants.NV_VGLIST])
self._ErrorIf(test, constants.CV_ENODELVM, ninfo.name,
"node didn't return data for the volume group '%s'"
" - it is either missing or broken", vg_name)
if not test:
try:
nimg.dfree = int(nresult[constants.NV_VGLIST][vg_name])
except (ValueError, TypeError):
self._ErrorIf(True, constants.CV_ENODERPC, ninfo.name,
"node returned invalid LVM info, check LVM status")
def _CollectDiskInfo(self, node_uuids, node_image, instanceinfo):
"""Gets per-disk status information for all instances.
@type node_uuids: list of strings
@param node_uuids: Node UUIDs
@type node_image: dict of (UUID, L{objects.Node})
@param node_image: Node objects
@type instanceinfo: dict of (UUID, L{objects.Instance})
@param instanceinfo: Instance objects
    @rtype: {instance: {node: [(success, payload)]}}
@return: a dictionary of per-instance dictionaries with nodes as
keys and disk information as values; the disk information is a
list of tuples (success, payload)
"""
node_disks = {}
node_disks_dev_inst_only = {}
diskless_instances = set()
nodisk_instances = set()
for nuuid in node_uuids:
node_inst_uuids = list(itertools.chain(node_image[nuuid].pinst,
node_image[nuuid].sinst))
diskless_instances.update(uuid for uuid in node_inst_uuids
if not instanceinfo[uuid].disks)
disks = [(inst_uuid, disk)
for inst_uuid in node_inst_uuids
for disk in self.cfg.GetInstanceDisks(inst_uuid)]
if not disks:
nodisk_instances.update(uuid for uuid in node_inst_uuids
if instanceinfo[uuid].disks)
# No need to collect data
continue
node_disks[nuuid] = disks
      # _AnnotateDiskParams already makes copies of the disks
dev_inst_only = []
for (inst_uuid, dev) in disks:
(anno_disk,) = AnnotateDiskParams(instanceinfo[inst_uuid], [dev],
self.cfg)
dev_inst_only.append((anno_disk, instanceinfo[inst_uuid]))
node_disks_dev_inst_only[nuuid] = dev_inst_only
assert len(node_disks) == len(node_disks_dev_inst_only)
# Collect data from all nodes with disks
result = self.rpc.call_blockdev_getmirrorstatus_multi(
node_disks.keys(), node_disks_dev_inst_only)
assert len(result) == len(node_disks)
instdisk = {}
for (nuuid, nres) in result.items():
node = self.cfg.GetNodeInfo(nuuid)
disks = node_disks[node.uuid]
if nres.offline:
# No data from this node
data = len(disks) * [(False, "node offline")]
else:
msg = nres.fail_msg
self._ErrorIf(msg, constants.CV_ENODERPC, node.name,
"while getting disk information: %s", msg)
if msg:
# No data from this node
data = len(disks) * [(False, msg)]
else:
data = []
for idx, i in enumerate(nres.payload):
if isinstance(i, (tuple, list)) and len(i) == 2:
data.append(i)
else:
logging.warning("Invalid result from node %s, entry %d: %s",
node.name, idx, i)
data.append((False, "Invalid result from the remote node"))
for ((inst_uuid, _), status) in zip(disks, data):
instdisk.setdefault(inst_uuid, {}).setdefault(node.uuid, []) \
.append(status)
# Add empty entries for diskless instances.
for inst_uuid in diskless_instances:
assert inst_uuid not in instdisk
instdisk[inst_uuid] = {}
# ...and disk-full instances that happen to have no disks
for inst_uuid in nodisk_instances:
assert inst_uuid not in instdisk
instdisk[inst_uuid] = {}
assert compat.all(len(statuses) == len(instanceinfo[inst].disks) and
len(nuuids) <= len(
self.cfg.GetInstanceNodes(instanceinfo[inst].uuid)) and
compat.all(isinstance(s, (tuple, list)) and
len(s) == 2 for s in statuses)
for inst, nuuids in instdisk.items()
for nuuid, statuses in nuuids.items())
if __debug__:
instdisk_keys = set(instdisk)
instanceinfo_keys = set(instanceinfo)
assert instdisk_keys == instanceinfo_keys, \
("instdisk keys (%s) do not match instanceinfo keys (%s)" %
(instdisk_keys, instanceinfo_keys))
return instdisk
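  # Sketch of the structure returned above (identifiers are illustrative
  # assumptions only):
  #   {"inst-uuid-1": {"node-uuid-a": [(True, payload), (False, "node offline")]},
  #    "diskless-inst-uuid": {}}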
@staticmethod
def _SshNodeSelector(group_uuid, all_nodes):
"""Create endless iterators for all potential SSH check hosts.
"""
nodes = [node for node in all_nodes
if (node.group != group_uuid and
not node.offline)]
keyfunc = operator.attrgetter("group")
return map(itertools.cycle,
[sorted(n.name for n in names)
for _, names in itertools.groupby(sorted(nodes, key=keyfunc),
keyfunc)])
@classmethod
def _SelectSshCheckNodes(cls, group_nodes, group_uuid, all_nodes):
"""Choose which nodes should talk to which other nodes.
We will make nodes contact all nodes in their group, and one node from
every other group.
    @rtype: tuple of (list of strings, dict of string to list of strings, list of strings)
@return: a tuple containing the list of all online nodes, a dictionary
mapping node names to additional nodes of other node groups to which
connectivity should be tested, and a list of all online master
candidates
@warning: This algorithm has a known issue if one node group is much
smaller than others (e.g. just one node). In such a case all other
nodes will talk to the single node.
"""
online_nodes = sorted(node.name for node in group_nodes if not node.offline)
online_mcs = sorted(node.name for node in group_nodes
if (node.master_candidate and not node.offline))
sel = cls._SshNodeSelector(group_uuid, all_nodes)
return (online_nodes,
dict((name, sorted([i.next() for i in sel]))
for name in online_nodes),
online_mcs)
def _PrepareSshSetupCheck(self):
"""Prepare the input data for the SSH setup verification.
"""
all_nodes_info = self.cfg.GetAllNodesInfo()
potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
node_status = [
(uuid, node_info.name, node_info.master_candidate,
node_info.name in potential_master_candidates, not node_info.offline)
for (uuid, node_info) in all_nodes_info.items()]
return node_status
def BuildHooksEnv(self):
"""Build hooks env.
    Cluster-Verify hooks run only in the post phase; a hook failure causes
    its output to be logged in the verify output and the verification to fail.
"""
env = {
"CLUSTER_TAGS": " ".join(self.cfg.GetClusterInfo().GetTags()),
}
env.update(("NODE_TAGS_%s" % node.name, " ".join(node.GetTags()))
for node in self.my_node_info.values())
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
return ([], list(self.my_node_info.keys()))
@staticmethod
def _VerifyOtherNotes(feedback_fn, i_non_redundant, i_non_a_balanced,
i_offline, n_offline, n_drained):
feedback_fn("* Other Notes")
if i_non_redundant:
feedback_fn(" - NOTICE: %d non-redundant instance(s) found."
% len(i_non_redundant))
if i_non_a_balanced:
feedback_fn(" - NOTICE: %d non-auto-balanced instance(s) found."
% len(i_non_a_balanced))
if i_offline:
feedback_fn(" - NOTICE: %d offline instance(s) found." % i_offline)
if n_offline:
feedback_fn(" - NOTICE: %d offline node(s) found." % n_offline)
if n_drained:
feedback_fn(" - NOTICE: %d drained node(s) found." % n_drained)
def _VerifyExclusionTags(self, nodename, pinst, ctags):
"""Verify that all instances have different exclusion tags.
@type nodename: string
@param nodename: the name of the node for which the check is done
@type pinst: list of string
@param pinst: list of UUIDs of those instances having the given node
as primary node
@type ctags: list of string
@param ctags: tags of the cluster
"""
exclusion_prefixes = utils.GetExclusionPrefixes(ctags)
tags_seen = set([])
conflicting_tags = set([])
for iuuid in pinst:
allitags = self.my_inst_info[iuuid].tags
if allitags is None:
allitags = []
itags = set([tag for tag in allitags
if utils.IsGoodTag(exclusion_prefixes, tag)])
conflicts = itags.intersection(tags_seen)
if len(conflicts) > 0:
conflicting_tags = conflicting_tags.union(conflicts)
tags_seen = tags_seen.union(itags)
self._ErrorIf(len(conflicting_tags) > 0, constants.CV_EEXTAGS, nodename,
"Tags where there is more than one instance: %s",
list(conflicting_tags), code=constants.CV_WARNING)
def Exec(self, feedback_fn): # pylint: disable=R0915
"""Verify integrity of the node group, performing various test on nodes.
"""
# This method has too many local variables. pylint: disable=R0914
feedback_fn("* Verifying group '%s'" % self.group_info.name)
if not self.my_node_uuids:
# empty node group
feedback_fn("* Empty node group, skipping verification")
return True
self.bad = False
verbose = self.op.verbose
self._feedback_fn = feedback_fn
vg_name = self.cfg.GetVGName()
drbd_helper = self.cfg.GetDRBDHelper()
cluster = self.cfg.GetClusterInfo()
hypervisors = cluster.enabled_hypervisors
node_data_list = self.my_node_info.values()
i_non_redundant = [] # Non redundant instances
i_non_a_balanced = [] # Non auto-balanced instances
i_offline = 0 # Count of offline instances
n_offline = 0 # Count of offline nodes
n_drained = 0 # Count of nodes being drained
node_vol_should = {}
# FIXME: verify OS list
# File verification
filemap = ComputeAncillaryFiles(cluster, False)
# do local checksums
master_node_uuid = self.master_node = self.cfg.GetMasterNode()
master_ip = self.cfg.GetMasterIP()
online_master_candidates = sorted(
node.name for node in node_data_list
if (node.master_candidate and not node.offline))
feedback_fn("* Gathering data (%d nodes)" % len(self.my_node_uuids))
user_scripts = []
if self.cfg.GetUseExternalMipScript():
user_scripts.append(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)
online_nodes = [(node.name, node.primary_ip, node.secondary_ip)
for node in node_data_list if not node.offline]
node_nettest_params = (online_nodes, online_master_candidates)
node_verify_param = {
constants.NV_FILELIST:
[vcluster.MakeVirtualPath(f)
for f in utils.UniqueSequence(filename
for files in filemap
for filename in files)],
constants.NV_NODELIST:
self._SelectSshCheckNodes(node_data_list, self.group_uuid,
self.all_node_info.values()),
constants.NV_HYPERVISOR: hypervisors,
constants.NV_HVPARAMS:
_GetAllHypervisorParameters(cluster, self.all_inst_info.values()),
constants.NV_NODENETTEST: node_nettest_params,
constants.NV_INSTANCELIST: hypervisors,
constants.NV_VERSION: None,
constants.NV_HVINFO: self.cfg.GetHypervisorType(),
constants.NV_NODESETUP: None,
constants.NV_TIME: None,
constants.NV_MASTERIP: (self.cfg.GetMasterNodeName(), master_ip,
online_master_candidates),
constants.NV_OSLIST: None,
constants.NV_NONVMNODES: self.cfg.GetNonVmCapableNodeNameList(),
constants.NV_USERSCRIPTS: user_scripts,
constants.NV_CLIENT_CERT: None,
}
if self.cfg.GetClusterInfo().modify_ssh_setup:
node_verify_param[constants.NV_SSH_SETUP] = \
(self._PrepareSshSetupCheck(), self.cfg.GetClusterInfo().ssh_key_type)
if self.op.verify_clutter:
node_verify_param[constants.NV_SSH_CLUTTER] = True
if vg_name is not None:
node_verify_param[constants.NV_VGLIST] = None
node_verify_param[constants.NV_LVLIST] = vg_name
node_verify_param[constants.NV_PVLIST] = [vg_name]
if cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
if drbd_helper:
node_verify_param[constants.NV_DRBDVERSION] = None
node_verify_param[constants.NV_DRBDLIST] = None
node_verify_param[constants.NV_DRBDHELPER] = drbd_helper
if cluster.IsFileStorageEnabled() or \
cluster.IsSharedFileStorageEnabled():
# Load file storage paths only from master node
node_verify_param[constants.NV_ACCEPTED_STORAGE_PATHS] = \
self.cfg.GetMasterNodeName()
if cluster.IsFileStorageEnabled():
node_verify_param[constants.NV_FILE_STORAGE_PATH] = \
cluster.file_storage_dir
if cluster.IsSharedFileStorageEnabled():
node_verify_param[constants.NV_SHARED_FILE_STORAGE_PATH] = \
cluster.shared_file_storage_dir
# bridge checks
# FIXME: this needs to be changed per node-group, not cluster-wide
bridges = set()
default_nicpp = cluster.nicparams[constants.PP_DEFAULT]
if default_nicpp[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
bridges.add(default_nicpp[constants.NIC_LINK])
    for instance in self.my_inst_info.values():
      for nic in instance.nics:
full_nic = cluster.SimpleFillNIC(nic.nicparams)
if full_nic[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
bridges.add(full_nic[constants.NIC_LINK])
if bridges:
node_verify_param[constants.NV_BRIDGES] = list(bridges)
# Build our expected cluster state
node_image = dict((node.uuid, self.NodeImage(offline=node.offline,
uuid=node.uuid,
vm_capable=node.vm_capable))
for node in node_data_list)
# Gather OOB paths
oob_paths = []
for node in self.all_node_info.values():
path = SupportsOob(self.cfg, node)
if path and path not in oob_paths:
oob_paths.append(path)
if oob_paths:
node_verify_param[constants.NV_OOB_PATHS] = oob_paths
for inst_uuid in self.my_inst_uuids:
instance = self.my_inst_info[inst_uuid]
if instance.admin_state == constants.ADMINST_OFFLINE:
i_offline += 1
inst_nodes = self.cfg.GetInstanceNodes(instance.uuid)
for nuuid in inst_nodes:
if nuuid not in node_image:
gnode = self.NodeImage(uuid=nuuid)
gnode.ghost = (nuuid not in self.all_node_info)
node_image[nuuid] = gnode
self.cfg.GetInstanceLVsByNode(instance.uuid, lvmap=node_vol_should)
pnode = instance.primary_node
node_image[pnode].pinst.append(instance.uuid)
for snode in self.cfg.GetInstanceSecondaryNodes(instance.uuid):
nimg = node_image[snode]
nimg.sinst.append(instance.uuid)
if pnode not in nimg.sbp:
nimg.sbp[pnode] = []
nimg.sbp[pnode].append(instance.uuid)
es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
self.my_node_info.keys())
# The value of exclusive_storage should be the same across the group, so if
    # it's True for at least one node, we act as if it were set for all nodes
self._exclusive_storage = compat.any(es_flags.values())
if self._exclusive_storage:
node_verify_param[constants.NV_EXCLUSIVEPVS] = True
# At this point, we have the in-memory data structures complete,
# except for the runtime information, which we'll gather next
# NOTE: Here we lock the configuration for the duration of RPC calls,
# which means that the cluster configuration changes are blocked during
# this period.
# This is something that should be done only exceptionally and only for
# justified cases!
    # In this case, we need the lock because we can verify the integrity of
    # the configuration files on MCs only if we know nobody else is modifying them.
# FIXME: The check for integrity of config.data should be moved to
# WConfD, which is the only one who can otherwise ensure nobody
# will modify the configuration during the check.
with self.cfg.GetConfigManager(shared=True, forcelock=True):
feedback_fn("* Gathering information about nodes (%s nodes)" %
len(self.my_node_uuids))
# Force the configuration to be fully distributed before doing any tests
self.cfg.FlushConfigGroup(self.group_uuid)
# Due to the way our RPC system works, exact response times cannot be
# guaranteed (e.g. a broken node could run into a timeout). By keeping
# the time before and after executing the request, we can at least have
# a time window.
nvinfo_starttime = time.time()
# Get lock on the configuration so that nobody modifies it concurrently.
# Otherwise it can be modified by other jobs, failing the consistency
# test.
# NOTE: This is an exceptional situation, we should otherwise avoid
    # locking the configuration for anything but very fast, pure operations.
cluster_name = self.cfg.GetClusterName()
hvparams = self.cfg.GetClusterInfo().hvparams
all_nvinfo = self.rpc.call_node_verify(self.my_node_uuids,
node_verify_param,
cluster_name,
hvparams)
nvinfo_endtime = time.time()
if self.extra_lv_nodes and vg_name is not None:
feedback_fn("* Gathering information about extra nodes (%s nodes)" %
len(self.extra_lv_nodes))
extra_lv_nvinfo = \
self.rpc.call_node_verify(self.extra_lv_nodes,
{constants.NV_LVLIST: vg_name},
self.cfg.GetClusterName(),
self.cfg.GetClusterInfo().hvparams)
else:
extra_lv_nvinfo = {}
# If not all nodes are being checked, we need to make sure the master
# node and a non-checked vm_capable node are in the list.
absent_node_uuids = set(self.all_node_info).difference(self.my_node_info)
if absent_node_uuids:
vf_nvinfo = all_nvinfo.copy()
vf_node_info = list(self.my_node_info.values())
additional_node_uuids = []
if master_node_uuid not in self.my_node_info:
additional_node_uuids.append(master_node_uuid)
vf_node_info.append(self.all_node_info[master_node_uuid])
# Add the first vm_capable node we find which is not included,
# excluding the master node (which we already have)
for node_uuid in absent_node_uuids:
nodeinfo = self.all_node_info[node_uuid]
if (nodeinfo.vm_capable and not nodeinfo.offline and
node_uuid != master_node_uuid):
additional_node_uuids.append(node_uuid)
vf_node_info.append(self.all_node_info[node_uuid])
break
key = constants.NV_FILELIST
feedback_fn("* Gathering information about the master node")
vf_nvinfo.update(self.rpc.call_node_verify(
additional_node_uuids, {key: node_verify_param[key]},
self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams))
else:
vf_nvinfo = all_nvinfo
vf_node_info = self.my_node_info.values()
all_drbd_map = self.cfg.ComputeDRBDMap()
feedback_fn("* Gathering disk information (%s nodes)" %
len(self.my_node_uuids))
instdisk = self._CollectDiskInfo(self.my_node_info.keys(), node_image,
self.my_inst_info)
feedback_fn("* Verifying configuration file consistency")
self._VerifyClientCertificates(self.my_node_info.values(), all_nvinfo)
if self.cfg.GetClusterInfo().modify_ssh_setup:
self._VerifySshSetup(self.my_node_info.values(), all_nvinfo)
self._VerifyFiles(vf_node_info, master_node_uuid, vf_nvinfo, filemap)
feedback_fn("* Verifying node status")
refos_img = None
for node_i in node_data_list:
nimg = node_image[node_i.uuid]
if node_i.offline:
if verbose:
feedback_fn("* Skipping offline node %s" % (node_i.name,))
n_offline += 1
continue
if node_i.uuid == master_node_uuid:
ntype = "master"
elif node_i.master_candidate:
ntype = "master candidate"
elif node_i.drained:
ntype = "drained"
n_drained += 1
else:
ntype = "regular"
if verbose:
feedback_fn("* Verifying node %s (%s)" % (node_i.name, ntype))
msg = all_nvinfo[node_i.uuid].fail_msg
self._ErrorIf(msg, constants.CV_ENODERPC, node_i.name,
"while contacting node: %s", msg)
if msg:
nimg.rpc_fail = True
continue
nresult = all_nvinfo[node_i.uuid].payload
nimg.call_ok = self._VerifyNode(node_i, nresult)
self._VerifyNodeTime(node_i, nresult, nvinfo_starttime, nvinfo_endtime)
self._VerifyNodeNetwork(node_i, nresult)
self._VerifyNodeUserScripts(node_i, nresult)
self._VerifyOob(node_i, nresult)
self._VerifyAcceptedFileStoragePaths(node_i, nresult,
node_i.uuid == master_node_uuid)
self._VerifyFileStoragePaths(node_i, nresult)
self._VerifySharedFileStoragePaths(node_i, nresult)
self._VerifyGlusterStoragePaths(node_i, nresult)
if nimg.vm_capable:
self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)
if constants.DT_DRBD8 in cluster.enabled_disk_templates:
self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info,
self.all_disks_info, drbd_helper, all_drbd_map)
if (constants.DT_PLAIN in cluster.enabled_disk_templates) or \
(constants.DT_DRBD8 in cluster.enabled_disk_templates):
self._UpdateNodeVolumes(node_i, nresult, nimg, vg_name)
self._UpdateNodeInstances(node_i, nresult, nimg)
self._UpdateNodeInfo(node_i, nresult, nimg, vg_name)
self._UpdateNodeOS(node_i, nresult, nimg)
if not nimg.os_fail:
if refos_img is None:
refos_img = nimg
self._VerifyNodeOS(node_i, nimg, refos_img)
self._VerifyNodeBridges(node_i, nresult, bridges)
# Check whether all running instances are primary for the node. (This
# can no longer be done from _VerifyInstance below, since some of the
# wrong instances could be from other node groups.)
non_primary_inst_uuids = set(nimg.instances).difference(nimg.pinst)
for inst_uuid in non_primary_inst_uuids:
test = inst_uuid in self.all_inst_info
self._ErrorIf(test, constants.CV_EINSTANCEWRONGNODE,
self.cfg.GetInstanceName(inst_uuid),
"instance should not run on node %s", node_i.name)
self._ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
"node is running unknown instance %s", inst_uuid)
self._VerifyExclusionTags(node_i.name, nimg.pinst, cluster.tags)
self._VerifyGroupDRBDVersion(all_nvinfo)
self._VerifyGroupLVM(node_image, vg_name)
for node_uuid, result in extra_lv_nvinfo.items():
self._UpdateNodeVolumes(self.all_node_info[node_uuid], result.payload,
node_image[node_uuid], vg_name)
feedback_fn("* Verifying instance status")
for inst_uuid in self.my_inst_uuids:
instance = self.my_inst_info[inst_uuid]
if verbose:
feedback_fn("* Verifying instance %s" % instance.name)
self._VerifyInstance(instance, node_image, instdisk[inst_uuid])
# If the instance is not fully redundant we cannot survive losing its
# primary node, so we are not N+1 compliant.
inst_disks = self.cfg.GetInstanceDisks(instance.uuid)
if not utils.AllDiskOfType(inst_disks, constants.DTS_MIRRORED):
i_non_redundant.append(instance)
if not cluster.FillBE(instance)[constants.BE_AUTO_BALANCE]:
i_non_a_balanced.append(instance)
feedback_fn("* Verifying orphan volumes")
reserved = utils.FieldSet(*cluster.reserved_lvs)
# We will get spurious "unknown volume" warnings if any node of this group
# is secondary for an instance whose primary is in another group. To avoid
# them, we find these instances and add their volumes to node_vol_should.
for instance in self.all_inst_info.values():
for secondary in self.cfg.GetInstanceSecondaryNodes(instance.uuid):
if (secondary in self.my_node_info
and instance.uuid not in self.my_inst_info):
self.cfg.GetInstanceLVsByNode(instance.uuid, lvmap=node_vol_should)
break
self._VerifyOrphanVolumes(vg_name, node_vol_should, node_image, reserved)
if constants.VERIFY_NPLUSONE_MEM not in self.op.skip_checks:
feedback_fn("* Verifying N+1 Memory redundancy")
self._VerifyNPlusOneMemory(node_image, self.my_inst_info)
self._VerifyOtherNotes(feedback_fn, i_non_redundant, i_non_a_balanced,
i_offline, n_offline, n_drained)
return not self.bad
def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
"""Analyze the post-hooks' result
This method analyses the hook result, handles it, and sends some
nicely-formatted feedback back to the user.
@param phase: one of L{constants.HOOKS_PHASE_POST} or
L{constants.HOOKS_PHASE_PRE}; it denotes the hooks phase
@param hooks_results: the results of the multi-node hooks rpc call
    @param feedback_fn: function used to send feedback back to the caller
@param lu_result: previous Exec result
@return: the new Exec result, based on the previous result
and hook results
"""
# We only really run POST phase hooks, only for non-empty groups,
# and are only interested in their results
if not self.my_node_uuids:
# empty node group
pass
elif phase == constants.HOOKS_PHASE_POST:
# Used to change hooks' output to proper indentation
feedback_fn("* Hooks Results")
assert hooks_results, "invalid result from hooks"
for node_name in hooks_results:
res = hooks_results[node_name]
msg = res.fail_msg
test = msg and not res.offline
self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
"Communication failure in hooks execution: %s", msg)
if test:
lu_result = False
continue
if res.offline:
# No need to investigate payload if node is offline
continue
for script, hkr, output in res.payload:
test = hkr == constants.HKR_FAIL
self._ErrorIf(test, constants.CV_ENODEHOOKS, node_name,
"Script %s failed, output:", script)
if test:
output = self._HOOKS_INDENT_RE.sub(" ", output)
feedback_fn("%s" % output)
lu_result = False
return lu_result
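# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Ganeti): the bookkeeping at the heart of
# _VerifyNPlusOneMemory, reduced to plain dictionaries. The node names, free
# memory figures and per-instance BE_MINMEM values below are made-up
# assumptions used only to show how the check works: for every secondary
# node, sum the minimum memory of the auto-balanced instances of each primary
# and compare it against the secondary's free memory.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
  secondaries = {
    "node2": {"mfree": 2048, "sbp": {"node1": [512, 1024], "node3": [256]}},
    "node4": {"mfree": 512, "sbp": {"node1": [1024]}},
  }
  problems = []
  for snode, data in sorted(secondaries.items()):
    for prinode, minmems in sorted(data["sbp"].items()):
      needed_mem = sum(minmems)
      if data["mfree"] < needed_mem:
        problems.append((snode, prinode, needed_mem, data["mfree"]))
  # node2 can absorb a failure of node1 or node3; node4 cannot absorb node1
  assert problems == [("node4", "node1", 1024, 512)]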
| yiannist/ganeti | lib/cmdlib/cluster/verify.py | Python | bsd-2-clause | 91,184 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.write("wrapper.py",
"""import os
import sys
open('%s', 'wb').write("wrapper.py\\n")
os.system(" ".join(sys.argv[1:]))
""" % test.workpath('wrapper.out').replace('\\', '\\\\'))
test.write('SConstruct', """
foo = Environment(LIBS = ['foo'], LIBPATH = ['.'])
ar = foo.Dictionary('AR')
bar = Environment(LIBS = ['bar'], LIBPATH = ['.'], AR = r'%(_python_)s wrapper.py ' + ar)
foo.Library(target = 'foo', source = 'foo.c')
bar.Library(target = 'bar', source = 'bar.c')
obj = foo.Object('main', 'main.c')
foo.Program(target = 'f', source = obj)
bar.Program(target = 'b', source = obj)
""" % locals())
test.write('foo.c', r"""
#include <stdio.h>
void
library_function(void)
{
printf("foo.c\n");
}
""")
test.write('bar.c', r"""
#include <stdio.h>
void
library_function(void)
{
printf("bar.c\n");
}
""")
test.write('main.c', r"""
#include <stdio.h>
#include <stdlib.h>
extern void
library_function(void);
int
main(int argc, char *argv[])
{
argv[argc++] = "--";
library_function();
exit (0);
}
""")
test.run(arguments = 'f' + _exe,
stderr=TestSCons.noisy_ar,
match=TestSCons.match_re_dotall)
test.fail_test(os.path.exists(test.workpath('wrapper.out')))
test.run(arguments = 'b' + _exe,
stderr=TestSCons.noisy_ar,
match=TestSCons.match_re_dotall)
test.fail_test(test.read('wrapper.out') != "wrapper.py\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| timj/scons | test/AR/AR.py | Python | mit | 2,795 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 0, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_RelativeDifference/trend_MovingMedian/cycle_30/ar_/test_artificial_128_RelativeDifference_MovingMedian_30__0.py | Python | bsd-3-clause | 276 |
import datetime
from django.utils.html import linebreaks
from django.contrib.sites.models import Site
def camelcase(name):
return ''.join(x.capitalize() or ' ' for x in name.split(' '))
def camelcase_lower(name):
pname = camelcase(name)
return pname[0].lower() + pname[1:]
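# Illustrative usage (not part of the original module):
#   camelcase("value network") -> "ValueNetwork"
#   camelcase_lower("value network") -> "valueNetwork"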
def split_thousands(n, sep=','):
s = str(n)
if len(s) <= 3: return s
return split_thousands(s[:-3], sep) + sep + s[-3:]
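# Illustrative usage (not part of the original module):
#   split_thousands(9876543) -> '9,876,543'
#   split_thousands(512) -> '512'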
def dfs(node, all_nodes, depth):
"""
Performs a recursive depth-first search starting at ``node``.
"""
to_return = [node,]
for subnode in all_nodes:
if subnode.parent and subnode.parent.id == node.id:
to_return.extend(dfs(subnode, all_nodes, depth+1))
return to_return
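# Minimal self-contained sketch (not part of the original module) showing the
# pre-order traversal dfs() produces; _StubNode and the sample tree below are
# assumptions for demonstration only.
if __name__ == "__main__":
    class _StubNode(object):
        def __init__(self, id, parent=None):
            self.id = id
            self.parent = parent
    root = _StubNode(1)
    child_a = _StubNode(2, root)
    child_b = _StubNode(3, root)
    grandchild = _StubNode(4, child_a)
    all_nodes = [root, child_a, child_b, grandchild]
    # Children are visited depth-first in the order they appear in all_nodes.
    assert dfs(root, all_nodes, 0) == [root, child_a, grandchild, child_b]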
def flattened_children(node, all_nodes, to_return):
to_return.append(node)
for subnode in all_nodes:
if subnode.parent and subnode.parent.id == node.id:
flattened_children(subnode, all_nodes, to_return)
return to_return
def flattened_children_by_association(node, all_associations, to_return): #works only for agents
#todo: figure out why this failed when AAs were ordered by from_agent
#import pdb; pdb.set_trace()
to_return.append(node)
for association in all_associations:
#if association.has_associate.id == node.id:
# import pdb; pdb.set_trace()
if association.has_associate.id == node.id and association.association_type.association_behavior == "child":
flattened_children_by_association(association.is_associate, all_associations, to_return)
return to_return
def flattened_group_associations(node, all_associations, to_return): #works only for agents
#import pdb; pdb.set_trace()
to_return.append(node)
for association in all_associations:
if association.has_associate.id == node.id and association.from_agent.agent_type.party_type!="individual":
flattened_group_associations(association.is_associate, all_associations, to_return)
return to_return
def agent_dfs_by_association(node, all_associations, depth): #works only for agents
#todo: figure out why this failed when AAs were ordered by from_agent
#import pdb; pdb.set_trace()
node.depth = depth
to_return = [node,]
for association in all_associations:
if association.has_associate.id == node.id and association.association_type.identifier == "child":
to_return.extend(agent_dfs_by_association(association.is_associate, all_associations, depth+1))
return to_return
def group_dfs_by_has_associate(root, node, all_associations, visited, depth):
#works only for agents, and only follows association_from
#import pdb; pdb.set_trace()
to_return = []
if node not in visited:
visited.append(node)
node.depth = depth
to_return.append(node)
for association in all_associations:
if association.has_associate.id == node.id:
to_return.extend(group_dfs_by_has_associate(root, association.is_associate, all_associations, visited, depth+1))
return to_return
def group_dfs_by_is_associate(root, node, all_associations, visited, depth):
#import pdb; pdb.set_trace()
to_return = []
if node not in visited:
visited.append(node)
node.depth = depth
to_return.append(node)
for association in all_associations:
if association.is_associate.id == node.id:
to_return.extend(group_dfs_by_is_associate(root, association.has_associate, all_associations, visited, depth+1))
return to_return
class Edge(object):
def __init__(self, from_node, to_node, label):
self.from_node = from_node
self.to_node = to_node
self.label = label
self.width = 1
def dictify(self):
d = {
"from_node": self.from_node.node_id(),
"to_node": self.to_node.node_id(),
"label": self.label,
"width": self.width,
}
return d
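# Sketch of dictify() output (the node ids and label are illustrative
# assumptions, produced by whatever node_id() returns for the linked objects):
#   {"from_node": "process-12", "to_node": "process-15",
#    "label": "prototype board", "width": 1}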
def process_link_label(from_process, to_process):
outputs = [oc.resource_type for oc in from_process.outgoing_commitments()]
inputs = [ic.resource_type for ic in to_process.incoming_commitments()]
intersect = set(outputs) & set(inputs)
label = ", ".join(rt.name for rt in intersect)
return label
def process_graph(processes):
nodes = []
visited = set()
connections = set()
edges = []
for p in processes:
if p not in visited:
visited.add(p)
project_id = ""
if p.context_agent:
project_id = p.context_agent.node_id()
d = {
"id": p.node_id(),
"name": p.name,
"project-id": project_id,
"start": p.start_date.strftime('%Y-%m-%d'),
"end": p.end_date.strftime('%Y-%m-%d'),
}
nodes.append(d)
next = p.next_processes()
for n in next:
if n not in visited:
visited.add(n)
project_id = ""
if p.context_agent:
project_id = p.context_agent.node_id()
d = {
"id": n.node_id(),
"name": n.name,
"project-id": project_id,
"start": n.start_date.strftime('%Y-%m-%d'),
"end": n.end_date.strftime('%Y-%m-%d'),
}
nodes.append(d)
c = "-".join([str(p.id), str(n.id)])
if c not in connections:
connections.add(c)
label = process_link_label(p, n)
edge = Edge(p, n, label)
edges.append(edge.dictify())
prev = p.previous_processes()
for n in prev:
if n not in visited:
visited.add(n)
project_id = ""
if p.context_agent:
project_id = p.context_agent.node_id()
d = {
"id": n.node_id(),
"name": n.name,
"project-id": project_id,
"start": n.start_date.strftime('%Y-%m-%d'),
"end": n.end_date.strftime('%Y-%m-%d'),
}
nodes.append(d)
c = "-".join([str(n.id), str(p.id)])
if c not in connections:
connections.add(c)
label = process_link_label(n, p)
edge = Edge(n, p, label)
edges.append(edge.dictify())
big_d = {
"nodes": nodes,
"edges": edges,
}
return big_d
def project_process_resource_agent_graph(project_list, process_list):
processes = {}
rt_set = set()
orders = {}
agents = {}
agent_dict = {}
for p in process_list:
dp = {
"name": p.name,
"type": "process",
"url": "".join([get_url_starter(), p.get_absolute_url()]),
"project-id": get_project_id(p),
"order-id": get_order_id(p),
"start": p.start_date.strftime('%Y-%m-%d'),
"end": p.end_date.strftime('%Y-%m-%d'),
"orphan": p.is_orphan(),
"next": []
}
processes[p.node_id()] = dp
for p in process_list:
order = p.independent_demand()
if order:
orders[order.node_id()] = get_order_details(order, get_url_starter(), processes)
orts = p.outgoing_commitments()
for ort in orts:
if ort not in rt_set:
rt_set.add(ort)
next_ids = [ort.resource_type_node_id() for ort in p.outgoing_commitments()]
processes[p.node_id()]["next"].extend(next_ids)
agnts = p.working_agents()
for a in agnts:
if a not in agent_dict:
agent_dict[a] = []
agent_dict[a].append(p)
for agnt, procs in agent_dict.items():
da = {
"name": agnt.name,
"type": "agent",
"processes": []
}
for p in procs:
da["processes"].append(p.node_id())
agents[agnt.node_id()] = da
big_d = {
"projects": get_projects(project_list),
"processes": processes,
"agents": agents,
"resource_types": get_resource_types(rt_set, processes),
"orders": orders,
}
return big_d
def get_order_id(p):
order = p.independent_demand()
order_id = ''
if order:
order_id = order.node_id()
return order_id
def get_resource_types(rt_set, processes):
resource_types = {}
for ort in rt_set:
rt = ort.resource_type
name = rt.name
if ort.stage:
name = "@".join([name, ort.stage.name])
drt = {
"name": name,
"type": "resourcetype",
"url": "".join([get_url_starter(), rt.get_absolute_url()]),
"photo-url": rt.photo_url,
"next": []
}
for wct in rt.wanting_commitments():
match = False
if ort.stage:
if wct.stage == ort.stage:
match = True
else:
match = True
if match:
if wct.process:
p_id = wct.process.node_id()
if p_id in processes:
drt["next"].append(p_id)
resource_types[ort.resource_type_node_id()] = drt
return resource_types
def get_projects(project_list):
projects = {}
for p in project_list:
d = {
"name": p.name,
}
projects[p.node_id()] = d
return projects
def get_project_id(p):
project_id = ""
if p.context_agent:
project_id = p.context_agent.node_id()
return project_id
def get_url_starter():
return "".join(["http://", Site.objects.get_current().domain])
def get_order_details(order, url_starter, processes):
receiver_name = ""
if order.receiver:
receiver_name = order.receiver.name
dord = {
"name": order.__unicode__(),
"type": "order",
"for": receiver_name,
"due": order.due_date.strftime('%Y-%m-%d'),
"url": "".join([url_starter, order.get_absolute_url()]),
"processes": []
}
return dord
def project_process_graph(project_list, process_list):
projects = {}
processes = {}
agents = {}
agent_dict = {}
for p in project_list:
d = {
"name": p.name,
}
projects[p.node_id()] = d
for p in process_list:
project_id = ""
if p.context_agent:
project_id = p.context_agent.node_id()
dp = {
"name": p.name,
"project-id": project_id,
"start": p.start_date.strftime('%Y-%m-%d'),
"end": p.end_date.strftime('%Y-%m-%d'),
"next": []
}
processes[p.node_id()] = dp
p.dp = dp
agnts = p.working_agents()
for a in agnts:
if a not in agent_dict:
agent_dict[a] = []
agent_dict[a].append(p)
for p in process_list:
next_ids = [n.node_id() for n in p.next_processes()]
p.dp["next"].extend(next_ids)
for agnt, procs in agent_dict.items():
da = {
"name": agnt.name,
"processes": []
}
for p in procs:
da["processes"].append(p.node_id())
agents[agnt.node_id()] = da
big_d = {
"projects": projects,
"processes": processes,
"agents": agents,
}
return big_d
def project_graph(producers):
nodes = []
edges = []
#import pdb; pdb.set_trace()
for p in producers:
for rt in p.produced_resource_type_relationships():
for pt in rt.resource_type.consuming_process_type_relationships():
if p.context_agent and pt.process_type.context_agent:
if p.context_agent != pt.process_type.context_agent:
nodes.extend([p.context_agent, pt.process_type.context_agent, rt.resource_type])
edges.append(Edge(p.context_agent, rt.resource_type, rt.event_type.label))
edges.append(Edge(rt.resource_type, pt.process_type.context_agent, pt.inverse_label()))
return [nodes, edges]
def explode(process_type_relationship, nodes, edges, depth, depth_limit):
if depth > depth_limit:
return
#if process_type_relationship.process_type.name.startswith('Q'):
# return
nodes.append(process_type_relationship.process_type)
edges.append(Edge(
process_type_relationship.process_type,
process_type_relationship.resource_type,
process_type_relationship.event_type.label
))
for rtr in process_type_relationship.process_type.consumed_and_used_resource_type_relationships():
nodes.append(rtr.resource_type)
edges.append(Edge(rtr.resource_type, process_type_relationship.process_type, rtr.inverse_label()))
for art in rtr.resource_type.producing_agent_relationships():
nodes.append(art.agent)
edges.append(Edge(art.agent, rtr.resource_type, art.event_type.label))
    #todo pr: should this use own or own_or_parent_recipes?
for pt in rtr.resource_type.producing_process_type_relationships():
explode(pt, nodes, edges, depth+1, depth_limit)
def graphify(focus, depth_limit):
nodes = [focus]
edges = []
for art in focus.consuming_agent_relationships():
nodes.append(art.agent)
edges.append(Edge(focus, art.agent, art.event_type.label))
    #todo pr: should this use own or own_or_parent_recipes?
for ptr in focus.producing_process_type_relationships():
explode(ptr, nodes, edges, 0, depth_limit)
return [nodes, edges]
def project_network():
producers = [p for p in ProcessType.objects.all() if p.produced_resource_types()]
nodes = []
edges = []
for p in producers:
for rt in p.produced_resource_types():
for pt in rt.consuming_process_types():
if p.context_agent != pt.context_agent:
nodes.extend([p.context_agent, pt.context_agent, rt])
edges.append(Edge(p.context_agent, rt))
edges.append(Edge(rt, pt.context_agent))
return [nodes, edges]
class TimelineEvent(object):
def __init__(self, node, start, end, title, link, description):
self.node = node
self.start = start
self.end = end
self.title = title
self.link = link
self.description = description
def dictify(self):
descrip = ""
if self.description:
descrip = self.description
d = {
"start": self.start.strftime("%b %e %Y 00:00:00 GMT-0600"),
"title": self.title,
"description": linebreaks(descrip),
}
if self.end:
d["end"] = self.end.strftime("%b %e %Y 00:00:00 GMT-0600")
d["durationEvent"] = True
else:
d["durationEvent"] = False
if self.link:
d["link"] = self.link
mrq = []
for mreq in self.node.consumed_input_requirements():
abbrev = mreq.unit_of_quantity.abbrev or ""
label = " ".join([
str(mreq.quantity),
abbrev,
mreq.resource_type.name])
mrq.append(label)
d["consumableReqmts"] = mrq
trq = []
for treq in self.node.used_input_requirements():
abbrev = treq.unit_of_quantity.abbrev or ""
label = " ".join([
str(treq.quantity),
abbrev,
treq.resource_type.name])
trq.append(label)
d["usableReqmts"] = trq
wrq = []
for wreq in self.node.work_requirements():
abbrev = wreq.unit_of_quantity.abbrev or ""
label = " ".join([
str(wreq.quantity),
abbrev,
wreq.resource_type.name])
wrq.append(label)
d["workReqmts"] = wrq
items = []
for item in self.node.order_items():
abbrev = item.unit_of_quantity.abbrev or ""
label = " ".join([
str(item.quantity),
abbrev,
item.resource_type.name])
items.append(label)
d["orderItems"] = items
prevs = []
try:
for p in self.node.previous_processes():
label = "~".join([
p.get_absolute_url(),
p.name])
prevs.append(label)
        except Exception:
            # some node types may not implement previous_processes()
            pass
d["previous"] = prevs
next = []
try:
for p in self.node.next_processes():
label = "~".join([
p.get_absolute_url(),
p.name])
next.append(label)
        except Exception:
            # some node types may not implement next_processes()
            pass
d["next"] = next
return d
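    # Hedged illustration (not part of the original code): for a process node,
    # dictify() produces a plain dict roughly of the form
    #   {"start": "Jan  5 2015 00:00:00 GMT-0600", "end": "...", "title": "...",
    #    "description": "...", "durationEvent": True,
    #    "consumableReqmts": ["10 kg Flour"], "workReqmts": [], ...}
    # i.e. the JSON shape expected by a SIMILE-Timeline-style client widget
    # (the example values above are made up).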
def create_events(orders, processes, events):
for order in orders:
te = TimelineEvent(
order,
order.due_date,
"",
order.timeline_title(),
order.get_absolute_url(),
order.timeline_description(),
)
events['events'].append(te.dictify())
for process in processes:
te = TimelineEvent(
process,
process.start_date,
process.end_date,
process.timeline_title(),
process.get_absolute_url(),
process.timeline_description(),
)
events['events'].append(te.dictify())
def explode_events(resource_type, backsked_date, events):
for art in resource_type.producing_agent_relationships():
order_date = backsked_date - datetime.timedelta(days=art.lead_time)
te = TimelineEvent(
art,
order_date,
"",
art.timeline_title(),
resource_type.url,
resource_type.description,
)
events['events'].append(te.dictify())
for pp in resource_type.producing_process_types():
start_date = backsked_date - datetime.timedelta(days=(pp.estimated_duration/1440))
ppte = TimelineEvent(
pp,
start_date,
backsked_date,
pp.timeline_title(),
pp.url,
pp.description,
)
events['events'].append(ppte.dictify())
for crt in pp.consumed_resource_types():
explode_events(crt, start_date, events)
def backschedule_process_types(commitment, process_type,events):
lead_time=1
arts = None
if commitment.from_agent:
arts = commitment.from_agent.resource_types.filter(resource_type=commitment.resource_type)
if arts:
lead_time = arts[0].lead_time
end_date = commitment.due_date - datetime.timedelta(days=lead_time)
start_date = end_date - datetime.timedelta(days=(process_type.estimated_duration/1440))
ppte = TimelineEvent(
process_type,
start_date,
end_date,
process_type.timeline_title(),
process_type.url,
process_type.description,
)
events['events'].append(ppte.dictify())
for crt in process_type.consumed_resource_types():
explode_events(crt, start_date, events)
def backschedule_process(order, process, events):
te = TimelineEvent(
process,
process.start_date,
process.end_date,
process.timeline_title(),
process.url,
process.notes,
)
events['events'].append(te.dictify())
for ic in process.incoming_commitments():
te = TimelineEvent(
ic,
ic.due_date,
"",
ic.timeline_title(),
ic.url,
ic.description,
)
events['events'].append(te.dictify())
resource_type = ic.resource_type
pcs = ic.associated_producing_commitments()
if pcs:
for pc in pcs:
if pc.order_item == ic.order_item:
te = TimelineEvent(
pc,
pc.due_date,
"",
pc.timeline_title(),
pc.url,
pc.description,
)
events['events'].append(te.dictify())
backschedule_process(order, pc.process, events)
return events
def backschedule_order(order, events):
te = TimelineEvent(
order,
order.due_date,
"",
order.timeline_title(),
"",
order.description,
)
events['events'].append(te.dictify())
for pc in order.producing_commitments():
te = TimelineEvent(
pc,
pc.due_date,
"",
pc.timeline_title(),
pc.url,
pc.description,
)
events['events'].append(te.dictify())
backschedule_process(order, pc.process, events)
class XbillNode(object):
def __init__(self, node, depth):
self.node = node
self.depth = depth
self.open = False
self.close = []
self.xbill_class = self.node.xbill_class()
def xbill_object(self):
return self.node.xbill_child_object()
def xbill_label(self):
return self.node.xbill_label()
def xbill_explanation(self):
return self.node.xbill_explanation()
#def category(self):
# return self.node.xbill_category()
def xbill_dfs(node, all_nodes, visited, depth):
"""
Performs a recursive depth-first search starting at ``node``.
"""
to_return = []
if node not in visited:
visited.append(node)
#to_return = [XbillNode(node,depth),]
to_return.append(XbillNode(node,depth))
#print "+created node:+", node, depth
for subnode in all_nodes:
parents = subnode.xbill_parent_object().xbill_parents()
xclass = subnode.xbill_class()
if subnode.node_id() != node.node_id():
if parents and node in parents:
#print "*active node:*", node, "*depth:*", depth, "*subnode:*", subnode, "*parent_object:*", subnode.xbill_parent_object(), "*parents:*", parents
#import pdb; pdb.set_trace()
to_return.extend(xbill_dfs(subnode, all_nodes, visited, depth+1))
return to_return
def explode_xbill_children(node, nodes, exploded):
if node not in nodes:
nodes.append(node)
#import pdb; pdb.set_trace()
xclass = node.xbill_class()
explode = True
if xclass == 'process-type':
#import pdb; pdb.set_trace()
pt = node.process_type
if pt in exploded:
explode = False
else:
exploded.append(pt)
if explode:
for kid in node.xbill_child_object().xbill_children():
explode_xbill_children(kid, nodes, exploded)
#todo: obsolete
def generate_xbill(resource_type):
nodes = []
exploded = []
for kid in resource_type.xbill_children():
explode_xbill_children(kid, nodes, exploded)
nodes = list(set(nodes))
#import pdb; pdb.set_trace()
to_return = []
visited = []
for kid in resource_type.xbill_children():
to_return.extend(xbill_dfs(kid, nodes, visited, 1))
annotate_tree_properties(to_return)
#to_return.sort(lambda x, y: cmp(x.xbill_object().name,
# y.xbill_object().name))
return to_return
#adapted from threaded_comments.util
def annotate_tree_properties(nodes):
"""
    Iterates through the nodes and adds some magic properties to each of them,
    marking where a nested list of children opens and where it closes.
"""
if not nodes:
return
it = iter(nodes)
    # get the first item; the empty case was already handled above
    old = next(it)
# first item starts a new thread
old.open = True
for c in it:
# increase the depth
if c.depth > old.depth:
c.open = True
else: # c.depth <= old.depth
# close some depths
old.close = range(old.depth - c.depth)
# iterate
old = c
old.close = range(old.depth)
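# Hedged usage sketch (not part of the original module). _FakeNode below is a
# hypothetical stand-in for XbillNode, shown only to illustrate the open/close
# bookkeeping performed by annotate_tree_properties():
#
#   class _FakeNode(object):
#       def __init__(self, depth):
#           self.depth = depth
#           self.open = False
#           self.close = []
#
#   demo = [_FakeNode(d) for d in (1, 2, 2, 3, 1)]
#   annotate_tree_properties(demo)
#   # demo[0], demo[1] and demo[3] now have open == True
#   # demo[3].close spans two levels (range(2)); demo[4].close one (range(1))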
| thierrymarianne/valuenetwork | valuenetwork/valueaccounting/utils.py | Python | agpl-3.0 | 24,484 |
import webapp2
from webapp2 import Route
from webapp2_extras import routes
import handlers_base
import registry.handlers
import widgets.handlers
import demos.mememator.handlers
import demos.shortener.handlers
import demos.instapaper.handlers
import demos.cloudfilepicker.handlers
import demos.imgur.handlers
import demos.inspirationmator.handlers
exampleRoutes = [ Route('/<:.*>', handlers_base.PageHandler, 'examples')]
demoRoutes = [
Route('/mememator/proxy', demos.mememator.handlers.ProxyHandler, 'demos/mememator'),
Route('/mememator/image', demos.mememator.handlers.ImageHandler, 'demos/mememator'),
Route('/mememator/image/<:.*>', demos.mememator.handlers.ImageHandler, 'demos/mememator'),
Route('/mememator/<:.*>', handlers_base.PageHandler, 'demos/mememator'),
Route('/imagestudio/<:.*>', handlers_base.PageHandler, 'demos/imagestudio'),
Route('/shortener/shorten', demos.shortener.handlers.ShortenHandler, 'demos/shortener'),
Route('/shortener/<:.*>', handlers_base.PageHandler, 'demos/shortener'),
Route('/instapaper/add', demos.instapaper.handlers.AddHandler, 'demos/instapaper'),
Route('/instapaper/<:.*>', handlers_base.PageHandler, 'demos/instapaper'),
Route('/imgur/save', demos.imgur.handlers.SaveHandler, 'demos/imgur'),
Route('/imgur/<:.*>', handlers_base.PageHandler, 'demos/imgur'),
Route('/twitter/<:.*>', handlers_base.PageHandler, 'demos/twitter'),
Route('/profilephoto/<:.*>', handlers_base.PageHandler, 'demos/profilephoto'),
Route('/<:.*>', handlers_base.PageHandler, 'demos')
]
app = webapp2.WSGIApplication([
Route('/tasks/crawl', registry.handlers.CrawlerTask),
Route('/tasks/parse-intent', registry.handlers.ParseIntentTask),
routes.DomainRoute('webintents-org.appspot.com', [
Route('/<:.*>', handlers_base.PageHandler, 'webintents')
]),
routes.DomainRoute('webintents.org', [
Route('/<:.*>', handlers_base.PageHandler, 'webintents')
]),
routes.DomainRoute('examples.webintents.org', exampleRoutes),
routes.DomainRoute('demos.webintents.org', demoRoutes),
routes.DomainRoute('registry.webintents-org.appspot.com', [
Route('/<:.*>', handlers_base.PageHandler, 'registry')
]),
routes.DomainRoute('widgets.webintents-org.appspot.com', [
Route('/<:.*>', widgets.handlers.PageHandler, 'widgets')
]),
routes.DomainRoute('registry.webintents.org', [
Route('/indexer.html', registry.handlers.IndexerHandler, 'registry'),
Route('/suggestions.html', registry.handlers.SuggestionsHandler, 'registry'),
Route('/<:.*>', handlers_base.PageHandler, 'registry')
]),
routes.DomainRoute('www.picksomeipsum.com', [
Route('/<:.*>', handlers_base.PageHandler, 'demos/picksomeipsum'),
]),
routes.DomainRoute('www.imagemator.com', [
Route('/<:.*>', handlers_base.PageHandler, 'demos/imagestudio'),
]),
routes.DomainRoute('www.comicmator.com', [
Route('/<:.*>', handlers_base.PageHandler, 'demos/comicmator'),
]),
routes.DomainRoute('www.quicksnapr.com', [
Route('/<:.*>', handlers_base.PageHandler, 'demos/profilephoto'),
]),
routes.DomainRoute('www.binhexdec.com', [
Route('/<:.*>', handlers_base.PageHandler, 'demos/binhexdec'),
]),
routes.DomainRoute('www.cloudfilepicker.com', [
Route('/proxy', demos.cloudfilepicker.handlers.ProxyHandler, 'demos/cloudfilepicker'),
Route('/<:.*>', handlers_base.PageHandler, 'demos/cloudfilepicker'),
]),
routes.DomainRoute('www.mememator.com', [
Route('/proxy', demos.mememator.handlers.ProxyHandler, 'demos/mememator'),
Route('/image', demos.mememator.handlers.ImageHandler, 'demos/mememator'),
Route('/image/<:.*>', demos.mememator.handlers.ImageHandler, 'demos/mememator'),
Route('/<:.*>', handlers_base.PageHandler, 'demos/mememator'),
]),
routes.DomainRoute('www.inspirationmator.com', [
Route('/proxy', demos.inspirationmator.handlers.ProxyHandler, 'demos/inspirationmator'),
Route('/image', demos.inspirationmator.handlers.ImageHandler, 'demos/inspirationmator'),
Route('/image/<:.*>', demos.inspirationmator.handlers.ImageHandler, 'demos/inspirationmator'),
Route('/<:.*>', handlers_base.PageHandler, 'demos/inspirationmator'),
])
])
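# Hedged note (not part of the original routing table): webapp2's DomainRoute
# matches only on the request host, so the nested Route list still decides the
# path. For example, a request to http://www.mememator.com/image/abc would be
# dispatched to demos.mememator.handlers.ImageHandler with 'abc' captured by
# the '<:.*>' pattern.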
| PaulKinlan/WebIntents | server/server.py | Python | apache-2.0 | 4,274 |
#! /usr/bin/python
# Copyright (C) 2012-2015, Alphan Ulusoy (alphan@bu.edu)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
from lomap.algorithms.product import ts_times_ts
from lomap.algorithms.optimal_run import optimal_run
# Logger configuration
logger = logging.getLogger(__name__)
def pretty_print(agent_cnt, prefix, suffix):
import string
# Pretty print the prefix and suffix_cycle on team_ts
hdr_line_1 = ''
hdr_line_2 = ''
for i in range(0,agent_cnt):
hdr_line_1 += string.ljust('Robot-%d' % (i+1), 20)
hdr_line_2 += string.ljust('-------', 20)
logger.info(hdr_line_1)
logger.info(hdr_line_2)
logger.info('*** Prefix: ***')
for s in prefix:
line = ''
for ss in s:
line += string.ljust('%s' % (ss,), 20)
logger.info(line)
logger.info('*** Suffix: ***')
for s in suffix:
line = ''
for ss in s:
line += string.ljust('%s' % (ss,), 20)
logger.info(line)
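# Hedged illustration (not part of the original module): for two agents the
# logged output is a fixed-width table along the lines of
#   Robot-1             Robot-2
#   -------             -------
#   *** Prefix: ***
#   s0                  q0
#   s1                  q1
#   *** Suffix: ***
#   ...
# where the state names are placeholders.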
def multi_agent_optimal_run(ts_tuple, formula, opt_prop):
# Construct the team_ts
team_ts = ts_times_ts(ts_tuple)
# Find the optimal run and shortest prefix on team_ts
prefix_length, prefix_on_team_ts, suffix_cycle_cost, suffix_cycle_on_team_ts = optimal_run(team_ts, formula, opt_prop)
# Pretty print the run
pretty_print(len(ts_tuple), prefix_on_team_ts, suffix_cycle_on_team_ts)
# Project the run on team_ts down to individual agents
prefixes = []
suffix_cycles = []
for i in range(0, len(ts_tuple)):
ts = ts_tuple[i]
        prefixes.append([x for x in [x[i] if x[i] in ts.g.node else None for x in prefix_on_team_ts] if x is not None])
        suffix_cycles.append([x for x in [x[i] if x[i] in ts.g.node else None for x in suffix_cycle_on_team_ts] if x is not None])
return (prefix_length, prefixes, suffix_cycle_cost, suffix_cycles)
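# Hedged illustration (hypothetical data, not part of the original module):
# projecting the team run down to agent i keeps coordinate i whenever it is a
# genuine state of that agent's transition system, e.g.
#   team_run = [('a1', 'b1'), ('a2', 'b1'), ('a2', 'b2')]
#   agent_0_states = {'a1', 'a2'}
#   [x[0] for x in team_run if x[0] in agent_0_states]  ->  ['a1', 'a2', 'a2']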
| wasserfeder/lomap | lomap/algorithms/multi_agent_optimal_run.py | Python | gpl-2.0 | 2,403 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDt(RPackage):
"""Data objects in R can be rendered as HTML tables using the JavaScript
library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'
library has been included in this R package. The package name 'DT' is an
abbreviation of 'DataTables'."""
homepage = "http://rstudio.github.io/DT"
url = "https://cloud.r-project.org/src/contrib/DT_0.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/DT"
version('0.13', sha256='79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')
version('0.8', sha256='90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')
version('0.7', sha256='1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')
version('0.6', sha256='2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')
version('0.4', sha256='3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')
version('0.3', sha256='ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')
version('0.2', sha256='a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')
version('0.1', sha256='129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')
depends_on('r-htmltools@0.3.6:', type=('build', 'run'))
depends_on('r-htmlwidgets@1.3:', type=('build', 'run'))
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-crosstalk', type=('build', 'run'))
depends_on('r-promises', when='@0.5:', type=('build', 'run'))
depends_on('r-jsonlite@0.9.16:', when='@0.8:', type=('build', 'run'))
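    # Hedged usage note (not part of the package recipe): a specific version can
    # be requested on the command line, e.g. `spack install r-dt@0.13`.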
| rspavel/spack | var/spack/repos/builtin/packages/r-dt/package.py | Python | lgpl-2.1 | 1,816 |
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import ez_setup
ez_setup.use_setuptools()
from ochopod import __version__
from setuptools import setup, find_packages
if sys.version_info < (2, 7):
raise NotImplementedError("python 2.7 or higher required")
setup(
name='ochopod',
version=__version__,
packages=find_packages(),
install_requires=
[
'flask>=0.10.1',
'kazoo>=2.0.0',
'jinja2>=2.7.3',
'pykka>=1.2.0'
],
entry_points=
{
'console_scripts':
[
'ocho = ochopod.tools.main:go'
]
},
package_data={
'ochopod':
[
'resources/*'
]
},
author='Autodesk Inc.',
author_email='autodesk.cloud.opensource@autodesk.com',
url='https://git.autodesk.com/cloudplatform-compute/ochopod',
license='Apache License, Version 2.0',
description='Ochopod, automatic container orchestration over Apache Mesos'
)
 | svoorhees/ochopod | sdk/setup.py | Python | apache-2.0 | 1,557 |
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from numba import njit
@njit
def series_lt():
s1 = pd.Series([5, 4, 3, 2, 1])
s2 = pd.Series([0, 2, 3, 6, 8])
return s1.lt(s2) # Expect series of False, False, False, True, True
print(series_lt())
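# Hedged note (illustration only): the same element-wise comparison works in
# plain pandas without the decorator, e.g.
#   pd.Series([5, 4, 3, 2, 1]).lt(pd.Series([0, 2, 3, 6, 8]))
# returns the identical boolean Series; @njit only adds JIT compilation.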
| IntelLabs/hpat | examples/series/series_lt.py | Python | bsd-2-clause | 1,748 |
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Rasperry-Pi camera module
# Copyright (c) 2013-2015 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The encoders module defines encoder classes for use by the camera. Most users
will have no need to use these classes directly, but advanced users may find
them useful as base classes for :ref:`custom_encoders`.
.. note::
All classes in this module are available from the :mod:`picamera` namespace
without having to import :mod:`picamera.encoders` directly.
The following classes are defined in the module:
PiVideoFrameType
================
.. autoclass:: PiVideoFrameType
:members:
PiVideoFrame
============
.. autoclass:: PiVideoFrame(index, frame_type, frame_size, video_size, split_size, timestamp)
:members:
PiEncoder
=========
.. autoclass:: PiEncoder
:members:
:private-members:
PiVideoEncoder
==============
.. autoclass:: PiVideoEncoder
:members:
:private-members:
PiImageEncoder
==============
.. autoclass:: PiImageEncoder
:members:
:private-members:
PiRawMixin
==========
.. autoclass:: PiRawMixin
:members:
:private-members:
PiCookedVideoEncoder
====================
.. autoclass:: PiCookedVideoEncoder
:members:
:private-members:
PiRawVideoEncoder
=================
.. autoclass:: PiRawVideoEncoder
:members:
:private-members:
PiOneImageEncoder
=================
.. autoclass:: PiOneImageEncoder
:members:
:private-members:
PiMultiImageEncoder
===================
.. autoclass:: PiMultiImageEncoder
:members:
:private-members:
PiRawImageMixin
===============
.. autoclass:: PiRawImageMixin
:members:
:private-members:
PiCookedOneImageEncoder
=======================
.. autoclass:: PiCookedOneImageEncoder
:members:
:private-members:
PiRawOneImageEncoder
====================
.. autoclass:: PiRawOneImageEncoder
:members:
:private-members:
PiCookedMultiImageEncoder
=========================
.. autoclass:: PiCookedMultiImageEncoder
:members:
:private-members:
PiRawMultiImageEncoder
======================
.. autoclass:: PiRawMultiImageEncoder
:members:
:private-members:
"""
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str and range equivalent to Py3's
str = type('')
try:
range = xrange
except NameError:
pass
import io
import datetime
import threading
import warnings
import ctypes as ct
from collections import namedtuple
import picamera.mmal as mmal
from picamera.exc import (
mmal_check,
PiCameraError,
PiCameraMMALError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraDeprecated,
)
class PiVideoFrameType(object):
"""
This class simply defines constants used to represent the type of a frame
in :attr:`PiVideoFrame.frame_type`. Effectively it is a namespace for an
enum.
.. attribute:: frame
Indicates a predicted frame (P-frame). This is the most common frame
type.
.. attribute:: key_frame
Indicates an intra-frame (I-frame) also known as a key frame.
.. attribute:: sps_header
Indicates an inline SPS/PPS header (rather than picture data) which is
typically used as a split point.
.. attribute:: motion_data
Indicates the frame is inline motion vector data, rather than picture
data.
.. versionadded:: 1.5
"""
frame = 0
key_frame = 1
sps_header = 2
motion_data = 3
class PiVideoFrame(namedtuple('PiVideoFrame', (
'index', # the frame number, where the first frame is 0
'frame_type', # a constant indicating the frame type (see PiVideoFrameType)
'frame_size', # the size (in bytes) of the frame's data
'video_size', # the size (in bytes) of the video so far
'split_size', # the size (in bytes) of the video since the last split
'timestamp', # the presentation timestamp (PTS) of the frame
'complete', # whether the frame is complete or not
))):
"""
This class is a namedtuple derivative used to store information about a
video frame. It is recommended that you access the information stored by
this class by attribute name rather than position (for example:
``frame.index`` rather than ``frame[0]``).
.. attribute:: index
Returns the zero-based number of the frame. This is a monotonic counter
that is simply incremented every time the camera starts outputting a
new frame. As a consequence, this attribute cannot be used to detect
dropped frames. Nor does it necessarily represent actual frames; it
will be incremented for SPS headers and motion data buffers too.
.. attribute:: frame_type
Returns a constant indicating the kind of data that the frame contains
(see :class:`PiVideoFrameType`). Please note that certain frame types
contain no image data at all.
.. attribute:: frame_size
Returns the size in bytes of the current frame. If a frame is written
in multiple chunks, this value will increment while :attr:`index`
remains static. Query :attr:`complete` to determine whether the frame
has been completely output yet.
.. attribute:: video_size
Returns the size in bytes of the entire video up to the current frame.
Note that this is unlikely to match the size of the actual file/stream
written so far. This is because a stream may utilize buffering which
will cause the actual amount written (e.g. to disk) to lag behind the
value reported by this attribute.
.. attribute:: split_size
Returns the size in bytes of the video recorded since the last call to
either :meth:`~picamera.camera.PiCamera.start_recording` or
:meth:`~picamera.camera.PiCamera.split_recording`. For the reasons
explained above, this may differ from the size of the actual
file/stream written so far.
.. attribute:: timestamp
Returns the presentation timestamp (PTS) of the current frame as
reported by the encoder. This is represented by the number of
microseconds (millionths of a second) since video recording started. As
the frame attribute is only updated when the encoder outputs the end of
a frame, this value may lag behind the actual time since
:meth:`~picamera.camera.PiCamera.start_recording` was called.
.. warning::
Currently, the video encoder occasionally returns "time unknown"
values in this field which picamera represents as ``None``. If you
are querying this property you will need to check the value is not
``None`` before using it.
.. attribute:: complete
Returns a bool indicating whether the current frame is complete or not.
If the frame is complete then :attr:`frame_size` will not increment
any further, and will reset for the next frame.
.. versionchanged:: 1.5
Deprecated :attr:`header` and :attr:`keyframe` attributes and added the
new :attr:`frame_type` attribute instead.
.. versionchanged:: 1.9
Added the :attr:`complete` attribute.
"""
@property
def position(self):
"""
Returns the zero-based position of the frame in the stream containing
it.
"""
return self.split_size - self.frame_size
@property
def keyframe(self):
"""
Returns a bool indicating whether the current frame is a keyframe (an
intra-frame, or I-frame in MPEG parlance).
.. deprecated:: 1.5
Please compare :attr:`frame_type` to
:attr:`PiVideoFrameType.key_frame` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiVideoFrame.keyframe is deprecated; please check '
'PiVideoFrame.frame_type for equality with '
'PiVideoFrameType.key_frame instead'))
return self.frame_type == PiVideoFrameType.key_frame
@property
def header(self):
"""
Contains a bool indicating whether the current frame is actually an
SPS/PPS header. Typically it is best to split an H.264 stream so that
it starts with an SPS/PPS header.
.. deprecated:: 1.5
Please compare :attr:`frame_type` to
:attr:`PiVideoFrameType.sps_header` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiVideoFrame.header is deprecated; please check '
'PiVideoFrame.frame_type for equality with '
'PiVideoFrameType.sps_header instead'))
return self.frame_type == PiVideoFrameType.sps_header
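# Hedged usage sketch (not part of this module): while a recording is active,
# the owning PiCamera instance exposes the most recent PiVideoFrame as
# ``camera.frame``, e.g.
#
#   import time
#   import picamera
#   with picamera.PiCamera() as camera:
#       camera.start_recording('video.h264')
#       time.sleep(1)
#       f = camera.frame
#       if f.complete and f.timestamp is not None:
#           print('frame %d at %d us' % (f.index, f.timestamp))
#       camera.stop_recording()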
def _debug_buffer(buf):
f = buf[0].flags
print(''.join((
'flags=',
'E' if f & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END else '_',
'K' if f & mmal.MMAL_BUFFER_HEADER_FLAG_KEYFRAME else '_',
'C' if f & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG else '_',
'M' if f & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else '_',
'X' if f & mmal.MMAL_BUFFER_HEADER_FLAG_EOS else '_',
' ',
'len=%d' % buf[0].length,
)))
def _encoder_callback(port, buf):
#_debug_buffer(buf)
encoder = ct.cast(port[0].userdata, ct.POINTER(ct.py_object))[0]
encoder._callback(port, buf)
_encoder_callback = mmal.MMAL_PORT_BH_CB_T(_encoder_callback)
class PiEncoder(object):
"""
Base implementation of an MMAL encoder for use by PiCamera.
The *parent* parameter specifies the :class:`~picamera.camera.PiCamera`
instance that has constructed the encoder. The *camera_port* parameter
provides the MMAL camera port that the encoder should enable for capture
(this will be the still or video port of the camera component). The
*input_port* parameter specifies the MMAL port that the encoder should
connect to its input. Sometimes this will be the same as the camera port,
but if other components are present in the pipeline (e.g. a splitter), it
may be different.
The *format* parameter specifies the format that the encoder should
produce in its output. This is specified as a string and will be one of
the following for image encoders:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
And one of the following for video encoders:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter is either ``None`` (indicating no resizing
should take place), or a ``(width, height)`` tuple specifying the
resolution that the output of the encoder should be resized to.
Finally, the *options* parameter specifies additional keyword arguments
that can be used to configure the encoder (e.g. bitrate for videos, or
quality for images).
The class has a number of attributes:
.. attribute:: camera_port
A pointer to the camera output port that needs to be activated and
deactivated in order to start/stop capture. This is not necessarily the
port that the encoder component's input port is connected to (for
example, in the case of video-port based captures, this will be the
camera video port behind the splitter).
.. attribute:: encoder
A pointer to the MMAL encoder component, or None if no encoder
component has been created (some encoder classes don't use an actual
encoder component, for example :class:`PiRawImageMixin`).
.. attribute:: encoder_connection
A pointer to the MMAL connection linking the encoder's input port to
the camera, splitter, or resizer output port (depending on
configuration), if any.
.. attribute:: event
A :class:`threading.Event` instance used to synchronize operations
(like start, stop, and split) between the control thread and the
callback thread.
.. attribute:: exception
If an exception occurs during the encoder callback, this attribute is
used to store the exception until it can be re-raised in the control
thread.
.. attribute:: format
The image or video format that the encoder is expected to produce. This
is equal to the value of the *format* parameter.
.. attribute:: input_port
A pointer to the MMAL port that the encoder component's input port
should be connected to.
.. attribute:: output_port
A pointer to the MMAL port of the encoder's output. In the case no
encoder component is created, this should be the camera/component
output port responsible for producing data. In other words, this
attribute **must** be set on initialization.
.. attribute:: outputs
A mapping of ``key`` to ``(output, opened)`` tuples where ``output``
is a file-like object, and ``opened`` is a bool indicating whether or
not we opened the output object (and thus whether we are responsible
for eventually closing it).
.. attribute:: outputs_lock
A :func:`threading.Lock` instance used to protect access to
:attr:`outputs`.
.. attribute:: parent
The :class:`~picamera.camera.PiCamera` instance that created this
PiEncoder instance.
.. attribute:: pool
A pointer to a pool of MMAL buffers.
.. attribute:: resizer
A pointer to the MMAL resizer component, or None if no resizer
component has been created.
.. attribute:: resizer_connection
A pointer to the MMAL connection linking the resizer's input port to
the camera or splitter's output port, if any.
"""
encoder_type = None
def __init__(
self, parent, camera_port, input_port, format, resize, **options):
self.parent = parent
self.format = format
self.encoder = None
self.resizer = None
self.encoder_connection = None
self.resizer_connection = None
self.camera_port = camera_port
self.input_port = input_port
self.output_port = None
self.pool = None
self.started_capture = False
self.outputs_lock = threading.Lock() # protects access to self.outputs
self.outputs = {}
self.exception = None
self.event = threading.Event()
self.stopped = True
try:
if parent.closed:
raise PiCameraRuntimeError("Camera is closed")
if resize:
self._create_resizer(*resize)
self._create_encoder(**options)
self._create_pool()
self._create_connections()
except:
self.close()
raise
def _create_resizer(self, width, height):
"""
Creates and configures an MMAL resizer component.
This is called when the initializer's *resize* parameter is something
other than ``None``. The *width* and *height* parameters are passed to
the constructed resizer. Note that this method only constructs the
resizer - it does not connect it to the encoder. The method sets the
:attr:`resizer` attribute to the constructed resizer component.
"""
self.resizer = ct.POINTER(mmal.MMAL_COMPONENT_T)()
mmal_check(
mmal.mmal_component_create(
mmal.MMAL_COMPONENT_DEFAULT_RESIZER, self.resizer),
prefix="Failed to create resizer component")
if not self.resizer[0].input_num:
raise PiCameraError("No input ports on resizer component")
if not self.resizer[0].output_num:
raise PiCameraError("No output ports on resizer component")
# Copy the original input port's format to the resizer's input,
# then the resizer's input format to the output, and configure it
mmal.mmal_format_copy(
self.resizer[0].input[0][0].format, self.input_port[0].format)
mmal_check(
mmal.mmal_port_format_commit(self.resizer[0].input[0]),
prefix="Failed to set resizer input port format")
mmal.mmal_format_copy(
self.resizer[0].output[0][0].format, self.resizer[0].input[0][0].format)
fmt = self.resizer[0].output[0][0].format
fmt[0].es[0].video.width = mmal.VCOS_ALIGN_UP(width, 32)
fmt[0].es[0].video.height = mmal.VCOS_ALIGN_UP(height, 16)
fmt[0].es[0].video.crop.x = 0
fmt[0].es[0].video.crop.y = 0
fmt[0].es[0].video.crop.width = width
fmt[0].es[0].video.crop.height = height
mmal_check(
mmal.mmal_port_format_commit(self.resizer[0].output[0]),
prefix="Failed to set resizer output port format")
def _create_encoder(self):
"""
Creates and configures the MMAL encoder component.
This method only constructs the encoder; it does not connect it to the
input port. The method sets the :attr:`encoder` attribute to the
constructed encoder component, and the :attr:`output_port` attribute to
the encoder's output port (or the previously constructed resizer's
output port if one has been requested). Descendent classes extend this
method to finalize encoder configuration.
.. note::
It should be noted that this method is called with the
initializer's ``option`` keyword arguments. This base
implementation expects no additional arguments, but descendent
classes extend the parameter list to include options relevant to
them.
"""
assert not self.encoder
self.encoder = ct.POINTER(mmal.MMAL_COMPONENT_T)()
mmal_check(
mmal.mmal_component_create(self.encoder_type, self.encoder),
prefix="Failed to create encoder component")
if not self.encoder[0].input_num:
raise PiCameraError("No input ports on encoder component")
if not self.encoder[0].output_num:
raise PiCameraError("No output ports on encoder component")
# Ensure output format is the same as the input
self.output_port = self.encoder[0].output[0]
if self.resizer:
mmal.mmal_format_copy(
self.encoder[0].input[0][0].format, self.resizer[0].output[0][0].format)
else:
mmal.mmal_format_copy(
self.encoder[0].input[0][0].format, self.input_port[0].format)
mmal_check(
mmal.mmal_port_format_commit(self.encoder[0].input[0]),
prefix="Failed to set encoder input port format")
mmal.mmal_format_copy(
self.output_port[0].format, self.encoder[0].input[0][0].format)
# Set buffer size and number to appropriate values
if self.format == 'mjpeg':
# There is a bug in the MJPEG encoder that causes a deadlock if the
# FIFO is full on shutdown. Increasing the encoder buffer size
# makes this less likely to happen. See
# https://github.com/raspberrypi/userland/issues/208
self.output_port[0].buffer_size = max(512 * 1024, self.output_port[0].buffer_size_recommended)
else:
self.output_port[0].buffer_size = self.output_port[0].buffer_size_recommended
self.output_port[0].buffer_num = self.output_port[0].buffer_num_recommended
# NOTE: We deliberately don't commit the output port format here as
# this is a base class and the output configuration is incomplete at
# this point. Descendents are expected to finish configuring the
# encoder and then commit the port format themselves
def _create_pool(self):
"""
Allocates a pool of MMAL buffers for the encoder.
This method is expected to construct an MMAL pool of buffers for the
:attr:`output_port`, and store the result in the :attr:`pool`
attribute.
"""
assert not self.pool
self.pool = mmal.mmal_port_pool_create(
self.output_port,
self.output_port[0].buffer_num,
self.output_port[0].buffer_size)
if not self.pool:
raise PiCameraError(
"Failed to create buffer header pool for encoder component")
def _create_connections(self):
"""
Creates all connections between MMAL components.
This method is called to connect the encoder and the optional resizer
to the input port provided by the camera. It sets the
:attr:`encoder_connection` and :attr:`resizer_connection` attributes as
required.
"""
assert not self.encoder_connection
if self.resizer:
self.resizer_connection = self.parent._connect_ports(
self.input_port, self.resizer[0].input[0])
self.encoder_connection = self.parent._connect_ports(
self.resizer[0].output[0], self.encoder[0].input[0])
else:
self.encoder_connection = self.parent._connect_ports(
self.input_port, self.encoder[0].input[0])
def _callback(self, port, buf):
"""
The encoder's main callback function.
When the encoder is active, this method is periodically called in a
background thread. The *port* parameter specifies the MMAL port
providing the output (typically this is the encoder's output port, but
in the case of unencoded captures may simply be a camera port), while
the *buf* parameter is an MMAL buffer header pointer which can be used
to obtain the data to write, along with meta-data about the current
frame.
This method *must* release the MMAL buffer header before returning
(failure to do so will cause a lockup), and should recycle buffers if
expecting further data (the :meth:`_callback_recycle` method can be
called to perform the latter duty). Finally, this method must set
:attr:`event` when the encoder has finished (and should set
:attr:`exception` if an exception occurred during encoding).
Developers wishing to write a custom encoder class may find it simpler
to override the :meth:`_callback_write` method, rather than deal with
these complexities.
"""
if self.stopped:
mmal.mmal_buffer_header_release(buf)
else:
stop = False
try:
try:
mmal_check(
mmal.mmal_buffer_header_mem_lock(buf),
prefix="Unable to lock buffer header memory")
try:
stop = self._callback_write(buf)
finally:
mmal.mmal_buffer_header_mem_unlock(buf)
finally:
mmal.mmal_buffer_header_release(buf)
self._callback_recycle(port, buf)
except Exception as e:
stop = True
self.exception = e
if stop:
self.stopped = True
self.event.set()
def _callback_write(self, buf, key=PiVideoFrameType.frame):
"""
Writes output on behalf of the encoder callback function.
This method is called by :meth:`_callback` to handle writing to an
object in :attr:`outputs` identified by *key*. The *buf* parameter is
an MMAL buffer header pointer which can be used to obtain the length of
data available (``buf[0].length``), a pointer to the data
(``buf[0].data``) which should typically be used with
:func:`ctypes.string_at`, and meta-data about the contents of the
buffer (``buf[0].flags``). The method is expected to return a boolean
to indicate whether output is complete (``True``) or whether more data
is expected (``False``).
The default implementation simply writes the contents of the buffer to
the output identified by *key*, and returns ``True`` if the buffer
flags indicate end of stream. Image encoders will typically override
the return value to indicate ``True`` on end of frame (as they only
wish to output a single image). Video encoders will typically override
this method to determine where key-frames and SPS headers occur.
"""
if buf[0].length:
with self.outputs_lock:
try:
written = self.outputs[key][0].write(
ct.string_at(buf[0].data, buf[0].length))
except KeyError:
pass
else:
# Ignore None return value; most Python 2 streams have
# no return value for write()
if (written is not None) and (written != buf[0].length):
raise PiCameraError(
"Unable to write buffer to output %s" % key)
return bool(buf[0].flags & mmal.MMAL_BUFFER_HEADER_FLAG_EOS)
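    # Hedged sketch (not part of this module): the custom-encoder pattern that
    # the docstring above alludes to typically subclasses a concrete encoder and
    # overrides _callback_write, e.g.
    #
    #   class MyVideoEncoder(PiCookedVideoEncoder):
    #       def _callback_write(self, buf, key=PiVideoFrameType.frame):
    #           # inspect buf[0].length / buf[0].flags here, then delegate
    #           return super(MyVideoEncoder, self)._callback_write(buf, key)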
def _callback_recycle(self, port, buf):
"""
Recycles the buffer on behalf of the encoder callback function.
This method is called by :meth:`_callback` when there is a buffer to
recycle (because further output is expected). It is unlikely descendent
classes will have a need to override this method, but if they override
the :meth:`_callback` method they may wish to call it.
"""
new_buf = mmal.mmal_queue_get(self.pool[0].queue)
if not new_buf:
raise PiCameraError(
"Unable to get a buffer to return to the encoder port")
mmal_check(
mmal.mmal_port_send_buffer(port, new_buf),
prefix="Unable to return a buffer to the encoder port")
def _open_output(self, output, key=PiVideoFrameType.frame):
"""
Opens *output* and associates it with *key* in :attr:`outputs`.
If *output* is a string, this method opens it as a filename and keeps
track of the fact that the encoder was the one to open it (which
implies that :meth:`_close_output` should eventually close it).
Otherwise, *output* is assumed to be a file-like object and is used
verbatim. The opened output is added to the :attr:`outputs` dictionary
with the specified *key*.
"""
with self.outputs_lock:
opened = isinstance(output, (bytes, str))
if opened:
# Open files in binary mode with a decent buffer size
output = io.open(output, 'wb', buffering=65536)
self.outputs[key] = (output, opened)
def _close_output(self, key=PiVideoFrameType.frame):
"""
Closes the output associated with *key* in :attr:`outputs`.
Closes the output object associated with the specified *key*, and
removes it from the :attr:`outputs` dictionary (if we didn't open the
object then we attempt to flush it instead).
"""
with self.outputs_lock:
try:
(output, opened) = self.outputs.pop(key)
except KeyError:
pass
else:
if opened:
output.close()
else:
try:
output.flush()
except AttributeError:
pass
@property
def active(self):
"""
Returns ``True`` if the MMAL encoder exists and is enabled.
"""
return bool(self.encoder and self.output_port[0].is_enabled)
def start(self, output):
"""
Starts the encoder object writing to the specified output.
This method is called by the camera to start the encoder capturing
data from the camera to the specified output. The *output* parameter
is either a filename, or a file-like object (for image and video
encoders), or an iterable of filenames or file-like objects (for
multi-image encoders).
"""
self.event.clear()
self.stopped = False
self.exception = None
self._open_output(output)
self.output_port[0].userdata = ct.cast(
ct.pointer(ct.py_object(self)),
ct.c_void_p)
with self.parent._encoders_lock:
mmal_check(
mmal.mmal_port_enable(self.output_port, _encoder_callback),
prefix="Failed to enable encoder output port")
for q in range(mmal.mmal_queue_length(self.pool[0].queue)):
buf = mmal.mmal_queue_get(self.pool[0].queue)
if not buf:
raise PiCameraRuntimeError(
"Unable to get a required buffer from pool queue")
mmal_check(
mmal.mmal_port_send_buffer(self.output_port, buf),
prefix="Unable to send a buffer to encoder output port")
self.parent._start_capture(self.camera_port)
def wait(self, timeout=None):
"""
Waits for the encoder to finish (successfully or otherwise).
This method is called by the owning camera object to block execution
until the encoder has completed its task. If the *timeout* parameter
is None, the method will block indefinitely. Otherwise, the *timeout*
parameter specifies the (potentially fractional) number of seconds
to block for. If the encoder finishes successfully within the timeout,
the method returns ``True``. Otherwise, it returns ``False``.
"""
result = self.event.wait(timeout)
if result:
self.stop()
# Check whether the callback set an exception
if self.exception:
raise self.exception
return result
def stop(self):
"""
Stops the encoder, regardless of whether it's finished.
This method is called by the camera to terminate the execution of the
encoder. Typically, this is used with video to stop the recording, but
can potentially be called in the middle of image capture to terminate
the capture.
"""
# The check below is not a race condition; we ignore the EINVAL error
# in the case the port turns out to be disabled when we disable below.
# The check exists purely to prevent stderr getting spammed by our
# continued attempts to disable an already disabled port
with self.parent._encoders_lock:
if self.active:
self.parent._stop_capture(self.camera_port)
try:
mmal_check(
mmal.mmal_port_disable(self.output_port),
prefix="Failed to disable encoder output port")
except PiCameraMMALError as e:
if e.status != mmal.MMAL_EINVAL:
raise
self.stopped = True
self.event.set()
self._close_output()
def close(self):
"""
Finalizes the encoder and deallocates all structures.
This method is called by the camera prior to destroying the encoder (or
more precisely, letting it go out of scope to permit the garbage
collector to destroy it at some future time). The method destroys all
components that the various create methods constructed and resets their
attributes.
"""
self.stop()
if self.encoder_connection:
mmal.mmal_connection_destroy(self.encoder_connection)
self.encoder_connection = None
if self.pool:
mmal.mmal_port_pool_destroy(self.output_port, self.pool)
self.pool = None
if self.resizer_connection:
mmal.mmal_connection_destroy(self.resizer_connection)
if self.encoder:
mmal.mmal_component_destroy(self.encoder)
self.encoder = None
if self.resizer:
mmal.mmal_component_destroy(self.resizer)
self.resizer = None
self.output_port = None
class PiRawMixin(PiEncoder):
"""
Mixin class for "raw" (unencoded) output.
This mixin class overrides the initializer of :class:`PiEncoder`, along
with :meth:`_create_resizer` and :meth:`_create_encoder` to configure the
pipeline for unencoded output. Specifically, it disables the construction
of an encoder, and sets the output port to the input port passed to the
initializer, unless resizing is required (either for actual resizing, or
for format conversion) in which case the resizer's output is used.
"""
RAW_ENCODINGS = {
# name mmal-encoding bytes-per-pixel
'yuv': (mmal.MMAL_ENCODING_I420, 1.5),
'rgb': (mmal.MMAL_ENCODING_RGBA, 3),
'rgba': (mmal.MMAL_ENCODING_RGBA, 4),
'bgr': (mmal.MMAL_ENCODING_BGRA, 3),
'bgra': (mmal.MMAL_ENCODING_BGRA, 4),
}
def __init__(
self, parent, camera_port, input_port, format, resize, **options):
# If a resize hasn't been requested, check the input_port format. If
# it requires conversion, force the use of a resizer to perform the
# conversion
if not resize:
if parent.RAW_FORMATS[format] != input_port[0].format[0].encoding.value:
resize = parent.resolution
# Workaround: If a non-alpha format is requested when a resizer is
# required, we use the alpha-inclusive format and set a flag to get the
# callback to strip the alpha bytes (for some reason the resizer won't
# work with non-alpha output formats - firmware bug?)
if resize:
width, height = resize
self._strip_alpha = format in ('rgb', 'bgr')
else:
width, height = parent.resolution
self._strip_alpha = False
width = mmal.VCOS_ALIGN_UP(width, 32)
height = mmal.VCOS_ALIGN_UP(height, 16)
# Workaround (#83): when the resizer is used the width and height must
# be aligned (both the actual and crop values) to avoid an error when
# the output port format is set
if resize:
resize = (width, height)
# Workaround: Calculate the expected image size, to be used by the
# callback to decide when a frame ends. This is to work around a
# firmware bug that causes the raw image to be returned twice when the
# maximum camera resolution is requested
self._frame_size = int(width * height * self.RAW_ENCODINGS[format][1])
super(PiRawMixin, self).__init__(
parent, camera_port, input_port, format, resize, **options)
def _create_resizer(self, width, height):
"""
Overridden to configure the resizer's output with the required
encoding.
"""
super(PiRawMixin, self)._create_resizer(width, height)
encoding = self.RAW_ENCODINGS[self.format][0]
port = self.resizer[0].output[0]
port[0].format[0].encoding = encoding
port[0].format[0].encoding_variant = encoding
mmal_check(
mmal.mmal_port_format_commit(port),
prefix="Failed to set resizer output port format")
def _create_encoder(self):
"""
Overridden to skip creating an encoder. Instead, this class simply uses
the resizer's port as the output port (if a resizer has been
configured) or the specified input port otherwise.
"""
if self.resizer:
self.output_port = self.resizer[0].output[0]
else:
self.output_port = self.input_port
def _create_connections(self):
"""
Overridden to skip creating an encoder connection; only a resizer
connection is required (if one has been configured).
"""
if self.resizer:
self.resizer_connection = self.parent._connect_ports(
self.input_port, self.resizer[0].input[0])
@property
def active(self):
return bool(self.output_port[0].is_enabled)
def _callback_write(self, buf, key=PiVideoFrameType.frame):
"""
Overridden to strip alpha bytes when required.
"""
if self._strip_alpha:
s = ct.string_at(buf[0].data, buf[0].length)
s = bytearray(s)
del s[3::4]
# All this messing around with buffers is to work around some issue
# with MMAL or ctypes (I'm not sure which is at fault). Anyway, the
# upshot is that if you fiddle with buf[0].data in any way
# whatsoever (even if you make every attempt to restore its value
# afterward), mmal_port_disable locks up when we call it in stop()
new_buf = mmal.MMAL_BUFFER_HEADER_T.from_buffer_copy(buf[0])
new_buf.length = len(s)
new_buf.data = ct.pointer(ct.c_uint8.from_buffer(s))
return super(PiRawMixin, self)._callback_write(ct.pointer(new_buf), key)
else:
return super(PiRawMixin, self)._callback_write(buf, key)
class PiVideoEncoder(PiEncoder):
"""
Encoder for video recording.
This derivative of :class:`PiEncoder` configures itself for H.264 or MJPEG
encoding. It also introduces a :meth:`split` method which is used by
:meth:`~picamera.camera.PiCamera.split_recording` and
:meth:`~picamera.camera.PiCamera.record_sequence` to redirect future output
to a new filename or object. Finally, it also extends
:meth:`PiEncoder.start` and :meth:`PiEncoder._callback_write` to track
video frame meta-data, and to permit recording motion data to a separate
output object.
"""
encoder_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_ENCODER
def __init__(
self, parent, camera_port, input_port, format, resize, **options):
super(PiVideoEncoder, self).__init__(
parent, camera_port, input_port, format, resize, **options)
self._next_output = []
self.frame = None
def _create_encoder(
self, bitrate=17000000, intra_period=None, profile='high',
quantization=0, quality=0, inline_headers=True, sei=False,
motion_output=None, intra_refresh=None):
"""
Extends the base :meth:`~PiEncoder._create_encoder` implementation to
configure the video encoder for H.264 or MJPEG output.
"""
super(PiVideoEncoder, self)._create_encoder()
# XXX Remove quantization in 2.0
quality = quality or quantization
try:
self.output_port[0].format[0].encoding = {
'h264': mmal.MMAL_ENCODING_H264,
'mjpeg': mmal.MMAL_ENCODING_MJPEG,
}[self.format]
except KeyError:
raise PiCameraValueError('Unrecognized format %s' % self.format)
if not (0 <= bitrate <= 25000000):
raise PiCameraValueError('bitrate must be between 0 and 25Mbps')
self.output_port[0].format[0].bitrate = bitrate
self.output_port[0].format[0].es[0].video.frame_rate.num = 0
self.output_port[0].format[0].es[0].video.frame_rate.den = 1
mmal_check(
mmal.mmal_port_format_commit(self.output_port),
prefix="Unable to set format on encoder output port")
if self.format == 'h264':
mp = mmal.MMAL_PARAMETER_VIDEO_PROFILE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_PROFILE,
ct.sizeof(mmal.MMAL_PARAMETER_VIDEO_PROFILE_T),
),
)
try:
mp.profile[0].profile = {
'baseline': mmal.MMAL_VIDEO_PROFILE_H264_BASELINE,
'main': mmal.MMAL_VIDEO_PROFILE_H264_MAIN,
'high': mmal.MMAL_VIDEO_PROFILE_H264_HIGH,
'constrained': mmal.MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE,
}[profile]
except KeyError:
raise PiCameraValueError("Invalid H.264 profile %s" % profile)
mp.profile[0].level = mmal.MMAL_VIDEO_LEVEL_H264_4
mmal_check(
mmal.mmal_port_parameter_set(self.output_port, mp.hdr),
prefix="Unable to set encoder H.264 profile")
if inline_headers:
mmal_check(
mmal.mmal_port_parameter_set_boolean(
self.output_port,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER,
mmal.MMAL_TRUE),
prefix="Unable to set inline_headers")
if sei:
mmal_check(
mmal.mmal_port_parameter_set_boolean(
self.output_port,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_SEI_ENABLE,
mmal.MMAL_TRUE),
prefix="Unable to set SEI")
if motion_output:
mmal_check(
mmal.mmal_port_parameter_set_boolean(
self.output_port,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_VECTORS,
mmal.MMAL_TRUE),
prefix="Unable to set inline motion vectors")
# We need the intra-period to calculate the SPS header timeout in
# the split method below. If one is not set explicitly, query the
# encoder's default
if intra_period is not None:
mp = mmal.MMAL_PARAMETER_UINT32_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INTRAPERIOD,
ct.sizeof(mmal.MMAL_PARAMETER_UINT32_T),
),
intra_period
)
mmal_check(
mmal.mmal_port_parameter_set(self.output_port, mp.hdr),
prefix="Unable to set encoder intra_period")
self._intra_period = intra_period
else:
mp = mmal.MMAL_PARAMETER_UINT32_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INTRAPERIOD,
ct.sizeof(mmal.MMAL_PARAMETER_UINT32_T),
))
mmal_check(
mmal.mmal_port_parameter_get(self.output_port, mp.hdr),
prefix="Unable to get encoder intra_period")
self._intra_period = mp.value
if intra_refresh is not None:
# Get the intra-refresh structure first as there are several
# other fields in it which we don't wish to overwrite
mp = mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH,
ct.sizeof(mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T),
))
# Deliberately avoid checking whether this call succeeds
mmal.mmal_port_parameter_get(self.output_port, mp.hdr)
try:
mp.refresh_mode = {
'cyclic': mmal.MMAL_VIDEO_INTRA_REFRESH_CYCLIC,
'adaptive': mmal.MMAL_VIDEO_INTRA_REFRESH_ADAPTIVE,
'both': mmal.MMAL_VIDEO_INTRA_REFRESH_BOTH,
'cyclicrows': mmal.MMAL_VIDEO_INTRA_REFRESH_CYCLIC_MROWS,
}[intra_refresh]
except KeyError:
raise PiCameraValueError(
"Invalid intra_refresh %s" % intra_refresh)
mmal_check(
mmal.mmal_port_parameter_set(self.output_port, mp.hdr),
prefix="Unable to set encoder intra_refresh")
elif self.format == 'mjpeg':
# MJPEG doesn't have an intra_period setting as such, but as every
# frame is a full-frame, the intra_period is effectively 1
self._intra_period = 1
if quality:
mp = mmal.MMAL_PARAMETER_UINT32_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT,
ct.sizeof(mmal.MMAL_PARAMETER_UINT32_T),
),
quality
)
mmal_check(
mmal.mmal_port_parameter_set(self.output_port, mp.hdr),
prefix="Unable to set initial quality")
mp = mmal.MMAL_PARAMETER_UINT32_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT,
ct.sizeof(mmal.MMAL_PARAMETER_UINT32_T),
),
quality,
)
mmal_check(
mmal.mmal_port_parameter_set(self.output_port, mp.hdr),
prefix="Unable to set minimum quality")
mp = mmal.MMAL_PARAMETER_UINT32_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT,
ct.sizeof(mmal.MMAL_PARAMETER_UINT32_T),
),
quality,
)
mmal_check(
mmal.mmal_port_parameter_set(self.output_port, mp.hdr),
prefix="Unable to set maximum quality")
mmal_check(
mmal.mmal_port_parameter_set_boolean(
self.encoder[0].input[0],
mmal.MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
1),
prefix="Unable to set immutable flag on encoder input port")
mmal_check(
mmal.mmal_component_enable(self.encoder),
prefix="Unable to enable video encoder component")
def start(self, output, motion_output=None):
"""
Extended to initialize video frame meta-data tracking.
"""
self.frame = PiVideoFrame(
index=0,
frame_type=None,
frame_size=0,
video_size=0,
split_size=0,
timestamp=0,
complete=False,
)
if motion_output is not None:
self._open_output(motion_output, PiVideoFrameType.motion_data)
super(PiVideoEncoder, self).start(output)
def stop(self):
super(PiVideoEncoder, self).stop()
self._close_output(PiVideoFrameType.motion_data)
def split(self, output, motion_output=None):
"""
Called to switch the encoder's output.
This method is called by
:meth:`~picamera.camera.PiCamera.split_recording` and
:meth:`~picamera.camera.PiCamera.record_sequence` to switch the
encoder's :attr:`output` object to the *output* parameter (which can be
a filename or a file-like object, as with :meth:`start`).
"""
with self.outputs_lock:
outputs = {}
if output is not None:
outputs[PiVideoFrameType.frame] = output
if motion_output is not None:
outputs[PiVideoFrameType.motion_data] = motion_output
self._next_output.append(outputs)
# intra_period / framerate gives the time between I-frames (which
# should also coincide with SPS headers). We multiply by three to
# ensure the timeout is deliberately excessive, and clamp the minimum
# timeout to 1 second (otherwise unencoded formats tend to fail
# presumably due to I/O capacity)
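        # Worked example (illustrative, not from the original source): with an
        # intra_period of 30 frames and a framerate of 30 fps the wait below
        # becomes max(1.0, 1.0 * 3.0) = 3.0 seconds before a missing split
        # point raises PiCameraRuntimeError.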
timeout = max(1.0, float(self._intra_period / self.parent.framerate) * 3.0)
if not self.event.wait(timeout):
raise PiCameraRuntimeError(
'Timed out waiting for a split point')
self.event.clear()
def _callback_write(self, buf, key=PiVideoFrameType.frame):
"""
Extended to implement video frame meta-data tracking, and to handle
splitting video recording to the next output when :meth:`split` is
called.
"""
self.frame = PiVideoFrame(
index=
self.frame.index + 1
if self.frame.complete else
self.frame.index,
frame_type=
PiVideoFrameType.key_frame
if buf[0].flags & mmal.MMAL_BUFFER_HEADER_FLAG_KEYFRAME else
PiVideoFrameType.sps_header
if buf[0].flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG else
PiVideoFrameType.motion_data
if buf[0].flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else
PiVideoFrameType.frame,
frame_size=
buf[0].length
if self.frame.complete else
self.frame.frame_size + buf[0].length,
video_size=
self.frame.video_size
if buf[0].flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else
self.frame.video_size + buf[0].length,
split_size=
self.frame.split_size
if buf[0].flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else
self.frame.split_size + buf[0].length,
timestamp=
None
if buf[0].pts in (0, mmal.MMAL_TIME_UNKNOWN) else
buf[0].pts,
complete=
bool(buf[0].flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END),
)
if self.format != 'h264' or (buf[0].flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG):
with self.outputs_lock:
try:
new_outputs = self._next_output.pop(0)
except IndexError:
new_outputs = None
if new_outputs:
for new_key, new_output in new_outputs.items():
self._close_output(new_key)
self._open_output(new_output, new_key)
if new_key == PiVideoFrameType.frame:
self.frame = PiVideoFrame(
index=self.frame.index,
frame_type=self.frame.frame_type,
frame_size=self.frame.frame_size,
video_size=self.frame.video_size,
split_size=0,
timestamp=self.frame.timestamp,
complete=self.frame.complete,
)
self.event.set()
if buf[0].flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO:
key = PiVideoFrameType.motion_data
return super(PiVideoEncoder, self)._callback_write(buf, key)
class PiCookedVideoEncoder(PiVideoEncoder):
"""
Video encoder for encoded recordings.
This class is a derivative of :class:`PiVideoEncoder` and only exists to
provide naming symmetry with the image encoder classes.
"""
class PiRawVideoEncoder(PiRawMixin, PiVideoEncoder):
"""
Video encoder for unencoded recordings.
This class is a derivative of :class:`PiVideoEncoder` and the
:class:`PiRawMixin` class intended for use with
:meth:`~picamera.camera.PiCamera.start_recording` when it is called with an
unencoded format.
.. warning::
This class creates an inheritance diamond. Take care to determine the
MRO of super-class calls.
"""
def _create_encoder(self):
super(PiRawVideoEncoder, self)._create_encoder()
# Raw formats don't have an intra_period setting as such, but as every
# frame is a full-frame, the intra_period is effectively 1
self._intra_period = 1
class PiImageEncoder(PiEncoder):
"""
Encoder for image capture.
This derivative of :class:`PiEncoder` extends the :meth:`_create_encoder`
method to configure the encoder for a variety of encoded image outputs
(JPEG, PNG, etc.).
"""
encoder_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_ENCODER
def _create_encoder(self, quality=85, thumbnail=(64, 48, 35), bayer=False):
"""
Extends the base :meth:`~PiEncoder._create_encoder` implementation to
configure the image encoder for JPEG, PNG, etc.
"""
super(PiImageEncoder, self)._create_encoder()
try:
self.output_port[0].format[0].encoding = {
'jpeg': mmal.MMAL_ENCODING_JPEG,
'png': mmal.MMAL_ENCODING_PNG,
'gif': mmal.MMAL_ENCODING_GIF,
'bmp': mmal.MMAL_ENCODING_BMP,
}[self.format]
except KeyError:
raise PiCameraValueError("Unrecognized format %s" % self.format)
mmal_check(
mmal.mmal_port_format_commit(self.output_port),
prefix="Unable to set format on encoder output port")
if self.format == 'jpeg':
mmal_check(
mmal.mmal_port_parameter_set_uint32(
self.output_port,
mmal.MMAL_PARAMETER_JPEG_Q_FACTOR,
quality),
prefix="Failed to set JPEG quality")
mmal_check(
mmal.mmal_port_parameter_set_boolean(
self.camera_port,
mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE,
int(bool(bayer))),
prefix="Failed to set raw capture")
if thumbnail is None:
mp = mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION,
ct.sizeof(mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T)
),
0, 0, 0, 0)
else:
mp = mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION,
ct.sizeof(mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T)
),
1, *thumbnail)
mmal_check(
mmal.mmal_port_parameter_set(self.encoder[0].control, mp.hdr),
prefix="Failed to set thumbnail configuration")
mmal_check(
mmal.mmal_component_enable(self.encoder),
prefix="Unable to enable encoder component")
class PiOneImageEncoder(PiImageEncoder):
"""
Encoder for single image capture.
This class simply extends :meth:`~PiEncoder._callback_write` to terminate
capture at frame end (i.e. after a single frame has been received).
"""
def _callback_write(self, buf, key=PiVideoFrameType.frame):
return (
super(PiOneImageEncoder, self)._callback_write(buf, key)
) or bool(
buf[0].flags & (
mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END |
mmal.MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED)
)
class PiMultiImageEncoder(PiImageEncoder):
"""
Encoder for multiple image capture.
This class extends :class:`PiImageEncoder` to handle an iterable of outputs
instead of a single output. The :meth:`~PiEncoder._callback_write` method
is extended to terminate capture when the iterable is exhausted, while
:meth:`PiEncoder._open_output` is overridden to begin iteration and rely
on the new :meth:`_next_output` method to advance output to the next item
in the iterable.
"""
def _open_output(self, outputs, key=PiVideoFrameType.frame):
self._output_iter = iter(outputs)
self._next_output(key)
def _next_output(self, key=PiVideoFrameType.frame):
"""
This method moves output to the next item from the iterable passed to
:meth:`~PiEncoder.start`.
"""
self._close_output(key)
super(PiMultiImageEncoder, self)._open_output(next(self._output_iter), key)
def _callback_write(self, buf, key=PiVideoFrameType.frame):
try:
if (
super(PiMultiImageEncoder, self)._callback_write(buf, key)
) or bool(
buf[0].flags & (
mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END |
mmal.MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED)
):
self._next_output(key)
return False
except StopIteration:
return True
class PiCookedOneImageEncoder(PiOneImageEncoder):
"""
Encoder for "cooked" (encoded) single image output.
This encoder extends :class:`PiOneImageEncoder` to include Exif tags in the
output.
"""
exif_encoding = 'ascii'
def _add_exif_tag(self, tag, value):
# Format the tag and value into an appropriate bytes string, encoded
# with the Exif encoding (ASCII)
if isinstance(tag, str):
tag = tag.encode(self.exif_encoding)
if isinstance(value, str):
value = value.encode(self.exif_encoding)
elif isinstance(value, datetime.datetime):
value = value.strftime('%Y:%m:%d %H:%M:%S').encode(self.exif_encoding)
# MMAL_PARAMETER_EXIF_T is a variable sized structure, hence all the
# mucking about with string buffers here...
buf = ct.create_string_buffer(
ct.sizeof(mmal.MMAL_PARAMETER_EXIF_T) + len(tag) + len(value) + 1)
mp = ct.cast(buf, ct.POINTER(mmal.MMAL_PARAMETER_EXIF_T))
mp[0].hdr.id = mmal.MMAL_PARAMETER_EXIF
mp[0].hdr.size = len(buf)
if (b'=' in tag or b'\x00' in value):
data = tag + value
mp[0].keylen = len(tag)
mp[0].value_offset = len(tag)
mp[0].valuelen = len(value)
else:
data = tag + b'=' + value
ct.memmove(mp[0].data, data, len(data))
mmal_check(
mmal.mmal_port_parameter_set(self.output_port, mp[0].hdr),
prefix="Failed to set Exif tag %s" % tag)
def start(self, output):
timestamp = datetime.datetime.now()
timestamp_tags = (
'EXIF.DateTimeDigitized',
'EXIF.DateTimeOriginal',
'IFD0.DateTime')
# Timestamp tags are always included with the value calculated
# above, but the user may choose to override the value in the
# exif_tags mapping
for tag in timestamp_tags:
self._add_exif_tag(tag, self.parent.exif_tags.get(tag, timestamp))
# All other tags are just copied in verbatim
for tag, value in self.parent.exif_tags.items():
            if tag not in timestamp_tags:
self._add_exif_tag(tag, value)
super(PiCookedOneImageEncoder, self).start(output)
class PiCookedMultiImageEncoder(PiMultiImageEncoder):
"""
Encoder for "cooked" (encoded) multiple image output.
This encoder descends from :class:`PiMultiImageEncoder` but includes no
new functionality as video-port based encodes (which is all this class
is used for) don't support Exif tag output.
"""
pass
class PiRawImageMixin(PiRawMixin, PiImageEncoder):
"""
Mixin class for "raw" (unencoded) image capture.
The :meth:`_callback_write` method is overridden to manually calculate when
to terminate output.
"""
def __init__(
self, parent, camera_port, input_port, format, resize, **options):
super(PiRawImageMixin, self).__init__(
parent, camera_port, input_port, format, resize, **options)
self._image_size = 0
def _callback_write(self, buf, key=PiVideoFrameType.frame):
"""
Overridden to manually calculate when to terminate capture (see
comments in :meth:`__init__`).
"""
if self._image_size > 0:
super(PiRawImageMixin, self)._callback_write(buf, key)
self._image_size -= buf[0].length
return self._image_size <= 0
def start(self, output):
self._image_size = self._frame_size
super(PiRawImageMixin, self).start(output)
class PiRawOneImageEncoder(PiOneImageEncoder, PiRawImageMixin):
"""
Single image encoder for unencoded capture.
This class is a derivative of :class:`PiOneImageEncoder` and the
:class:`PiRawImageMixin` class intended for use with
:meth:`~picamera.camera.PiCamera.capture` (et al) when it is called with an
unencoded image format.
.. warning::
This class creates an inheritance diamond. Take care to determine the
MRO of super-class calls.
"""
pass
class PiRawMultiImageEncoder(PiMultiImageEncoder, PiRawImageMixin):
"""
Multiple image encoder for unencoded capture.
This class is a derivative of :class:`PiMultiImageEncoder` and the
:class:`PiRawImageMixin` class intended for use with
:meth:`~picamera.camera.PiCamera.capture_sequence` when it is called with
an unencoded image format.
.. warning::
This class creates an inheritance diamond. Take care to determine the
MRO of super-class calls.
"""
def _next_output(self, key=PiVideoFrameType.frame):
super(PiRawMultiImageEncoder, self)._next_output(key)
self._image_size = self._frame_size
| ListFranz/picamera | picamera/encoders.py | Python | bsd-3-clause | 63,187 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Post-Upsampling Super Resolution CNN (SRCNN) (2016)
# Paper: https://arxiv.org/pdf/1501.00092.pdf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv2D, BatchNormalization, ReLU, Conv2DTranspose, Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
import sys
sys.path.append('../')
from models_c import Composable
class SRCNNPost(Composable):
''' Construct a Post Upsampling Super Resolution CNN '''
# Meta-parameter:
    groups = [ { 'n_filters': 32 }, { 'n_filters' : 64 } ]
# Initial Hyperparameters
hyperparameters = { 'initializer': 'he_normal',
'regularizer': None,
'relu_clip' : None,
'bn_epsilon' : None,
'use_bias' : False
}
def __init__(self, groups=None ,
input_shape=(32, 32, 3), include_top=True,
**hyperparameters):
""" Construct a Wids Residual (Convolutional Neural) Network
groups : metaparameter for group configuration
input_shape : input shape
include_top : include the reconstruction component
initializer : kernel initialization
regularizer : kernel regularization
relu_clip : max value for ReLU
bn_epsilon : epsilon for batch norm
            use_bias    : whether to use bias in conjunction with batch norm
"""
# Configure base (super) class
Composable.__init__(self, input_shape, include_top, self.hyperparameters, **hyperparameters)
if groups is None:
groups = self.groups
# The input tensor
inputs = Input(input_shape)
# The stem convolutional group
x = self.stem(inputs)
# The learner
outputs = self.learner(x, groups)
# The reconstruction
if include_top:
outputs = self.decoder(outputs)
# Instantiate the Model
self._model = Model(inputs, outputs)
def stem(self, inputs):
""" Construct the Stem Convolutional Group
inputs : the input tensor
"""
# n1, dimensionality expansion with large coarse filter
x = self.Conv2D(inputs, 16, (3, 3), padding='same')
x = self.BatchNormalization(x)
x = self.ReLU(x)
return x
def learner(self, x, groups):
""" Construct the Learner
x : input to the learner
groups : group configuration
"""
for group in groups:
n_filters = group['n_filters' ]
x = self.Conv2D(x, n_filters, (3, 3), padding='same')
x = self.BatchNormalization(x)
x = self.ReLU(x)
return x
def decoder(self, x):
""" Construct the Decoder
x : input to the decoder
"""
# reconstruction
x = self.Conv2DTranspose(x, 3, (3, 3), strides=2, padding='same')
x = self.BatchNormalization(x)
x = Activation('sigmoid')(x)
return x
# Example
# srcnn = SRCNNPost()
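# A hedged sketch (not from the original source): the groups meta-parameter can be
# overridden per instance, e.g.
# srcnn = SRCNNPost(groups=[{'n_filters': 32}, {'n_filters': 64}], input_shape=(32, 32, 3))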
| GoogleCloudPlatform/keras-idiomatic-programmer | zoo/srcnn/srcnn-post_c.py | Python | apache-2.0 | 3,721 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import models
from openerp import api, SUPERUSER_ID
def _setup_inalterability(cr, registry):
env = api.Environment(cr, SUPERUSER_ID, {})
# enable ping for this module
env['publisher_warranty.contract'].update_notification(cron_mode=True)
# make sure account_cancel is not usable at the same time as l10n_fr
# FORWARD PORT NOTICE
# In master as of March 2017, RCO-ODOO coded an exclusive field on modules to flag incompatibility
wanted_states = ['installed', 'to upgrade', 'to install']
account_cancel_module = env['ir.module.module'].search([('name', '=', 'account_cancel')], limit=1)
if account_cancel_module and account_cancel_module.state in wanted_states:
views_xml_id = env['ir.model.data'].search([('module', '=', 'account_cancel'), ('model', '=', 'ir.ui.view')])
ir_views = env['ir.ui.view'].browse([v.res_id for v in views_xml_id])
for cancel_view in ir_views:
cancel_view.write({'active': False})
fr_companies = env['res.company'].search([('partner_id.country_id.code', '=', 'FR')])
if fr_companies:
# create the securisation sequence per company
fr_companies._create_secure_sequence()
#reset the update_posted field on journals
journals = env['account.journal'].search([('company_id', 'in', fr_companies.ids)])
for journal in journals:
journal.write({'update_posted': False})
| Elico-Corp/odoo_OCB | addons/l10n_fr_certification/__init__.py | Python | agpl-3.0 | 1,524 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import ast
from pants.backend.python.tasks.checkstyle.common import CheckstylePlugin
class ClassFactoring(CheckstylePlugin):
"""Enforces recommendations for accessing class attributes.
Within classes, if you see:
class Distiller(object):
CONSTANT = "Foo"
def foo(self, value):
return os.path.join(Distiller.CONSTANT, value)
recommend using self.CONSTANT instead of Distiller.CONSTANT as otherwise
it makes subclassing impossible."""
def iter_class_accessors(self, class_node):
for node in ast.walk(class_node):
if isinstance(node, ast.Attribute) and isinstance(node.value, ast.Name) and (
node.value.id == class_node.name):
yield node
def nits(self):
for class_def in self.iter_ast_types(ast.ClassDef):
for node in self.iter_class_accessors(class_def):
yield self.warning('T800',
'Instead of {name}.{attr} use self.{attr} or cls.{attr} with instancemethods and '
'classmethods respectively.'.format(name=class_def.name, attr=node.attr))
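# Illustrative note (not from the original source): for the Distiller example in the
# class docstring, nits() would yield a T800 warning recommending self.CONSTANT or
# cls.CONSTANT in place of Distiller.CONSTANT.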
| megaserg/pants | src/python/pants/backend/python/tasks/checkstyle/class_factoring.py | Python | apache-2.0 | 1,351 |
import research.utils
import argparse
import sys
parser = argparse.ArgumentParser(description='Flip the most significant bit in every byte of the file.')
parser.add_argument('input', nargs='?', type=argparse.FileType('br'), default=sys.stdin)
parser.add_argument('--output', '-o', nargs='?', type=argparse.FileType('bw'), default=sys.stdout)
args = parser.parse_args()
research.utils.flip_most_significant_bits(args.input, args.output)
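# Hedged usage sketch (file names are hypothetical): read a binary file and write the
# bit-flipped copy elsewhere, or omit the arguments to pipe through stdin/stdout:
# python flip.py input.bin -o output.bin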
| west-tandon/ReSearch | bin/flip.py | Python | mit | 438 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""
"""
__author__ = 'Bitcraze AB'
__all__ = ['InputMux']
import os
import glob
import logging
from cflib.utils.callbacks import Caller
logger = logging.getLogger(__name__)
MAX_THRUST = 65000
class InputMux(object):
def __init__(self, input):
self._devs = []
self.name = "N/A"
self.input = input
self._prev_values = {}
# Roll/pitch limitation
self.max_rp_angle = 0
# Thrust limitations
self.thrust_slew_enabled = True
self.thrust_slew_limit = 0
self.thrust_slew_rate = 0
        self.min_thrust = 0
        self.max_thrust = 0
self.max_yaw_rate = 0
self.springy_throttle = True
self.trim_roll = 0
self.trim_pitch = 0
self.has_pressure_sensor = False
# TODO: Fix writing these values
#self._max_rp_angle = 40
#self._springy_throttle = True
#self._thrust_slew_enabled = True
#self._thrust_slew_limit = 30
#self._thrust_slew_rate = 30
#self._min_thrust = 20000
#self._max_thrust = 50000
#self._max_yaw_rate = 400
#self._trim_roll = 0.0
#self._trim_pitch = 0.0
# Stateful things
self._old_thrust = 0
self._old_raw_thrust = 0
self._old_alt_hold = False
# TODO: Should these really be placed here?
#self.input_updated = Caller()
#self.rp_trim_updated = Caller()
#self.emergency_stop_updated = Caller()
#self.device_discovery = Caller()
#self.device_error = Caller()
#self.althold_updated = Caller()
#self.alt1_updated = Caller()
#self.alt2_updated = Caller()
def get_supported_dev_count(self):
return 1
def add_device(self, dev, parameters):
logger.info("Adding device and opening it")
dev.open()
self._devs.append(dev)
def remove_device(self, dev):
self._devs.remove(dev)
dev.close()
def close(self):
"""Close down the MUX and close all it's devices"""
for d in self._devs:
d.close()
self._devs = []
def _cap_rp(self, rp):
ret = rp * self.max_rp_angle
if ret > self.max_rp_angle:
ret = self.max_rp_angle
elif ret < -1 * self.max_rp_angle:
ret = -1 * self.max_rp_angle
return ret
def _scale_rp(self, roll, pitch):
return [self._cap_rp(roll), self._cap_rp(pitch)]
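    # Illustrative example (not from the original source): with max_rp_angle = 40,
    # _cap_rp(0.5) -> 20.0 and _cap_rp(1.2) -> 40 (clamped), so _scale_rp keeps roll
    # and pitch within +/- max_rp_angle degrees.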
def _scale_and_deadband_yaw(self, yaw):
return InputMux.deadband(yaw, 0.2) * self.max_yaw_rate
def _limit_thrust(self, thrust, althold, emergency_stop):
        # Thrust limiting (slew, minimum and emergency stop)
if self.springy_throttle:
if althold and self.has_pressure_sensor:
thrust = int(round(InputMux.deadband(thrust, 0.2)*32767 + 32767)) #Convert to uint16
else:
if thrust < 0.05 or emergency_stop:
thrust = 0
else:
thrust = self.min_thrust + thrust * (self.max_thrust -
self.min_thrust)
if (self.thrust_slew_enabled == True and
self.thrust_slew_limit > thrust and not
emergency_stop):
if self._old_thrust > self.thrust_slew_limit:
self._old_thrust = self.thrust_slew_limit
if thrust < (self._old_thrust - (self.thrust_slew_rate / 100)):
thrust = self._old_thrust - self.thrust_slew_rate / 100
if thrust < 0 or thrust < self.min_thrust:
thrust = 0
else:
thrust = thrust / 2 + 0.5
if althold and self.has_pressure_sensor:
#thrust = int(round(JoystickReader.deadband(thrust,0.2)*32767 + 32767)) #Convert to uint16
thrust = 32767
else:
if thrust < -0.90 or emergency_stop:
thrust = 0
else:
thrust = self.min_thrust + thrust * (self.max_thrust -
self.min_thrust)
if (self.thrust_slew_enabled == True and
self.thrust_slew_limit > thrust and not
emergency_stop):
if self._old_thrust > self.thrust_slew_limit:
self._old_thrust = self.thrust_slew_limit
if thrust < (self._old_thrust - (self.thrust_slew_rate / 100)):
thrust = self._old_thrust - self.thrust_slew_rate / 100
if thrust < -1 or thrust < self.min_thrust:
thrust = 0
self._old_thrust = thrust
self._old_raw_thrust = thrust
return thrust
def set_alt_hold_available(self, available):
"""Set if altitude hold is available or not (depending on HW)"""
self.input._has_pressure_sensor = available
def enable_alt_hold(self, althold):
"""Enable or disable altitude hold"""
self._old_alt_hold = althold
def _check_toggle(self, key, data):
if not key in self._prev_values:
self._prev_values[key] = data
elif self._prev_values[key] != data:
self._prev_values[key] = data
return True
return False
def _update_alt_hold(self, value):
if self._check_toggle("althold", value):
self.input.althold_updated.call(str(value))
def _update_em_stop(self, value):
if self._check_toggle("estop", value):
self.input.emergency_stop_updated.call(value)
def _update_alt1(self, value):
if self._check_toggle("alt1", value):
self.input.alt1_updated.call(value)
def _update_alt2(self, value):
if self._check_toggle("alt2", value):
self.input.alt2_updated.call(value)
def _trim_rp(self, roll, pitch):
return [roll + self.trim_roll, pitch + self.trim_pitch]
@staticmethod
def p2t(percentage):
"""Convert a percentage to raw thrust"""
return int(MAX_THRUST * (percentage / 100.0))
@staticmethod
def deadband(value, threshold):
if abs(value) < threshold:
value = 0
elif value > 0:
value -= threshold
elif value < 0:
value += threshold
return value/(1-threshold)
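    # Worked example (illustrative, not from the original source):
    # deadband(0.5, 0.2) -> (0.5 - 0.2) / (1 - 0.2) = 0.375, while any |value| below
    # the 0.2 threshold maps to 0; p2t(50) -> int(65000 * 0.5) = 32500.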
def read(self):
return None | qrohlf/cf-client | lib/cfclient/utils/mux/__init__.py | Python | gpl-2.0 | 7,589 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# parallel https://code.google.com/p/pysam/issues/detail?id=105
# import pp
import argparse
import multiprocessing as mp
import os
import pysam
from datetime import datetime
from pynnotator import settings
from subprocess import call
class Dbnsfp(object):
def __init__(self, vcf_file=None, cores=None):
self.vcf_file = vcf_file
self.dbnsfp_header = open('%s/dbnsfp/header.vcf' % (settings.data_dir)).readlines()
# print('self.resources', self.resources)
self.cores = int(cores)
self.filename = os.path.splitext(os.path.basename(str(vcf_file)))[0]
# create folder validator if it doesn't exists
if not os.path.exists('dbnsfp'):
os.makedirs('dbnsfp')
def run(self):
tstart = datetime.now()
print(tstart, 'Starting DBNSFP annotator: ', self.vcf_file)
# std = self.annotator()
self.splitvcf(self.vcf_file)
pool = mp.Pool()
pool.map(self.annotate, range(1, self.cores + 1))
# pool.close()
# pool.join()
prefix = 'dbnsfp'
# # Define your jobs
# jobs = []
final_parts = []
for n in range(0, self.cores):
index = n + 1
final_file = 'dbnsfp/dbnsfp.%s.vcf' % (index)
final_parts.append(final_file)
command = 'cat %s/header.vcf ' % (prefix) + " ".join(final_parts) + '> %s/dbnsfp.vcf' % (prefix)
call(command, shell=True)
command = 'rm %s/header.vcf %s/body.vcf %s/dbnsfp.*.vcf %s/part.*' % (prefix, prefix, prefix, prefix)
# run(command, shell=True)
tend = datetime.now()
annotation_time = tend - tstart
print(tend, 'Finished DBNSFP, it took: ', annotation_time)
def partition(self, lst, n):
division = len(lst) / float(n)
return [lst[int(round(division * i)): int(round(division * (i + 1)))] for i in range(n)]
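    # Illustrative example (not from the original source): splitting four VCF lines
    # across two cores gives partition([1, 2, 3, 4], 2) == [[1, 2], [3, 4]].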
def splitvcf(self, vcffile):
# print('split file', vcffile)
# print 'numero de cores', cores
prefix = 'dbnsfp'
vcf_reader = open('%s' % (vcffile))
header_writer = open('%s/header.vcf' % (prefix), 'w')
body_writer = open('%s/body.vcf' % (prefix), 'w')
count_lines = 0
for line in vcf_reader:
if line.startswith('#'):
if line.startswith('#CHROM'):
header_writer.writelines(self.dbnsfp_header)
header_writer.writelines(line)
else:
body_writer.writelines(line)
header_writer.close()
body_writer.close()
vcf_reader = open('%s/body.vcf' % (prefix))
groups = self.partition(list(vcf_reader.readlines()), self.cores)
for c, group in enumerate(groups):
# print 'group', len(group)
# print 'c', c
part = c + 1
part_writer = open('%s/part.%s.vcf' % (prefix, part), 'w')
for line in group:
part_writer.writelines(line)
part_writer.close()
# convert and annotate the vcf file to snpeff
def annotate(self, out_prefix):
# print 'Hello'
# print self.dbnsfp_reader
# header is at:
# 24 SIFT_score: SIFT score (SIFTori).
# 105 HUVEC_confidence_value: 0 - highly significant scores (approx. p<.003); 1 - significant scores
# 188 clinvar_rs: rs number from the clinvar data set
# 191 clinvar_golden_stars: ClinVar Review Status summary.
# print 'input',vcffile, out_prefix, dbnsfp
dbnsfp_reader = pysam.Tabixfile(settings.dbnsfp, 'r', encoding='utf-8')
# print('header')
for item in dbnsfp_reader.header:
header = item.strip().split('\t')
# header = dbnsfp_reader.header.next().strip().split('\t')
vcffile = 'dbnsfp/part.%s.vcf' % (out_prefix)
vcf_reader = open('%s' % (vcffile))
vcf_writer = open('dbnsfp/dbnsfp.%s.vcf' % (out_prefix), 'w', encoding="utf-8")
for line in vcf_reader:
if line.startswith('#'):
if line.startswith('#CHROM'):
                    vcf_writer.writelines(self.dbnsfp_header)
vcf_writer.writelines(line)
else:
variant = line.split('\t')
variant[0] = variant[0].replace('chr', '')
index = '%s-%s' % (variant[0], variant[1])
# print index
try:
records = dbnsfp_reader.fetch(variant[0], int(variant[1]) - 1, int(variant[1]),multiple_iterators=True)
except:
records = []
for record in records:
ann = record.strip().split('\t')
ispresent = False
if variant[3] == ann[2]:
alts = variant[4].split(',')
alts_ann = ann[3].split(',')
# compare ALT
for alt in alts:
if alt in alts_ann:
ispresent = True
if ispresent:
new_ann = []
for k, item in enumerate(header):
idx = k
if ann[idx] != '.':
new_ann.append('dbNSFP_%s=%s' % (item, ann[idx].replace(';', '|')))
variant[7] = '%s;%s' % (variant[7], ";".join(new_ann))
vcf_writer.writelines("\t".join(variant))
vcf_writer.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Annotate a VCF File with DbNSFP.')
parser.add_argument('-i', dest='vcf_file', required=True, metavar='example.vcf', help='a VCF file to be annotated')
parser.add_argument('-n', dest='cores', required=True, metavar='4', help='number of cores to use')
args = parser.parse_args()
dbnfsp = Dbnsfp(args.vcf_file, args.cores)
dbnfsp.run()
| raonyguimaraes/pynnotator | pynnotator/helpers/dbnsfp.py | Python | bsd-3-clause | 6,048 |
from particles.dataTypes import Integer, Double, DoubleTensor
from collisions.function import GradGraph
from particles import ops
import numpy as np
import unittest
def divtest():
x = Integer("Int1")
y = Integer("Int2")
z = x / y
graph = GradGraph(z)
output = graph.getOutput({x: 6,
y: 2})
return output == 3
def simpSum():
x = Integer("Int1")
y = x + 3
graph = GradGraph(y)
output = graph.getOutput({x: 1})
return output == 4
def simpSub():
x = Integer("Int1")
y = x - 3
graph = GradGraph(y)
output = graph.getOutput({x: 1})
return output == -2
def simpMul():
x = Integer("Int1")
y = x * 3
graph = GradGraph(y)
output = graph.getOutput({x: 2})
return output == 6
def simpDiv():
x = Integer("Int1")
y = x / 4
graph = GradGraph(y)
output = graph.getOutput({x: 8})
return output == 2
# Tensor Test
def TensorSum():
x = DoubleTensor("Tensor1")
y = x + 3
graph = GradGraph(y)
output = graph.getOutput({x: 1})
return output == 4
def TensorOp():
x = DoubleTensor("Tensor1")
y = x - [3, 4]
z = ops.log(y * x)
graph = GradGraph(z)
output = graph.getOutput({x: [10]})
assert(np.all(np.isclose(output, np.log(10 * (10 - np.asarray([3, 4]))))))
graph.getGradients(wrt=x)
a = 2 * 10 - np.asarray([3, 4])
b = 1.0 / np.exp(np.asarray(output))
return np.all(np.isclose(x.gradient, a * b))
def dotProduct():
x = DoubleTensor("Tensor1")
y = x.dot([3, 4])
z = y.dot([4, 5])
graph = GradGraph(z)
output = graph.getOutput({x: [3, 4]})
graph.getGradients(wrt=x)
flag1 = np.all(output == [100, 125])
flag2 = np.all(x.gradient == [[12., 16.], [15., 20.]])
return flag1 and flag2
def test1():
x = Integer("Int1")
y = Integer("Int2")
z = Integer("Int3")
p = Integer("Int4")
k = p + z
kd = k * 2
t = x - kd
td = t - 2
s = td * z
sd = s / 5
graph = GradGraph(sd)
output = graph.getOutput({x: 36,
y: 2,
z: 3,
p: 9})
return output == 6
def gradTestSimple():
a = Integer("a")
b = Integer("b")
e = (a + b) * (b + 1)
graph = GradGraph(e)
graph.getOutput({a: 2,
b: 1})
graph.getGradients(wrt=b)
return b.gradient == 5
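# Worked check (illustrative, not from the original source): gradTestSimple builds
# e = (a + b) * (b + 1); by the product rule de/db = (b + 1) + (a + b), so at
# a=2, b=1 the gradient is 2 + 3 = 5, the value returned above.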
def gradTestShort():
x = Integer("Int1x")
y = Integer("Int2y")
z = Integer("Int3z")
p = Integer("Int4p")
k = p * z
t = y * k
m = k + t
n = m * z
graph = GradGraph(n)
graph.getOutput({x: 9,
y: 9,
z: 9,
p: 2})
graph.getGradients(wrt=z)
return z.gradient == 360
def gradTestLong():
x = Integer("Int1x")
y = Integer("Int2y")
z = Integer("Int3z")
p = Integer("Int4p")
k = p * z
n = (k + (y * p * z)) * z
graph = GradGraph(n)
graph.getOutput({x: 9,
y: 9,
z: 9,
p: 2})
graph.getGradients(wrt=z)
return True
def testOps():
x = Integer('x')
y = ops.log(x)
z = ops.exp(y)
graph = GradGraph(z)
graph.getOutput({x: 1})
graph.getGradients(wrt=x)
return x.gradient == 1
def activ_fns():
x = Double('x')
z = ops.sigmoid(x)
graph = GradGraph(z)
graph.getOutput({x: 110.5})
graph.getGradients(wrt=x)
return x.gradient == 0
class TestCase(unittest.TestCase):
def test_simp_sum(self):
""" Simple Summation Test"""
self.assertTrue(simpSum())
def test_simp_sub(self):
""" Simple Subtraction Test"""
self.assertTrue(simpSub())
def test_simp_mul(self):
""" Simple Multiplication Test"""
self.assertTrue(simpMul())
def test_simp_div(self):
""" Simple Division Test"""
self.assertTrue(simpDiv())
def test_test1(self):
""" Miscellaneous Test"""
self.assertTrue(test1())
def test_div(self):
""" Division Test"""
self.assertTrue(divtest())
def test_short_grad(self):
""" Short Grad Test"""
self.assertTrue(gradTestShort())
def test_long_grad(self):
""" Long Gradient Test"""
self.assertTrue(gradTestLong())
def test_simp_grad(self):
""" Simple gradient test"""
self.assertTrue(gradTestSimple())
def test_ops(self):
""" Test Ops"""
self.assertTrue(testOps())
def test_activ_fn(self):
""" Activation Function test"""
self.assertTrue(activ_fns())
def test_tensor_sum(self):
""" Tensor Sum test"""
self.assertTrue(TensorSum())
def test_tensor_op(self):
""" Tensor operations test"""
self.assertTrue(TensorOp())
def test_dot_prod(self):
""" Dot Product"""
self.assertTrue(dotProduct())
def run_test():
suite = unittest.TestLoader().loadTestsFromTestCase(TestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
run_test()
| amartya18x/easyGrad | easyGrad/test.py | Python | apache-2.0 | 5,129 |
import django
import pytest
from django.db import models
from psqlextra.fields import HStoreField
from .fake_model import get_fake_model
@pytest.fixture
def model():
"""Test models, where the first model has a foreign key relationship to the
second."""
return get_fake_model({"title": HStoreField()})
@pytest.fixture
def modelobj(model):
"""Data for the test models, one row per model."""
return model.objects.create(title={"en": "english", "ar": "arabic"})
def test_query_values_hstore(model, modelobj):
"""Tests that selecting all the keys properly works and returns a.
:see:LocalizedValue instance.
"""
result = list(model.objects.values("title"))[0]
assert result["title"] == modelobj.title
def test_query_values_hstore_key(model, modelobj):
"""Tests whether selecting a single key from a :see:HStoreField using the
query set's .values() works properly."""
result = list(model.objects.values("title__en", "title__ar"))[0]
assert result["title__en"] == modelobj.title["en"]
assert result["title__ar"] == modelobj.title["ar"]
def test_query_values_list_hstore_key(model, modelobj):
"""Tests that selecting a single key from a :see:HStoreField using the
query set's .values_list() works properly."""
result = list(model.objects.values_list("title__en", "title__ar"))[0]
assert result[0] == modelobj.title["en"]
assert result[1] == modelobj.title["ar"]
@pytest.mark.skipif(
django.VERSION < (2, 1), reason="requires django 2.1 or newer"
)
def test_query_values_hstore_key_through_fk():
"""Tests whether selecting a single key from a :see:HStoreField using the
query set's .values() works properly when there's a foreign key
relationship involved."""
# this starting working in django 2.1
# see: https://github.com/django/django/commit/20bab2cf9d02a5c6477d8aac066a635986e0d3f3
fmodel = get_fake_model({"name": HStoreField()})
model = get_fake_model(
{"fk": models.ForeignKey(fmodel, on_delete=models.CASCADE)}
)
fobj = fmodel.objects.create(name={"en": "swen", "ar": "arabic swen"})
model.objects.create(fk=fobj)
result = list(model.objects.values("fk__name__ar"))[0]
assert result["fk__name__ar"] == fobj.name["ar"]
| SectorLabs/django-postgres-extra | tests/test_query_values.py | Python | mit | 2,282 |
from django.contrib import admin
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from registration.models import RegistrationProfile
class RegistrationAdmin(admin.ModelAdmin):
actions = ['activate_users', 'resend_activation_email']
list_display = ('user', 'activation_key_expired')
raw_id_fields = ['user']
search_fields = ('user__username', 'user__first_name', 'user__last_name')
def activate_users(self, request, queryset):
"""
        Activates the selected users, if they are not already
activated.
"""
for profile in queryset:
RegistrationProfile.objects.activate_user(profile.activation_key)
activate_users.short_description = _("Activate users")
def resend_activation_email(self, request, queryset):
"""
Re-sends activation emails for the selected users.
Note that this will *only* send activation emails for users
who are eligible to activate; emails will not be sent to users
whose activation keys have expired or who have already
activated.
"""
#if Site._meta.installed:
site = Site.objects.get_current()
#else:
# site = RequestSite(request)
for profile in queryset:
if not profile.activation_key_expired():
profile.send_activation_email(site)
resend_activation_email.short_description = _("Re-send activation emails")
admin.site.register(RegistrationProfile, RegistrationAdmin)
| xlk521/cloudguantou | registration/admin.py | Python | bsd-3-clause | 1,632 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#file:convert.py
import sqlite3
import re
import logging
import json
from database import Database
logger = logging.getLogger(__name__)
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(file="sqlconvert.log", format=FORMAT, level=logging.DEBUG)
__doc__='''Utility for mapping a SQL file into a JSON file'''
# for test purpose
result_path = "./"
#for test purpose: from form selected TABLES
SELECTED_TABLES = []
class SQLConverter(object):
def __init__(self, filename):
'''connect to db using .db file'''
self.conn = sqlite3.connect(filename)
self.filename = filename
self.db_name = re.split("/|\.",filename)[-2]
self.data = {}
#into MONGODB
self.db = Database(self.db_name)
def __connect__(self, filename):
try:
self.conn = sqlite3.connect(filename)
except:
logging.warning("Failed to connect to dbfile %s. No such a file" %filename)
def __close__(self):
'''close current connection'''
logging.info("Closing connection to db %s" %self.filename)
return self.conn.close()
def list_tables(self):
        '''list all authorized tables that have id and data columns and are in SELECTED_TABLES'''
curs = self.conn.cursor()
curs.execute("SELECT * FROM sqlite_master WHERE type='table';")
#print len([t[1] for t in curs.fetchall()])
self.tables = list()
for t in curs.fetchall():
print "Examining", t[1]
#print set(self.get_keys(t[1]))
data = set(self.get_keys(t[1]))
if set(["data", "id"]) <= data:
self.tables.append(t[1])
return self.tables
def get_values(self, key, table):
'''given a key in a table access to the value'''
curs = self.conn.cursor()
cmd = "SELECT %s FROM %s;" %(key,table)
curs.execute(cmd)
self.values = [v[0] for v in curs.fetchall()]
return self.values
def get_keys(self, table):
'''given a table get the keys (headers of the table)'''
curs = self.conn.cursor()
cmd = "SELECT sql from sqlite_master WHERE type = 'table' and name = '%s';" %table
curs.execute(cmd)
line = curs.fetchone()[0]
self.keys = []
self.data_type = []
raw_keys = re.search("\((\s)?(.*?)(\s)?\)", line)
if raw_keys is not None:
keys = raw_keys.group(2).split(", ")
for k in keys:
key_list = k.split(" ", 1)
try:
self.data_type.append(key_list[1])
self.keys.append(key_list[0])
except IndexError:
self.keys.extend(k.split(","))
for k in k.split(","):
self.data_type.append(" ")
return self.keys
def get_data_table(self, table):
'''given a selected table return a dict of key (header) and value of the table'''
data = []
headers = []
keys = self.get_keys(table)
keys_h = ",".join(keys)
#headers.append((table, len(keys), keys_h))
curs = self.conn.cursor()
cmd = "SELECT %s FROM %s" %(keys_h, table)
#try:
curs.execute(cmd)
for values in curs.fetchall():
data.append(dict(zip(keys, values)))
return data
def convert(self):
'''join the data given an id mapp or aggregation?'''
self.db.create_coll("results")
for id in self.db.data.distinct("id"):
for data in self.db.data.find({"id": id}):
print data
break
def get_data(self):
'''for now it insert raw file into MongoDB'''
self.db.create_coll("data")
for table in self.tables:
self.db.data.insert(self.get_data_table(table))
return self.db.data.count()
def populate_items(self, _id, table):
output = {}
curs = self.conn.cursor()
cmd = "SELECT * FROM %s WHERE id='%d';" %(table, _id)
self.get_keys(table)
try:
curs.execute(cmd)
output[_id] = {"tables": self.tables}
for n in curs.fetchall():
data = zip(self.keys,list(n))
output[_id] = [{k:v} for k,v in data]
except sqlite3.OperationalError:
cmd = "SELECT * FROM %s WHERE _id='%d';" %(table, _id)
try:
curs.execute(cmd)
for n in curs.fetchall():
data = zip(self.keys,list(n))
id = [{k:v} for k,v in data]
except sqlite3.OperationalError:
cmd = "SELECT * FROM %s WHERE id0='%d';" %(table, _id)
try:
curs.execute(cmd)
for n in curs.fetchall():
data = zip(self.keys,list(n))
print [{k:v} for k,v in data]
except sqlite3.OperationalError:
print "Warning, DB_table %s has no id" %table
def export2json(self):
data = {}
        #List all the keys (the headers of each column)
for tid, table in enumerate(self.tables):
            for xid,xdata in enumerate(self.get_data_table(table)):
data[xid] = {"tbl_name": table,"tid":tid, "id":xid, "data": xdata}
data_sources = "deep.txt"
feed=open(data_sources,'w')
#feed = open('sources/deepm2.txt','w')
feed.write(json.dumps(data, sort_keys=True,indent=4, separators=(',', ': ')))
def populate(self):
        '''Return any object according to its smallest unit of storage'''
data = {}
#mapping data
for table in self.tables:
            for tid, values in enumerate(self.get_data_table(table)):
#print "id_table",tid, values
# data[tid] = {"tbl_name": table, "tid":tid, "data": values}
for kid, k in enumerate(self.get_keys(table)):
#print "Key", k
for vid, v in enumerate(self.get_values(k,table)):
#numero de table, numero de clé, numero de valeur, clé, valeur
print {"table_id":tid,"table_name": table, "key_id": kid,"key":k, "value_id":vid, "value":v}
# data[vid].update({k:v})
# with open("deep.txt", "w") as f:
# print data[xid]
#export to file
# data_sources = "deep.txt"
# feed=open(data_sources,'w')
# #feed = open('sources/deepm2.txt','w')
# feed.write(json.dumps(data, sort_keys=True,indent=4, separators=(',', ': ')))
return
if __name__=="__main__":
pass
#~ db = SQLConverter("cop-clean.db")
#~ db.populate()
| cortext/antonio | sql2json.py | Python | mit | 5,710 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.xgboost import H2OXGBoostEstimator
#----------------------------------------------------------------------
# Purpose: Smoke-test basic XGBoost operation under Hadoop.
#----------------------------------------------------------------------
def createData(nrows, ncols):
hdfs_name_node = pyunit_utils.hadoop_namenode()
hdfs_airlines_file = "/datasets/airlines_all.05p.csv"
url = "hdfs://{0}{1}".format(hdfs_name_node, hdfs_airlines_file)
airlines = h2o.import_file(url)
myX = ["Year", "Month", "DayofMonth", "DayOfWeek", "Distance"]
myY = "IsDepDelayed"
allCols = list(myX)
allCols.append(myY)
airlines = airlines[allCols]
num_new_features = ncols - airlines.ncol
sample_data = h2o.create_frame(rows = nrows, cols = num_new_features, categorical_fraction = 0,
seed = 1234, seed_for_column_types = 1234)
new_rows = nrows - airlines.nrow
if (nrows > 0):
extra_rows = airlines[0:nrows, : ]
airlines = airlines.rbind(extra_rows)
airlines = airlines[0:nrows, : ]
full_data = airlines.cbind(sample_data)
return full_data
def xgboost_estimation():
if ("XGBoost" not in h2o.cluster().list_all_extensions()):
print("XGBoost extension is not present. Skipping test. . .")
return
# Check if we are running inside the H2O network by seeing if we can touch
# the namenode.
hadoop_namenode_is_accessible = pyunit_utils.hadoop_namenode_is_accessible()
if not hadoop_namenode_is_accessible:
raise EnvironmentError("Hadoop namenode is not accessible")
hdfs_name_node = pyunit_utils.hadoop_namenode()
full_data = createData(500000, 500)
myX = list(full_data.col_names)
myX.remove("IsDepDelayed")
xgb = H2OXGBoostEstimator(seed = 42, tree_method = "approx")
    xgb.train(y = "IsDepDelayed", x = myX[0:480], training_frame = full_data, model_id = "xgboost")
    print(xgb)
    # the fitted estimator exposes predict() and model_performance() directly
    pred = xgb.predict(full_data)
    perf = xgb.model_performance(full_data)
    return perf
if __name__ == "__main__":
pyunit_utils.standalone_test(xgboost_estimation)
else:
xgboost_estimation()
| spennihana/h2o-3 | h2o-py/tests/testdir_hdfs/pyunit_INTERNAL_XGBoostEstimation.py | Python | apache-2.0 | 2,346 |
from .pyfirmata import *
from .boards import BOARDS
__version__ = '1.0.3' # Use bumpversion!
# shortcut classes
class Arduino(Board):
"""
A board that will set itself up as a normal Arduino.
"""
def __init__(self, *args, **kwargs):
args = list(args)
args.append(BOARDS['arduino'])
super(Arduino, self).__init__(*args, **kwargs)
def __str__(self):
return "Arduino {0.name} on {0.sp.port}".format(self)
class ArduinoMega(Board):
"""
A board that will set itself up as an Arduino Mega.
"""
def __init__(self, *args, **kwargs):
args = list(args)
args.append(BOARDS['arduino_mega'])
super(ArduinoMega, self).__init__(*args, **kwargs)
def __str__(self):
return "Arduino Mega {0.name} on {0.sp.port}".format(self)
class ArduinoDue(Board):
"""
A board that will set itself up as an Arduino Due.
"""
def __init__(self, *args, **kwargs):
args = list(args)
args.append(BOARDS['arduino_due'])
super(ArduinoDue, self).__init__(*args, **kwargs)
def __str__(self):
return "Arduino Due {0.name} on {0.sp.port}".format(self)
| jochasinga/pyFirmata | pyfirmata/__init__.py | Python | mit | 1,176 |
import pytest
import pandas as pd
from lcdblib.pandas import utils
@pytest.fixture(scope='session')
def sample_table():
metadata = {
'sample': ['one', 'two'],
'tissue': ['ovary', 'testis']
}
return pd.DataFrame(metadata)
def test_cartesian_df(sample_table):
df2 = pd.DataFrame({'num': [100, 200]})
result = utils.cartesian_product(sample_table, df2)
# Compare a slice
sf = result.iloc[0, :].sort_index()
sf.name = ''
test_sf = pd.Series({'sample': 'one', 'tissue': 'ovary', 'num': 100}, name='').sort_index()
assert sf.equals(test_sf)
assert result.shape == (4, 3)
def test_cartesian_sf(sample_table):
sf2 = pd.Series([100, 200], name='num')
result = utils.cartesian_product(sample_table, sf2)
# Compare a slice
sf = result.iloc[0, :].sort_index()
sf.name = ''
test_sf = pd.Series({'sample': 'one', 'tissue': 'ovary', 'num': 100}, name='').sort_index()
assert sf.equals(test_sf)
assert result.shape == (4, 3)
def test_cartesian_dict(sample_table):
df2 = {'num': [100, 200]}
result = utils.cartesian_product(sample_table, df2)
# Compare a slice
sf = result.iloc[0, :].sort_index()
sf.name = ''
test_sf = pd.Series({'sample': 'one', 'tissue': 'ovary', 'num': 100}, name='').sort_index()
assert sf.equals(test_sf)
assert result.shape == (4, 3)
| lcdb/lcdblib | tests/test_pandas_utils.py | Python | mit | 1,376 |
from Constants import *
from LoewnerRunFactory import LoewnerRunFactory
from prompt_toolkit import PromptSession
from os import popen
from numpy import arange, linspace
from InterfaceMode import *
class CommandLineInterface:
def __init__(self):
# Create a dictionary of input-function pairs
self.basic_responses = {
HELP_FULL : self.print_help_message,
HELP_SHORT : self.print_help_message,
QUIT_FULL : self.exit_loewner,
QUIT_SHORT : self.exit_loewner,
EXIT : self.exit_loewner,
}
# Create a dictionary of input-message pairs for the help command
self.help_responses = {
HELP_FULL : HELPMSG,
HELP_SHORT : HELPMSG,
}
# Create a dictionary of input-object pairs for the main algorithms/modes
self.algorithm_responses = {
FORWARD_SINGLE_MODE : ForwardSingle,
INVERSE_SINGLE_MODE : InverseSingle,
EXACT_INVERSE_MODE : ExactInverse,
TWO_TRACE_MODE : TwoTrace,
WEDGE_TRACE_MODE : WedgeAlpha,
EXACT_LINEAR : ExactLinear,
EXACT_CONST : ExactConstant,
EXACT_SQRT : ExactSquareRoot,
}
# Create prompt object.
self.session = PromptSession(message=LOEWNER_PROMPT)
# Find the size of the terminal
rows, columns = popen('stty size', 'r').read().split()
# Declare a string for placing text in the center of the terminal
self.shift_string = "{:^" + str(columns) + "}"
# Create an empty variable for representing the program mode
self.program_mode = None
def exit_loewner(self,unused_arg):
# Exit the program
exit()
def is_blank(self,user_input):
return user_input == ""
def print_help_message(self,msg_type=HELPMSG):
# Determine the location of the help file
help_file_loc = self.help_responses[msg_type]
# Open the help file
help_file = open(help_file_loc)
# Print the help file
for line in help_file:
print(line.strip('\n'))
# Add a blank line to make things look neater
print("")
# Clear the input to indicate that it was interpreted successfully
return ""
def special_text(self, style, string):
styles = {"PURPLE" : '\033[95m',
"CYAN" : '\033[96m',
"DARKCYAN" : '\033[36m',
"BLUE" : '\033[94m',
"GREEN" : '\033[92m',
"YELLOW" : '\033[93m',
"RED" : '\033[91m',
"BOLD" : '\033[1m',
"UNDERLINE" : '\033[4m',}
return styles[style] + string + '\033[0m'
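    # e.g. special_text("BOLD", "*Loewner's Evolutions*") wraps the string in the ANSI
    # bold escape code and resets formatting with '\033[0m' afterwards.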
def standard_input(self):
# Await the 'standard' input - help/quit/print driving functions/etc
while True:
user_input = self.session.prompt()
if user_input in self.basic_responses:
user_input = self.basic_responses[user_input](user_input)
return user_input
def bad_input_message(self,user_input):
# Print a message for unexpected inputs
print("Unrecognised instruction: " + user_input + " (Press h for help)")
def run_algorithm(self,user_input):
return user_input == START_ALG
# Run the forward and inverse single-trace algorithms
def run_loewner(self):
# Run the prompt
while True:
            # Await user input
user_input = self.standard_input()
# Continue if one of the standard inputs or a blank line was entered
if self.is_blank(user_input):
continue
# Check for 'go back' instruction
if user_input in BACK_COMMANDS:
return
# Attempt to change the LoewenerRunFactory parameters
if self.program_mode.change_parameters(user_input):
continue
# Get a list of driving functions (if any were given)
if self.program_mode.change_driving_functions(user_input):
continue
# Print the error message if something went wrong
if self.program_mode.show_error(user_input):
continue
# Check if the start command was given
if self.run_algorithm(user_input):
# Validate the run parameters that were given and create a LoewnerRunFactory
if not self.program_mode.validate_settings():
print("Could not create validate configuration: Bad or incomplete parameters given. Enter 'error' for more information.")
continue
else:
print("Successfully validated configuration. Executing runs...")
self.program_mode.execute()
return
# Print the bad input message
self.bad_input_message(user_input)
def show_start_screen(self):
# Open the start message file
start_file = open(START_MSG)
# Print the help file
for line in start_file:
print(line.strip('\n'))
# Add a blank line to make things look neater
print("")
print(self.shift_string.format(self.special_text("BOLD","*Loewner's Evolutions*")))
print(self.shift_string.format("Numerical and exact solutions to Loewner's equation for two and single-trace evolutions."))
# Add a blank line to make things look neater
print("")
# Clear the input to indicate that it was interpreted successfully
return ""
def start(self):
# Show a start screen
self.show_start_screen()
# Run the prompt
while True:
# Check if the input matches with the 'standard' commands
user_input = self.standard_input()
# Continue if one of the standard commands was entered
if self.is_blank(user_input):
continue
            # Check if the response corresponds with any of the Loewner algorithms
if user_input in self.algorithm_responses:
# Create a program mode object
self.program_mode = self.algorithm_responses[user_input]()
# Prepare and execute the algorithm of the current mode
self.run_loewner()
# Delete the program mode object to allow the use of a different algorithm
self.program_mode = None
continue
# Print a message if an invalid response was given
self.bad_input_message(user_input)
| ucapdak/loewner | main/PythonTools/Interface.py | Python | mit | 7,161 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import json
import logging
import re
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime, timedelta
from distutils.version import LooseVersion
from multiprocessing.pool import ThreadPool
from typing import Any, cast, Dict, Iterable, List, Optional, Set, Tuple, Union
import pandas as pd
import sqlalchemy as sa
from dateutil.parser import parse as dparse
from flask import escape, Markup
from flask_appbuilder import Model
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.security.sqla.models import User
from flask_babel import lazy_gettext as _
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
String,
Table,
Text,
UniqueConstraint,
)
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref, relationship, Session
from sqlalchemy.sql import expression
from sqlalchemy_utils import EncryptedType
from superset import conf, db, is_feature_enabled, security_manager
from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
from superset.constants import NULL_STRING
from superset.exceptions import SupersetException
from superset.models.core import Database
from superset.models.helpers import AuditMixinNullable, ImportMixin, QueryResult
from superset.typing import FilterValues, Granularity, Metric, QueryObjectDict
from superset.utils import core as utils, import_datasource
try:
import requests
from pydruid.client import PyDruid
from pydruid.utils.aggregators import count
from pydruid.utils.dimensions import (
MapLookupExtraction,
RegexExtraction,
RegisteredLookupExtraction,
TimeFormatExtraction,
)
from pydruid.utils.filters import Bound, Dimension, Filter
from pydruid.utils.having import Aggregation, Having
from pydruid.utils.postaggregator import (
Const,
Field,
HyperUniqueCardinality,
Postaggregator,
Quantile,
Quantiles,
)
except ImportError:
pass
try:
from superset.utils.core import DimSelector, DTTM_ALIAS, FilterOperator, flasher
except ImportError:
pass
IS_SIP_38 = is_feature_enabled("SIP_38_VIZ_REARCHITECTURE")
DRUID_TZ = conf.get("DRUID_TZ")
POST_AGG_TYPE = "postagg"
metadata = Model.metadata # pylint: disable=no-member
logger = logging.getLogger(__name__)
try:
# Postaggregator might not have been imported.
class JavascriptPostAggregator(Postaggregator):
def __init__(self, name: str, field_names: List[str], function: str) -> None:
self.post_aggregator = {
"type": "javascript",
"fieldNames": field_names,
"name": name,
"function": function,
}
self.name = name
class CustomPostAggregator(Postaggregator):
"""A way to allow users to specify completely custom PostAggregators"""
def __init__(self, name: str, post_aggregator: Dict[str, Any]) -> None:
self.name = name
self.post_aggregator = post_aggregator
except NameError:
pass
# Function wrapper because bound methods cannot
# be passed to processes
def _fetch_metadata_for(datasource: "DruidDatasource") -> Optional[Dict[str, Any]]:
return datasource.latest_metadata()
class DruidCluster(Model, AuditMixinNullable, ImportMixin):
"""ORM object referencing the Druid clusters"""
__tablename__ = "clusters"
type = "druid"
id = Column(Integer, primary_key=True)
verbose_name = Column(String(250), unique=True)
# short unique name, used in permissions
cluster_name = Column(String(250), unique=True, nullable=False)
broker_host = Column(String(255))
broker_port = Column(Integer, default=8082)
broker_endpoint = Column(String(255), default="druid/v2")
metadata_last_refreshed = Column(DateTime)
cache_timeout = Column(Integer)
broker_user = Column(String(255))
broker_pass = Column(EncryptedType(String(255), conf.get("SECRET_KEY")))
export_fields = [
"cluster_name",
"broker_host",
"broker_port",
"broker_endpoint",
"cache_timeout",
"broker_user",
]
update_from_object_fields = export_fields
export_children = ["datasources"]
def __repr__(self) -> str:
return self.verbose_name if self.verbose_name else self.cluster_name
def __html__(self) -> str:
return self.__repr__()
@property
def data(self) -> Dict[str, Any]:
return {"id": self.id, "name": self.cluster_name, "backend": "druid"}
@staticmethod
def get_base_url(host: str, port: int) -> str:
if not re.match("http(s)?://", host):
host = "http://" + host
url = "{0}:{1}".format(host, port) if port else host
return url
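    # Illustrative example (not from the original source):
    # get_base_url("localhost", 8082) -> "http://localhost:8082"; a host already
    # starting with http:// or https:// is left untouched.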
def get_base_broker_url(self) -> str:
base_url = self.get_base_url(self.broker_host, self.broker_port)
return f"{base_url}/{self.broker_endpoint}"
def get_pydruid_client(self) -> "PyDruid":
cli = PyDruid(
self.get_base_url(self.broker_host, self.broker_port), self.broker_endpoint
)
if self.broker_user and self.broker_pass:
cli.set_basic_auth_credentials(self.broker_user, self.broker_pass)
return cli
def get_datasources(self) -> List[str]:
endpoint = self.get_base_broker_url() + "/datasources"
auth = requests.auth.HTTPBasicAuth(self.broker_user, self.broker_pass)
return json.loads(requests.get(endpoint, auth=auth).text)
def get_druid_version(self) -> str:
endpoint = self.get_base_url(self.broker_host, self.broker_port) + "/status"
auth = requests.auth.HTTPBasicAuth(self.broker_user, self.broker_pass)
return json.loads(requests.get(endpoint, auth=auth).text)["version"]
@property # type: ignore
@utils.memoized
def druid_version(self) -> str:
return self.get_druid_version()
def refresh_datasources(
self,
datasource_name: Optional[str] = None,
merge_flag: bool = True,
refresh_all: bool = True,
) -> None:
"""Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
"""
ds_list = self.get_datasources()
denylist = conf.get("DRUID_DATA_SOURCE_DENYLIST", [])
ds_refresh: List[str] = []
if not datasource_name:
ds_refresh = list(filter(lambda ds: ds not in denylist, ds_list))
elif datasource_name not in denylist and datasource_name in ds_list:
ds_refresh.append(datasource_name)
else:
return
self.refresh(ds_refresh, merge_flag, refresh_all)
def refresh(
self, datasource_names: List[str], merge_flag: bool, refresh_all: bool
) -> None:
"""
Fetches metadata for the specified datasources and
        merges it into the Superset database
"""
session = db.session
ds_list = (
session.query(DruidDatasource)
.filter(DruidDatasource.cluster_id == self.id)
.filter(DruidDatasource.datasource_name.in_(datasource_names))
)
ds_map = {ds.name: ds for ds in ds_list}
for ds_name in datasource_names:
datasource = ds_map.get(ds_name, None)
if not datasource:
datasource = DruidDatasource(datasource_name=ds_name)
with session.no_autoflush:
session.add(datasource)
flasher(_("Adding new datasource [{}]").format(ds_name), "success")
ds_map[ds_name] = datasource
elif refresh_all:
flasher(_("Refreshing datasource [{}]").format(ds_name), "info")
else:
del ds_map[ds_name]
continue
datasource.cluster = self
datasource.merge_flag = merge_flag
session.flush()
        # Prepare multithreaded execution
pool = ThreadPool()
ds_refresh = list(ds_map.values())
metadata = pool.map(_fetch_metadata_for, ds_refresh)
pool.close()
pool.join()
for i in range(0, len(ds_refresh)):
datasource = ds_refresh[i]
cols = metadata[i]
if cols:
col_objs_list = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name.in_(cols.keys()))
)
col_objs = {col.column_name: col for col in col_objs_list}
for col in cols:
if col == "__time": # skip the time column
continue
col_obj = col_objs.get(col)
if not col_obj:
col_obj = DruidColumn(
datasource_id=datasource.id, column_name=col
)
with session.no_autoflush:
session.add(col_obj)
col_obj.type = cols[col]["type"]
col_obj.datasource = datasource
if col_obj.type == "STRING":
col_obj.groupby = True
col_obj.filterable = True
datasource.refresh_metrics()
session.commit()
@hybrid_property
def perm(self) -> str:
return f"[{self.cluster_name}].(id:{self.id})"
@perm.expression # type: ignore
def perm(cls) -> str: # pylint: disable=no-self-argument
return "[" + cls.cluster_name + "].(id:" + expression.cast(cls.id, String) + ")"
def get_perm(self) -> str:
return self.perm # type: ignore
@property
def name(self) -> str:
return self.verbose_name or self.cluster_name
@property
def unique_name(self) -> str:
return self.verbose_name or self.cluster_name
sa.event.listen(DruidCluster, "after_insert", security_manager.set_perm)
sa.event.listen(DruidCluster, "after_update", security_manager.set_perm)
class DruidColumn(Model, BaseColumn):
"""ORM model for storing Druid datasource column metadata"""
__tablename__ = "columns"
__table_args__ = (UniqueConstraint("column_name", "datasource_id"),)
datasource_id = Column(Integer, ForeignKey("datasources.id"))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
"DruidDatasource",
backref=backref("columns", cascade="all, delete-orphan"),
enable_typechecks=False,
)
dimension_spec_json = Column(Text)
export_fields = [
"datasource_id",
"column_name",
"is_active",
"type",
"groupby",
"filterable",
"description",
"dimension_spec_json",
"verbose_name",
]
update_from_object_fields = export_fields
export_parent = "datasource"
def __repr__(self) -> str:
return self.column_name or str(self.id)
@property
def expression(self) -> str:
return self.dimension_spec_json
@property
def dimension_spec(self) -> Optional[Dict[str, Any]]:
if self.dimension_spec_json:
return json.loads(self.dimension_spec_json)
return None
def get_metrics(self) -> Dict[str, "DruidMetric"]:
metrics = {
"count": DruidMetric(
metric_name="count",
verbose_name="COUNT(*)",
metric_type="count",
json=json.dumps({"type": "count", "name": "count"}),
)
}
return metrics
def refresh_metrics(self) -> None:
"""Refresh metrics based on the column metadata"""
metrics = self.get_metrics()
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == self.datasource_id)
.filter(DruidMetric.metric_name.in_(metrics.keys()))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
dbmetric = dbmetrics.get(metric.metric_name)
if dbmetric:
for attr in ["json", "metric_type"]:
setattr(dbmetric, attr, getattr(metric, attr))
else:
with db.session.no_autoflush:
metric.datasource_id = self.datasource_id
db.session.add(metric)
@classmethod
def import_obj(cls, i_column: "DruidColumn") -> "DruidColumn":
def lookup_obj(lookup_column: DruidColumn) -> Optional[DruidColumn]:
return (
db.session.query(DruidColumn)
.filter(
DruidColumn.datasource_id == lookup_column.datasource_id,
DruidColumn.column_name == lookup_column.column_name,
)
.first()
)
return import_datasource.import_simple_obj(db.session, i_column, lookup_obj)
class DruidMetric(Model, BaseMetric):
"""ORM object referencing Druid metrics for a datasource"""
__tablename__ = "metrics"
__table_args__ = (UniqueConstraint("metric_name", "datasource_id"),)
datasource_id = Column(Integer, ForeignKey("datasources.id"))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship(
"DruidDatasource",
backref=backref("metrics", cascade="all, delete-orphan"),
enable_typechecks=False,
)
json = Column(Text, nullable=False)
export_fields = [
"metric_name",
"verbose_name",
"metric_type",
"datasource_id",
"json",
"description",
"d3format",
"warning_text",
]
update_from_object_fields = export_fields
export_parent = "datasource"
@property
def expression(self) -> Column:
return self.json
@property
def json_obj(self) -> Dict[str, Any]:
try:
obj = json.loads(self.json)
except Exception:
obj = {}
return obj
@property
def perm(self) -> Optional[str]:
return (
("{parent_name}.[{obj.metric_name}](id:{obj.id})").format(
obj=self, parent_name=self.datasource.full_name
)
if self.datasource
else None
)
def get_perm(self) -> Optional[str]:
return self.perm
@classmethod
def import_obj(cls, i_metric: "DruidMetric") -> "DruidMetric":
def lookup_obj(lookup_metric: DruidMetric) -> Optional[DruidMetric]:
return (
db.session.query(DruidMetric)
.filter(
DruidMetric.datasource_id == lookup_metric.datasource_id,
DruidMetric.metric_name == lookup_metric.metric_name,
)
.first()
)
return import_datasource.import_simple_obj(db.session, i_metric, lookup_obj)
druiddatasource_user = Table(
"druiddatasource_user",
metadata,
Column("id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("ab_user.id")),
Column("datasource_id", Integer, ForeignKey("datasources.id")),
)
class DruidDatasource(Model, BaseDatasource):
"""ORM object referencing Druid datasources (tables)"""
__tablename__ = "datasources"
__table_args__ = (UniqueConstraint("datasource_name", "cluster_id"),)
type = "druid"
query_language = "json"
cluster_class = DruidCluster
metric_class = DruidMetric
column_class = DruidColumn
owner_class = security_manager.user_model
baselink = "druiddatasourcemodelview"
# Columns
datasource_name = Column(String(255), nullable=False)
is_hidden = Column(Boolean, default=False)
filter_select_enabled = Column(Boolean, default=True) # override default
fetch_values_from = Column(String(100))
cluster_id = Column(Integer, ForeignKey("clusters.id"), nullable=False)
cluster = relationship(
"DruidCluster", backref="datasources", foreign_keys=[cluster_id]
)
owners = relationship(
owner_class, secondary=druiddatasource_user, backref="druiddatasources"
)
export_fields = [
"datasource_name",
"is_hidden",
"description",
"default_endpoint",
"cluster_id",
"offset",
"cache_timeout",
"params",
"filter_select_enabled",
]
update_from_object_fields = export_fields
export_parent = "cluster"
export_children = ["columns", "metrics"]
@property
def cluster_name(self) -> str:
cluster = (
self.cluster
or db.session.query(DruidCluster).filter_by(id=self.cluster_id).one()
)
return cluster.cluster_name
@property
def database(self) -> DruidCluster:
return self.cluster
@property
def connection(self) -> str:
return str(self.database)
@property
def num_cols(self) -> List[str]:
return [c.column_name for c in self.columns if c.is_numeric]
@property
def name(self) -> str:
return self.datasource_name
@property
def datasource_type(self) -> str:
return self.type
@property
def schema(self) -> Optional[str]:
ds_name = self.datasource_name or ""
name_pieces = ds_name.split(".")
if len(name_pieces) > 1:
return name_pieces[0]
else:
return None
def get_schema_perm(self) -> Optional[str]:
"""Returns schema permission if present, cluster one otherwise."""
return security_manager.get_schema_perm(self.cluster, self.schema)
def get_perm(self) -> str:
return ("[{obj.cluster_name}].[{obj.datasource_name}]" "(id:{obj.id})").format(
obj=self
)
def update_from_object(self, obj: Dict[str, Any]) -> None:
raise NotImplementedError()
@property
def link(self) -> Markup:
name = escape(self.datasource_name)
return Markup(f'<a href="{self.url}">{name}</a>')
@property
def full_name(self) -> str:
return utils.get_datasource_full_name(self.cluster_name, self.datasource_name)
@property
def time_column_grains(self) -> Dict[str, List[str]]:
return {
"time_columns": [
"all",
"5 seconds",
"30 seconds",
"1 minute",
"5 minutes",
"30 minutes",
"1 hour",
"6 hour",
"1 day",
"7 days",
"week",
"week_starting_sunday",
"week_ending_saturday",
"month",
"quarter",
"year",
],
"time_grains": ["now"],
}
def __repr__(self) -> str:
return self.datasource_name
@renders("datasource_name")
def datasource_link(self) -> str:
url = f"/superset/explore/{self.type}/{self.id}/"
name = escape(self.datasource_name)
return Markup(f'<a href="{url}">{name}</a>')
def get_metric_obj(self, metric_name: str) -> Dict[str, Any]:
return [m.json_obj for m in self.metrics if m.metric_name == metric_name][0]
@classmethod
def import_obj(
cls, i_datasource: "DruidDatasource", import_time: Optional[int] = None
) -> int:
"""Imports the datasource from the object to the database.
        Metrics, columns and the datasource will be overridden if they exist.
        This function can be used to import/export dashboards between multiple
        superset instances. Audit metadata isn't copied over.
"""
def lookup_datasource(d: DruidDatasource) -> Optional[DruidDatasource]:
return (
db.session.query(DruidDatasource)
.filter(
DruidDatasource.datasource_name == d.datasource_name,
DruidDatasource.cluster_id == d.cluster_id,
)
.first()
)
def lookup_cluster(d: DruidDatasource) -> Optional[DruidCluster]:
return db.session.query(DruidCluster).filter_by(id=d.cluster_id).first()
return import_datasource.import_datasource(
db.session, i_datasource, lookup_cluster, lookup_datasource, import_time
)
def latest_metadata(self) -> Optional[Dict[str, Any]]:
"""Returns segment metadata from the latest segment"""
logger.info("Syncing datasource [{}]".format(self.datasource_name))
client = self.cluster.get_pydruid_client()
try:
results = client.time_boundary(datasource=self.datasource_name)
except IOError:
results = None
if results:
max_time = results[0]["result"]["maxTime"]
max_time = dparse(max_time)
else:
max_time = datetime.now()
        # Query segmentMetadata for the past 7 days. For Druid versions older
        # than 0.8.2, the right bound is set to more than 1 day ago to exclude
        # realtime segments, which triggered a bug (fixed in Druid 0.8.2).
# https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
lbound = (max_time - timedelta(days=7)).isoformat()
if LooseVersion(self.cluster.druid_version) < LooseVersion("0.8.2"):
rbound = (max_time - timedelta(1)).isoformat()
else:
rbound = max_time.isoformat()
segment_metadata = None
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + "/" + rbound,
merge=self.merge_flag,
analysisTypes=[],
)
except Exception as ex:
logger.warning("Failed first attempt to get latest segment")
logger.exception(ex)
if not segment_metadata:
# if no segments in the past 7 days, look at all segments
lbound = datetime(1901, 1, 1).isoformat()[:10]
if LooseVersion(self.cluster.druid_version) < LooseVersion("0.8.2"):
rbound = datetime.now().isoformat()
else:
rbound = datetime(2050, 1, 1).isoformat()[:10]
try:
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=lbound + "/" + rbound,
merge=self.merge_flag,
analysisTypes=[],
)
except Exception as ex:
logger.warning("Failed 2nd attempt to get latest segment")
logger.exception(ex)
if segment_metadata:
return segment_metadata[-1]["columns"]
return None
def refresh_metrics(self) -> None:
for col in self.columns:
col.refresh_metrics()
@classmethod
def sync_to_db_from_config(
cls,
druid_config: Dict[str, Any],
user: User,
cluster: DruidCluster,
refresh: bool = True,
) -> None:
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session
datasource = (
session.query(cls).filter_by(datasource_name=druid_config["name"]).first()
)
# Create a new datasource.
if not datasource:
datasource = cls(
datasource_name=druid_config["name"],
cluster=cluster,
owners=[user],
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
elif not refresh:
return
dimensions = druid_config["dimensions"]
col_objs = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name.in_(dimensions))
)
col_objs = {col.column_name: col for col in col_objs}
for dim in dimensions:
col_obj = col_objs.get(dim, None)
if not col_obj:
col_obj = DruidColumn(
datasource_id=datasource.id,
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type="STRING",
datasource=datasource,
)
session.add(col_obj)
# Import Druid metrics
metric_objs = (
session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(
DruidMetric.metric_name.in_(
spec["name"] for spec in druid_config["metrics_spec"]
)
)
)
metric_objs = {metric.metric_name: metric for metric in metric_objs}
for metric_spec in druid_config["metrics_spec"]:
metric_name = metric_spec["name"]
metric_type = metric_spec["type"]
metric_json = json.dumps(metric_spec)
if metric_type == "count":
metric_type = "longSum"
metric_json = json.dumps(
{"type": "longSum", "name": metric_name, "fieldName": metric_name}
)
metric_obj = metric_objs.get(metric_name, None)
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name="%s(%s)" % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
"Imported from the airolap config dir for %s"
% druid_config["name"]
),
)
session.add(metric_obj)
session.commit()
@staticmethod
def time_offset(granularity: Granularity) -> int:
if granularity == "week_ending_saturday":
return 6 * 24 * 3600 * 1000 # 6 days
return 0
@classmethod
def get_datasource_by_name(
cls, session: Session, datasource_name: str, schema: str, database_name: str
) -> Optional["DruidDatasource"]:
query = (
session.query(cls)
.join(DruidCluster)
.filter(cls.datasource_name == datasource_name)
.filter(DruidCluster.cluster_name == database_name)
)
return query.first()
# uses https://en.wikipedia.org/wiki/ISO_8601
# http://druid.io/docs/0.8.0/querying/granularities.html
# TODO: pass origin from the UI
@staticmethod
def granularity(
period_name: str, timezone: Optional[str] = None, origin: Optional[str] = None
) -> Union[Dict[str, str], str]:
if not period_name or period_name == "all":
return "all"
iso_8601_dict = {
"5 seconds": "PT5S",
"30 seconds": "PT30S",
"1 minute": "PT1M",
"5 minutes": "PT5M",
"30 minutes": "PT30M",
"1 hour": "PT1H",
"6 hour": "PT6H",
"one day": "P1D",
"1 day": "P1D",
"7 days": "P7D",
"week": "P1W",
"week_starting_sunday": "P1W",
"week_ending_saturday": "P1W",
"month": "P1M",
"quarter": "P3M",
"year": "P1Y",
}
granularity = {"type": "period"}
if timezone:
granularity["timeZone"] = timezone
if origin:
dttm = utils.parse_human_datetime(origin)
assert dttm
granularity["origin"] = dttm.isoformat()
if period_name in iso_8601_dict:
granularity["period"] = iso_8601_dict[period_name]
if period_name in ("week_ending_saturday", "week_starting_sunday"):
# use Sunday as start of the week
granularity["origin"] = "2016-01-03T00:00:00"
elif not isinstance(period_name, str):
granularity["type"] = "duration"
granularity["duration"] = period_name
elif period_name.startswith("P"):
# identify if the string is the iso_8601 period
granularity["period"] = period_name
else:
granularity["type"] = "duration"
granularity["duration"] = (
utils.parse_human_timedelta(period_name).total_seconds() # type: ignore
* 1000
)
return granularity
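    # Illustrative mapping, derived from the table above (values are examples):
    #
    #   DruidDatasource.granularity("30 minutes", timezone="UTC")
    #   # -> {"type": "period", "timeZone": "UTC", "period": "PT30M"}
    #   DruidDatasource.granularity("all")
    #   # -> "all"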
@staticmethod
def get_post_agg(mconf: Dict[str, Any]) -> "Postaggregator":
"""
        For a metric specified as `postagg`, returns the
        corresponding pydruid post-aggregation object.
"""
if mconf.get("type") == "javascript":
return JavascriptPostAggregator(
name=mconf.get("name", ""),
field_names=mconf.get("fieldNames", []),
function=mconf.get("function", ""),
)
elif mconf.get("type") == "quantile":
return Quantile(mconf.get("name", ""), mconf.get("probability", ""))
elif mconf.get("type") == "quantiles":
return Quantiles(mconf.get("name", ""), mconf.get("probabilities", ""))
elif mconf.get("type") == "fieldAccess":
return Field(mconf.get("name"))
elif mconf.get("type") == "constant":
return Const(mconf.get("value"), output_name=mconf.get("name", ""))
elif mconf.get("type") == "hyperUniqueCardinality":
return HyperUniqueCardinality(mconf.get("name"))
elif mconf.get("type") == "arithmetic":
return Postaggregator(
mconf.get("fn", "/"), mconf.get("fields", []), mconf.get("name", "")
)
else:
return CustomPostAggregator(mconf.get("name", ""), mconf)
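    # For example (illustrative), a metric whose JSON is
    #   {"type": "quantile", "name": "p95", "probability": 0.95}
    # resolves to Quantile("p95", 0.95), while any unrecognized type falls
    # through to CustomPostAggregator with the raw config dictionary.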
@staticmethod
def find_postaggs_for(
postagg_names: Set[str], metrics_dict: Dict[str, DruidMetric]
) -> List[DruidMetric]:
"""Return a list of metrics that are post aggregations"""
postagg_metrics = [
metrics_dict[name]
for name in postagg_names
if metrics_dict[name].metric_type == POST_AGG_TYPE
]
# Remove post aggregations that were found
for postagg in postagg_metrics:
postagg_names.remove(postagg.metric_name)
return postagg_metrics
@staticmethod
def recursive_get_fields(_conf: Dict[str, Any]) -> List[str]:
_type = _conf.get("type")
_field = _conf.get("field")
_fields = _conf.get("fields")
field_names = []
if _type in ["fieldAccess", "hyperUniqueCardinality", "quantile", "quantiles"]:
field_names.append(_conf.get("fieldName", ""))
if _field:
field_names += DruidDatasource.recursive_get_fields(_field)
if _fields:
for _f in _fields:
field_names += DruidDatasource.recursive_get_fields(_f)
return list(set(field_names))
@staticmethod
def resolve_postagg(
postagg: DruidMetric,
post_aggs: Dict[str, Any],
agg_names: Set[str],
visited_postaggs: Set[str],
metrics_dict: Dict[str, DruidMetric],
) -> None:
mconf = postagg.json_obj
required_fields = set(
DruidDatasource.recursive_get_fields(mconf) + mconf.get("fieldNames", [])
)
# Check if the fields are already in aggs
        # or are previous postaggs
required_fields = set(
field
for field in required_fields
if field not in visited_postaggs and field not in agg_names
)
# First try to find postaggs that match
if len(required_fields) > 0:
missing_postaggs = DruidDatasource.find_postaggs_for(
required_fields, metrics_dict
)
for missing_metric in required_fields:
agg_names.add(missing_metric)
for missing_postagg in missing_postaggs:
# Add to visited first to avoid infinite recursion
                # if post aggregations are cyclically dependent
visited_postaggs.add(missing_postagg.metric_name)
for missing_postagg in missing_postaggs:
DruidDatasource.resolve_postagg(
missing_postagg,
post_aggs,
agg_names,
visited_postaggs,
metrics_dict,
)
post_aggs[postagg.metric_name] = DruidDatasource.get_post_agg(postagg.json_obj)
@staticmethod
def metrics_and_post_aggs(
metrics: List[Metric], metrics_dict: Dict[str, DruidMetric]
) -> Tuple["OrderedDict[str, Any]", "OrderedDict[str, Any]"]:
# Separate metrics into those that are aggregations
# and those that are post aggregations
saved_agg_names = set()
adhoc_agg_configs = []
postagg_names = []
for metric in metrics:
if isinstance(metric, dict) and utils.is_adhoc_metric(metric):
adhoc_agg_configs.append(metric)
elif isinstance(metric, str):
if metrics_dict[metric].metric_type != POST_AGG_TYPE:
saved_agg_names.add(metric)
else:
postagg_names.append(metric)
# Create the post aggregations, maintain order since postaggs
# may depend on previous ones
post_aggs: "OrderedDict[str, Postaggregator]" = OrderedDict()
visited_postaggs = set()
for postagg_name in postagg_names:
postagg = metrics_dict[postagg_name]
visited_postaggs.add(postagg_name)
DruidDatasource.resolve_postagg(
postagg, post_aggs, saved_agg_names, visited_postaggs, metrics_dict
)
aggs = DruidDatasource.get_aggregations(
metrics_dict, saved_agg_names, adhoc_agg_configs
)
return aggs, post_aggs
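    # Sketch of the split (metric names below are hypothetical): given
    # metrics == ["count", "p95_latency"] where "p95_latency" is stored with
    # metric_type == POST_AGG_TYPE, "count" ends up in the returned aggs and
    # "p95_latency" (plus any aggregations it references) in post_aggs.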
def values_for_column(self, column_name: str, limit: int = 10000) -> List[Any]:
"""Retrieve some values for the given column"""
logger.info(
"Getting values for columns [{}] limited to [{}]".format(column_name, limit)
)
# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
if self.fetch_values_from:
from_dttm = utils.parse_human_datetime(self.fetch_values_from)
assert from_dttm
else:
from_dttm = datetime(1970, 1, 1)
qry = dict(
datasource=self.datasource_name,
granularity="all",
intervals=from_dttm.isoformat() + "/" + datetime.now().isoformat(),
aggregations=dict(count=count("count")),
dimension=column_name,
metric="count",
threshold=limit,
)
client = self.cluster.get_pydruid_client()
client.topn(**qry)
df = client.export_pandas()
return df[column_name].to_list()
def get_query_str(
self,
query_obj: QueryObjectDict,
phase: int = 1,
client: Optional["PyDruid"] = None,
) -> str:
return self.run_query(client=client, phase=phase, **query_obj)
def _add_filter_from_pre_query_data(
self, df: pd.DataFrame, dimensions: List[Any], dim_filter: "Filter"
) -> "Filter":
ret = dim_filter
if not df.empty:
new_filters = []
for unused, row in df.iterrows():
fields = []
for dim in dimensions:
f = None
# Check if this dimension uses an extraction function
# If so, create the appropriate pydruid extraction object
if isinstance(dim, dict) and "extractionFn" in dim:
(col, extraction_fn) = DruidDatasource._create_extraction_fn(
dim
)
dim_val = dim["outputName"]
f = Filter(
dimension=col,
value=row[dim_val],
extraction_function=extraction_fn,
)
elif isinstance(dim, dict):
dim_val = dim["outputName"]
if dim_val:
f = Dimension(dim_val) == row[dim_val]
else:
f = Dimension(dim) == row[dim]
if f:
fields.append(f)
if len(fields) > 1:
term = Filter(type="and", fields=fields)
new_filters.append(term)
elif fields:
new_filters.append(fields[0])
if new_filters:
ff = Filter(type="or", fields=new_filters)
if not dim_filter:
ret = ff
else:
ret = Filter(type="and", fields=[ff, dim_filter])
return ret
@staticmethod
def druid_type_from_adhoc_metric(adhoc_metric: Dict[str, Any]) -> str:
column_type = adhoc_metric["column"]["type"].lower()
aggregate = adhoc_metric["aggregate"].lower()
if aggregate == "count":
return "count"
if aggregate == "count_distinct":
return "hyperUnique" if column_type == "hyperunique" else "cardinality"
else:
return column_type + aggregate.capitalize()
@staticmethod
def get_aggregations(
metrics_dict: Dict[str, Any],
saved_metrics: Set[str],
adhoc_metrics: Optional[List[Dict[str, Any]]] = None,
) -> "OrderedDict[str, Any]":
"""
Returns a dictionary of aggregation metric names to aggregation json objects
:param metrics_dict: dictionary of all the metrics
        :param saved_metrics: set of saved metric names
        :param adhoc_metrics: list of adhoc metric configurations
:raise SupersetException: if one or more metric names are not aggregations
"""
if not adhoc_metrics:
adhoc_metrics = []
aggregations = OrderedDict()
invalid_metric_names = []
for metric_name in saved_metrics:
if metric_name in metrics_dict:
metric = metrics_dict[metric_name]
if metric.metric_type == POST_AGG_TYPE:
invalid_metric_names.append(metric_name)
else:
aggregations[metric_name] = metric.json_obj
else:
invalid_metric_names.append(metric_name)
if len(invalid_metric_names) > 0:
raise SupersetException(
_("Metric(s) {} must be aggregations.").format(invalid_metric_names)
)
for adhoc_metric in adhoc_metrics:
aggregations[adhoc_metric["label"]] = {
"fieldName": adhoc_metric["column"]["column_name"],
"fieldNames": [adhoc_metric["column"]["column_name"]],
"type": DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),
"name": adhoc_metric["label"],
}
return aggregations
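    # Illustrative result for a single adhoc metric (names and values are
    # examples, not from a real datasource):
    #
    #   DruidDatasource.get_aggregations(
    #       {}, set(),
    #       [{"label": "avg__num", "aggregate": "AVG",
    #         "column": {"column_name": "num", "type": "DOUBLE"}}])
    #   # -> {"avg__num": {"fieldName": "num", "fieldNames": ["num"],
    #   #                  "type": "doubleAvg", "name": "avg__num"}}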
def get_dimensions(
self, columns: List[str], columns_dict: Dict[str, DruidColumn]
) -> List[Union[str, Dict[str, Any]]]:
dimensions = []
columns = [col for col in columns if col in columns_dict]
for column_name in columns:
col = columns_dict.get(column_name)
dim_spec = col.dimension_spec if col else None
dimensions.append(dim_spec or column_name)
return dimensions
def intervals_from_dttms(self, from_dttm: datetime, to_dttm: datetime) -> str:
# Couldn't find a way to just not filter on time...
from_dttm = from_dttm or datetime(1901, 1, 1)
to_dttm = to_dttm or datetime(2101, 1, 1)
        # add tzinfo to naive datetimes using the configured DRUID_TZ
from_dttm = from_dttm.replace(tzinfo=DRUID_TZ)
to_dttm = to_dttm.replace(tzinfo=DRUID_TZ)
return "{}/{}".format(
from_dttm.isoformat() if from_dttm else "",
to_dttm.isoformat() if to_dttm else "",
)
@staticmethod
def _dimensions_to_values(
dimensions: List[Union[Dict[str, str], str]]
) -> List[Union[Dict[str, str], str]]:
"""
        Replace dimension specs with their `dimension`
        values, and ignore those without one
"""
values: List[Union[Dict[str, str], str]] = []
for dimension in dimensions:
if isinstance(dimension, dict):
if "extractionFn" in dimension:
values.append(dimension)
elif "dimension" in dimension:
values.append(dimension["dimension"])
else:
values.append(dimension)
return values
@staticmethod
def sanitize_metric_object(metric: Metric) -> None:
"""
Update a metric with the correct type if necessary.
:param dict metric: The metric to sanitize
"""
if (
utils.is_adhoc_metric(metric)
and metric["column"]["type"].upper() == "FLOAT" # type: ignore
):
metric["column"]["type"] = "DOUBLE" # type: ignore
def run_query( # druid
self,
metrics: List[Metric],
granularity: str,
from_dttm: datetime,
to_dttm: datetime,
columns: Optional[List[str]] = None,
groupby: Optional[List[str]] = None,
filter: Optional[List[Dict[str, Any]]] = None,
is_timeseries: Optional[bool] = True,
timeseries_limit: Optional[int] = None,
timeseries_limit_metric: Optional[Metric] = None,
row_limit: Optional[int] = None,
row_offset: Optional[int] = None,
inner_from_dttm: Optional[datetime] = None,
inner_to_dttm: Optional[datetime] = None,
orderby: Optional[Any] = None,
extras: Optional[Dict[str, Any]] = None,
phase: int = 2,
client: Optional["PyDruid"] = None,
order_desc: bool = True,
) -> str:
"""Runs a query against Druid and returns a dataframe.
"""
# TODO refactor into using a TBD Query object
client = client or self.cluster.get_pydruid_client()
row_limit = row_limit or conf.get("ROW_LIMIT")
if row_offset:
raise SupersetException("Offset not implemented for Druid connector")
if not is_timeseries:
granularity = "all"
if granularity == "all":
phase = 1
inner_from_dttm = inner_from_dttm or from_dttm
inner_to_dttm = inner_to_dttm or to_dttm
timezone = from_dttm.replace(tzinfo=DRUID_TZ).tzname() if from_dttm else None
query_str = ""
metrics_dict = {m.metric_name: m for m in self.metrics}
columns_dict = {c.column_name: c for c in self.columns}
if self.cluster and LooseVersion(
self.cluster.get_druid_version()
) < LooseVersion("0.11.0"):
for metric in metrics:
self.sanitize_metric_object(metric)
if timeseries_limit_metric:
self.sanitize_metric_object(timeseries_limit_metric)
aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs(
metrics, metrics_dict
)
# the dimensions list with dimensionSpecs expanded
columns_ = columns if IS_SIP_38 else groupby
dimensions = self.get_dimensions(columns_, columns_dict) if columns_ else []
extras = extras or {}
qry = dict(
datasource=self.datasource_name,
dimensions=dimensions,
aggregations=aggregations,
granularity=DruidDatasource.granularity(
granularity, timezone=timezone, origin=extras.get("druid_time_origin")
),
post_aggregations=post_aggs,
intervals=self.intervals_from_dttms(from_dttm, to_dttm),
)
if is_timeseries:
qry["context"] = dict(skipEmptyBuckets=True)
filters = (
DruidDatasource.get_filters(filter, self.num_cols, columns_dict)
if filter
else None
)
if filters:
qry["filter"] = filters
if "having_druid" in extras:
having_filters = self.get_having_filters(extras["having_druid"])
if having_filters:
qry["having"] = having_filters
else:
having_filters = None
order_direction = "descending" if order_desc else "ascending"
if (IS_SIP_38 and not metrics and columns and "__time" not in columns) or (
not IS_SIP_38 and columns
):
columns.append("__time")
del qry["post_aggregations"]
del qry["aggregations"]
del qry["dimensions"]
qry["columns"] = columns
qry["metrics"] = []
qry["granularity"] = "all"
qry["limit"] = row_limit
client.scan(**qry)
elif (IS_SIP_38 and columns) or (
not IS_SIP_38 and not groupby and not having_filters
):
logger.info("Running timeseries query for no groupby values")
del qry["dimensions"]
client.timeseries(**qry)
elif (
not having_filters
and order_desc
and (
(IS_SIP_38 and columns and len(columns) == 1)
or (not IS_SIP_38 and groupby and len(groupby) == 1)
)
):
dim = list(qry["dimensions"])[0]
logger.info("Running two-phase topn query for dimension [{}]".format(dim))
pre_qry = deepcopy(qry)
order_by: Optional[str] = None
if timeseries_limit_metric:
order_by = utils.get_metric_name(timeseries_limit_metric)
aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
[timeseries_limit_metric], metrics_dict
)
if phase == 1:
pre_qry["aggregations"].update(aggs_dict)
pre_qry["post_aggregations"].update(post_aggs_dict)
else:
pre_qry["aggregations"] = aggs_dict
pre_qry["post_aggregations"] = post_aggs_dict
else:
agg_keys = qry["aggregations"].keys()
order_by = list(agg_keys)[0] if agg_keys else None
            # Limit the number of timeseries by doing a two-phase query
pre_qry["granularity"] = "all"
pre_qry["threshold"] = min(row_limit, timeseries_limit or row_limit)
pre_qry["metric"] = order_by
pre_qry["dimension"] = self._dimensions_to_values(qry["dimensions"])[0]
del pre_qry["dimensions"]
client.topn(**pre_qry)
logger.info("Phase 1 Complete")
if phase == 2:
query_str += "// Two phase query\n// Phase 1\n"
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2
)
query_str += "\n"
if phase == 1:
return query_str
query_str += "// Phase 2 (built based on phase one's results)\n"
df = client.export_pandas()
if df is None:
df = pd.DataFrame()
qry["filter"] = self._add_filter_from_pre_query_data(
df, [pre_qry["dimension"]], filters
)
qry["threshold"] = timeseries_limit or 1000
if row_limit and granularity == "all":
qry["threshold"] = row_limit
qry["dimension"] = dim
del qry["dimensions"]
qry["metric"] = list(qry["aggregations"].keys())[0]
client.topn(**qry)
logger.info("Phase 2 Complete")
elif having_filters or ((IS_SIP_38 and columns) or (not IS_SIP_38 and groupby)):
# If grouping on multiple fields or using a having filter
# we have to force a groupby query
logger.info("Running groupby query for dimensions [{}]".format(dimensions))
if timeseries_limit and is_timeseries:
logger.info("Running two-phase query for timeseries")
pre_qry = deepcopy(qry)
pre_qry_dims = self._dimensions_to_values(qry["dimensions"])
# Can't use set on an array with dicts
# Use set with non-dict items only
non_dict_dims = list(
set([x for x in pre_qry_dims if not isinstance(x, dict)])
)
dict_dims = [x for x in pre_qry_dims if isinstance(x, dict)]
pre_qry["dimensions"] = non_dict_dims + dict_dims # type: ignore
order_by = None
if metrics:
order_by = utils.get_metric_name(metrics[0])
else:
order_by = pre_qry_dims[0] # type: ignore
if timeseries_limit_metric:
order_by = utils.get_metric_name(timeseries_limit_metric)
aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
[timeseries_limit_metric], metrics_dict
)
if phase == 1:
pre_qry["aggregations"].update(aggs_dict)
pre_qry["post_aggregations"].update(post_aggs_dict)
else:
pre_qry["aggregations"] = aggs_dict
pre_qry["post_aggregations"] = post_aggs_dict
                # Limit the number of timeseries by doing a two-phase query
pre_qry["granularity"] = "all"
pre_qry["limit_spec"] = {
"type": "default",
"limit": min(timeseries_limit, row_limit),
"intervals": self.intervals_from_dttms(
inner_from_dttm, inner_to_dttm
),
"columns": [{"dimension": order_by, "direction": order_direction}],
}
client.groupby(**pre_qry)
logger.info("Phase 1 Complete")
query_str += "// Two phase query\n// Phase 1\n"
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2
)
query_str += "\n"
if phase == 1:
return query_str
query_str += "// Phase 2 (built based on phase one's results)\n"
df = client.export_pandas()
if df is None:
df = pd.DataFrame()
qry["filter"] = self._add_filter_from_pre_query_data(
df, pre_qry["dimensions"], filters
)
qry["limit_spec"] = None
if row_limit:
dimension_values = self._dimensions_to_values(dimensions)
qry["limit_spec"] = {
"type": "default",
"limit": row_limit,
"columns": [
{
"dimension": (
utils.get_metric_name(metrics[0])
if metrics
else dimension_values[0]
),
"direction": order_direction,
}
],
}
client.groupby(**qry)
logger.info("Query Complete")
query_str += json.dumps(client.query_builder.last_query.query_dict, indent=2)
return query_str
@staticmethod
def homogenize_types(df: pd.DataFrame, columns: Iterable[str]) -> pd.DataFrame:
"""Converting all columns to strings
When grouping by a numeric (say FLOAT) column, pydruid returns
strings in the dataframe. This creates issues downstream related
to having mixed types in the dataframe
Here we replace None with <NULL> and make the whole series a
str instead of an object.
"""
df[columns] = df[columns].fillna(NULL_STRING).astype("unicode")
return df
def query(self, query_obj: QueryObjectDict) -> QueryResult:
qry_start_dttm = datetime.now()
client = self.cluster.get_pydruid_client()
query_str = self.get_query_str(client=client, query_obj=query_obj, phase=2)
df = client.export_pandas()
if df is None:
df = pd.DataFrame()
if df.empty:
return QueryResult(
df=df, query=query_str, duration=datetime.now() - qry_start_dttm
)
df = self.homogenize_types(
df, query_obj.get("columns" if IS_SIP_38 else "groupby", [])
)
df.columns = [
DTTM_ALIAS if c in ("timestamp", "__time") else c for c in df.columns
]
is_timeseries = (
query_obj["is_timeseries"] if "is_timeseries" in query_obj else True
)
if not is_timeseries and DTTM_ALIAS in df.columns:
del df[DTTM_ALIAS]
# Reordering columns
cols: List[str] = []
if DTTM_ALIAS in df.columns:
cols += [DTTM_ALIAS]
if not IS_SIP_38:
cols += query_obj.get("groupby") or []
cols += query_obj.get("columns") or []
cols += query_obj.get("metrics") or []
cols = utils.get_metric_names(cols)
cols = [col for col in cols if col in df.columns]
df = df[cols]
time_offset = DruidDatasource.time_offset(query_obj["granularity"])
def increment_timestamp(ts: str) -> datetime:
dt = utils.parse_human_datetime(ts).replace(tzinfo=DRUID_TZ)
return dt + timedelta(milliseconds=time_offset)
if DTTM_ALIAS in df.columns and time_offset:
df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(increment_timestamp)
return QueryResult(
df=df, query=query_str, duration=datetime.now() - qry_start_dttm
)
@staticmethod
def _create_extraction_fn(
dim_spec: Dict[str, Any]
) -> Tuple[
str,
Union[
"MapLookupExtraction",
"RegexExtraction",
"RegisteredLookupExtraction",
"TimeFormatExtraction",
],
]:
extraction_fn = None
if dim_spec and "extractionFn" in dim_spec:
col = dim_spec["dimension"]
fn = dim_spec["extractionFn"]
ext_type = fn.get("type")
if ext_type == "lookup" and fn["lookup"].get("type") == "map":
replace_missing_values = fn.get("replaceMissingValueWith")
retain_missing_values = fn.get("retainMissingValue", False)
injective = fn.get("isOneToOne", False)
extraction_fn = MapLookupExtraction(
fn["lookup"]["map"],
replace_missing_values=replace_missing_values,
retain_missing_values=retain_missing_values,
injective=injective,
)
elif ext_type == "regex":
extraction_fn = RegexExtraction(fn["expr"])
elif ext_type == "registeredLookup":
extraction_fn = RegisteredLookupExtraction(fn.get("lookup"))
elif ext_type == "timeFormat":
extraction_fn = TimeFormatExtraction(
fn.get("format"), fn.get("locale"), fn.get("timeZone")
)
else:
raise Exception(_("Unsupported extraction function: " + ext_type))
return (col, extraction_fn)
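    # Illustrative input/output (the column and expression are examples):
    #
    #   DruidDatasource._create_extraction_fn(
    #       {"dimension": "user_agent",
    #        "extractionFn": {"type": "regex", "expr": "^(\\w+)/"}})
    #   # -> ("user_agent", RegexExtraction("^(\\w+)/"))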
@classmethod
def get_filters(
cls,
raw_filters: List[Dict[str, Any]],
num_cols: List[str],
columns_dict: Dict[str, DruidColumn],
) -> "Filter":
"""Given Superset filter data structure, returns pydruid Filter(s)"""
filters = None
for flt in raw_filters:
col: Optional[str] = flt.get("col")
op: Optional[str] = flt["op"].upper() if "op" in flt else None
eq: Optional[FilterValues] = flt.get("val")
if (
not col
or not op
or (
eq is None
and op
not in (
FilterOperator.IS_NULL.value,
FilterOperator.IS_NOT_NULL.value,
)
)
):
continue
# Check if this dimension uses an extraction function
# If so, create the appropriate pydruid extraction object
column_def = columns_dict.get(col)
dim_spec = column_def.dimension_spec if column_def else None
extraction_fn = None
if dim_spec and "extractionFn" in dim_spec:
(col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)
cond = None
is_numeric_col = col in num_cols
is_list_target = op in (
FilterOperator.IN.value,
FilterOperator.NOT_IN.value,
)
eq = cls.filter_values_handler(
eq,
is_list_target=is_list_target,
target_column_is_numeric=is_numeric_col,
)
# For these two ops, could have used Dimension,
# but it doesn't support extraction functions
if op == FilterOperator.EQUALS.value:
cond = Filter(
dimension=col, value=eq, extraction_function=extraction_fn
)
elif op == FilterOperator.NOT_EQUALS.value:
cond = ~Filter(
dimension=col, value=eq, extraction_function=extraction_fn
)
elif is_list_target:
eq = cast(List[Any], eq)
fields = []
# ignore the filter if it has no value
if not len(eq):
continue
# if it uses an extraction fn, use the "in" operator
# as Dimension isn't supported
elif extraction_fn is not None:
cond = Filter(
dimension=col,
values=eq,
type="in",
extraction_function=extraction_fn,
)
elif len(eq) == 1:
cond = Dimension(col) == eq[0]
else:
for s in eq:
fields.append(Dimension(col) == s)
cond = Filter(type="or", fields=fields)
if op == FilterOperator.NOT_IN.value:
cond = ~cond
elif op == FilterOperator.REGEX.value:
cond = Filter(
extraction_function=extraction_fn,
type="regex",
pattern=eq,
dimension=col,
)
# For the ops below, could have used pydruid's Bound,
# but it doesn't support extraction functions
elif op == FilterOperator.GREATER_THAN_OR_EQUALS.value:
cond = Bound(
extraction_function=extraction_fn,
dimension=col,
lowerStrict=False,
upperStrict=False,
lower=eq,
upper=None,
ordering=cls._get_ordering(is_numeric_col),
)
elif op == FilterOperator.LESS_THAN_OR_EQUALS.value:
cond = Bound(
extraction_function=extraction_fn,
dimension=col,
lowerStrict=False,
upperStrict=False,
lower=None,
upper=eq,
ordering=cls._get_ordering(is_numeric_col),
)
elif op == FilterOperator.GREATER_THAN.value:
cond = Bound(
extraction_function=extraction_fn,
lowerStrict=True,
upperStrict=False,
dimension=col,
lower=eq,
upper=None,
ordering=cls._get_ordering(is_numeric_col),
)
elif op == FilterOperator.LESS_THAN.value:
cond = Bound(
extraction_function=extraction_fn,
upperStrict=True,
lowerStrict=False,
dimension=col,
lower=None,
upper=eq,
ordering=cls._get_ordering(is_numeric_col),
)
elif op == FilterOperator.IS_NULL.value:
cond = Filter(dimension=col, value="")
elif op == FilterOperator.IS_NOT_NULL.value:
cond = ~Filter(dimension=col, value="")
if filters:
filters = Filter(type="and", fields=[cond, filters])
else:
filters = cond
return filters
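    # Illustrative translation (column and values are examples): a Superset
    # filter such as
    #   {"col": "country", "op": "IN", "val": ["US", "CA"]}
    # with no extraction function roughly becomes
    #   Filter(type="or", fields=[Dimension("country") == "US",
    #                             Dimension("country") == "CA"])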
@staticmethod
def _get_ordering(is_numeric_col: bool) -> str:
return "numeric" if is_numeric_col else "lexicographic"
def _get_having_obj(self, col: str, op: str, eq: str) -> "Having":
cond = None
if op == FilterOperator.EQUALS.value:
if col in self.column_names:
cond = DimSelector(dimension=col, value=eq)
else:
cond = Aggregation(col) == eq
elif op == FilterOperator.GREATER_THAN.value:
cond = Aggregation(col) > eq
elif op == FilterOperator.LESS_THAN.value:
cond = Aggregation(col) < eq
return cond
def get_having_filters(
self, raw_filters: List[Dict[str, Any]]
) -> Optional["Having"]:
filters = None
reversed_op_map = {
FilterOperator.NOT_EQUALS.value: FilterOperator.EQUALS.value,
FilterOperator.GREATER_THAN_OR_EQUALS.value: FilterOperator.LESS_THAN.value,
FilterOperator.LESS_THAN_OR_EQUALS.value: FilterOperator.GREATER_THAN.value,
}
for flt in raw_filters:
if not all(f in flt for f in ["col", "op", "val"]):
continue
col = flt["col"]
op = flt["op"]
eq = flt["val"]
cond = None
if op in [
FilterOperator.EQUALS.value,
FilterOperator.GREATER_THAN.value,
FilterOperator.LESS_THAN.value,
]:
cond = self._get_having_obj(col, op, eq)
elif op in reversed_op_map:
cond = ~self._get_having_obj(col, reversed_op_map[op], eq)
if filters:
filters = filters & cond
else:
filters = cond
return filters
@classmethod
def query_datasources_by_name(
cls,
session: Session,
database: Database,
datasource_name: str,
schema: Optional[str] = None,
) -> List["DruidDatasource"]:
return []
def external_metadata(self) -> List[Dict[str, Any]]:
self.merge_flag = True
latest_metadata = self.latest_metadata() or {}
return [{"name": k, "type": v.get("type")} for k, v in latest_metadata.items()]
sa.event.listen(DruidDatasource, "after_insert", security_manager.set_perm)
sa.event.listen(DruidDatasource, "after_update", security_manager.set_perm)
| airbnb/superset | superset/connectors/druid/models.py | Python | apache-2.0 | 64,948 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
import json
from mock import patch
#from base import web, model, self, db
from default import db, with_context
from nose.tools import assert_equal, assert_raises
from test_api import HelperAPI
from pybossa.model.app import App
from pybossa.model.task import Task
from pybossa.model.task_run import TaskRun
class TestApiCommon(HelperAPI):
@with_context
def test_00_limits_query(self):
"""Test API GET limits works"""
for i in range(30):
app = App(name="name%s" % i,
short_name="short_name%s" % i,
description="desc",
owner_id=1)
info = dict(a=0)
task = Task(app_id=1, info=info)
taskrun = TaskRun(app_id=1, task_id=1)
db.session.add(app)
db.session.add(task)
db.session.add(taskrun)
db.session.commit()
res = self.app.get('/api/app')
print res.data
data = json.loads(res.data)
assert len(data) == 20, len(data)
res = self.app.get('/api/app?limit=10')
data = json.loads(res.data)
assert len(data) == 10, len(data)
res = self.app.get('/api/app?limit=10&offset=10')
data = json.loads(res.data)
assert len(data) == 10, len(data)
assert data[0].get('name') == 'name9'
res = self.app.get('/api/task')
data = json.loads(res.data)
assert len(data) == 20, len(data)
res = self.app.get('/api/taskrun')
data = json.loads(res.data)
assert len(data) == 20, len(data)
# Register 30 new users to test limit on users too
for i in range(30):
self.register(fullname="User%s" %i, username="user%s" %i)
res = self.app.get('/api/user')
data = json.loads(res.data)
assert len(data) == 20, len(data)
res = self.app.get('/api/user?limit=10')
data = json.loads(res.data)
print data
assert len(data) == 10, len(data)
res = self.app.get('/api/user?limit=10&offset=10')
data = json.loads(res.data)
assert len(data) == 10, len(data)
assert data[0].get('name') == 'user7', data
@with_context
def test_get_query_with_api_key(self):
""" Test API GET query with an API-KEY"""
for endpoint in self.endpoints:
url = '/api/' + endpoint + '?api_key=' + self.api_key
res = self.app.get(url)
data = json.loads(res.data)
if endpoint == 'app':
assert len(data) == 1, data
app = data[0]
assert app['info']['total'] == 150, data
# The output should have a mime-type: application/json
assert res.mimetype == 'application/json', res
if endpoint == 'task':
assert len(data) == 10, data
task = data[0]
assert task['info']['url'] == 'my url', data
# The output should have a mime-type: application/json
assert res.mimetype == 'application/json', res
if endpoint == 'taskrun':
assert len(data) == 10, data
taskrun = data[0]
assert taskrun['info']['answer'] == 'annakarenina', data
# The output should have a mime-type: application/json
assert res.mimetype == 'application/json', res
if endpoint == 'user':
# With self.create() 3 users are created in the DB
assert len(data) == 3, data
user = data[0]
assert user['name'] == 'root', data
# The output should have a mime-type: application/json
assert res.mimetype == 'application/json', res
@with_context
def test_query_search_wrongfield(self):
""" Test API query search works"""
        # Test first a non-existent field for all end-points
for endpoint in self.endpoints:
res = self.app.get("/api/%s?wrongfield=value" % endpoint)
err = json.loads(res.data)
assert res.status_code == 415, err
assert err['status'] == 'failed', err
assert err['action'] == 'GET', err
assert err['exception_cls'] == 'AttributeError', err
@with_context
def test_query_sql_injection(self):
"""Test API SQL Injection is not allowed works"""
q = '1%3D1;SELECT%20*%20FROM%20task%20WHERE%201=1'
res = self.app.get('/api/task?' + q)
error = json.loads(res.data)
assert res.status_code == 415, error
assert error['action'] == 'GET', error
assert error['status'] == 'failed', error
assert error['target'] == 'task', error
q = 'app_id=1%3D1;SELECT%20*%20FROM%20task%20WHERE%201'
res = self.app.get('/api/apappp?' + q)
assert res.status_code == 404, res.data
q = 'app_id=1%3D1;SELECT%20*%20FROM%20task%20WHERE%201'
res = self.app.get('/api/' + q)
assert res.status_code == 404, res.data
q = 'app_id=1%3D1;SELECT%20*%20FROM%20task%20WHERE%201'
res = self.app.get('/api' + q)
assert res.status_code == 404, res.data
@with_context
def test_09_delete_app_cascade(self):
"""Test API delete app deletes associated tasks and taskruns"""
tasks = self.app.get('/api/task?app_id=1&limit=1000')
tasks = json.loads(tasks.data)
task_runs = self.app.get('/api/taskrun?app_id=1&limit=1000')
task_runs = json.loads(task_runs.data)
url = '/api/app/%s?api_key=%s' % (1, self.api_key)
self.app.delete(url)
for task in tasks:
t = db.session.query(Task)\
.filter_by(app_id=1)\
.filter_by(id=task['id'])\
.all()
assert len(t) == 0, "There should not be any task"
tr = db.session.query(TaskRun)\
.filter_by(app_id=1)\
.filter_by(task_id=task['id'])\
.all()
assert len(tr) == 0, "There should not be any task run"
@with_context
def test_10_delete_task_cascade(self):
"""Test API delete app deletes associated tasks and taskruns"""
tasks = self.app.get('/api/task?app_id=1&limit=1000')
tasks = json.loads(tasks.data)
for t in tasks:
url = '/api/task/%s?api_key=%s' % (t['id'], self.api_key)
res = self.app.delete(url)
assert_equal(res.status, '204 NO CONTENT', res.data)
tr = []
tr = db.session.query(TaskRun)\
.filter_by(app_id=1)\
.filter_by(task_id=t['id'])\
.all()
assert len(tr) == 0, "There should not be any task run for task"
| CulturePlex/pybossa | test/test_api/test_api_common.py | Python | agpl-3.0 | 7,558 |
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import make_option
from django.db import transaction
from django.core.management.base import CommandError
from synnefo.management.common import (get_resource, convert_api_faults,
wait_server_task)
from snf_django.management.commands import SynnefoCommand
from snf_django.management.utils import parse_bool
from synnefo.logic import servers
ACTIONS = ["start", "stop", "reboot_hard", "reboot_soft"]
class Command(SynnefoCommand):
args = "<server_id>"
help = "Modify a server."
option_list = SynnefoCommand.option_list + (
make_option(
'--name',
dest='name',
metavar='NAME',
help="Rename server."),
make_option(
'--user',
dest='user',
metavar='USER_UUID',
help="Change ownership of server. Value must be a user UUID"),
make_option(
"--suspended",
dest="suspended",
default=None,
choices=["True", "False"],
metavar="True|False",
help="Mark a server as suspended/non-suspended."),
make_option(
"--flavor",
dest="flavor",
metavar="FLAVOR_ID",
help="Resize a server by modifying its flavor. The new flavor"
" must have the same disk size and disk template."),
make_option(
"--action",
dest="action",
choices=ACTIONS,
metavar="|".join(ACTIONS),
help="Perform one of the allowed actions."),
make_option(
"--wait",
dest="wait",
default="True",
choices=["True", "False"],
metavar="True|False",
help="Wait for Ganeti jobs to complete. [Default: True]"),
)
@transaction.commit_on_success
@convert_api_faults
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Please provide a server ID")
server = get_resource("server", args[0], for_update=True)
new_name = options.get("name", None)
if new_name is not None:
old_name = server.name
server = servers.rename(server, new_name)
self.stdout.write("Renamed server '%s' from '%s' to '%s'\n" %
(server, old_name, new_name))
suspended = options.get("suspended", None)
if suspended is not None:
suspended = parse_bool(suspended)
server.suspended = suspended
server.save()
self.stdout.write("Set server '%s' as suspended=%s\n" %
(server, suspended))
new_owner = options.get('user')
if new_owner is not None:
if "@" in new_owner:
raise CommandError("Invalid user UUID.")
old_owner = server.userid
server.userid = new_owner
server.save()
msg = "Changed the owner of server '%s' from '%s' to '%s'.\n"
self.stdout.write(msg % (server, old_owner, new_owner))
wait = parse_bool(options["wait"])
new_flavor_id = options.get("flavor")
if new_flavor_id is not None:
new_flavor = get_resource("flavor", new_flavor_id)
old_flavor = server.flavor
msg = "Resizing server '%s' from flavor '%s' to '%s'.\n"
self.stdout.write(msg % (server, old_flavor, new_flavor))
server = servers.resize(server, new_flavor)
wait_server_task(server, wait, stdout=self.stdout)
action = options.get("action")
if action is not None:
if action == "start":
server = servers.start(server)
elif action == "stop":
server = servers.stop(server)
elif action == "reboot_hard":
server = servers.reboot(server, reboot_type="HARD")
elif action == "reboot_stof":
server = servers.reboot(server, reboot_type="SOFT")
else:
raise CommandError("Unknown action.")
wait_server_task(server, wait, stdout=self.stdout)
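# Example invocation (illustrative; assumes the usual snf-manage entry point
# and existing server/flavor IDs):
#
#   snf-manage server-modify --flavor 42 --action stop --wait True 1234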
| Erethon/synnefo | snf-cyclades-app/synnefo/logic/management/commands/server-modify.py | Python | gpl-3.0 | 4,868 |
content_types = {
'css': 'text/css',
'gif': 'image/gif',
'html': 'text/html',
'jpg': 'image/jpeg',
'js': 'application/javascript',
'json': 'application/json',
'png': 'image/png',
'txt': 'text/plain',
}
def get_static_file(path, static_files):
"""Return the local filename and content type for the requested static
file URL.
:param path: the path portion of the requested URL.
:param static_files: a static file configuration dictionary.
This function returns a dictionary with two keys, "filename" and
"content_type". If the requested URL does not match any static file, the
return value is None.
"""
extra_path = ''
if path in static_files:
f = static_files[path]
else:
f = None
while path != '':
path, last = path.rsplit('/', 1)
extra_path = '/' + last + extra_path
if path in static_files:
f = static_files[path]
break
elif path + '/' in static_files:
f = static_files[path + '/']
break
if f:
if isinstance(f, str):
f = {'filename': f}
else:
f = f.copy() # in case it is mutated below
if f['filename'].endswith('/') and extra_path.startswith('/'):
extra_path = extra_path[1:]
f['filename'] += extra_path
if f['filename'].endswith('/'):
if '' in static_files:
if isinstance(static_files[''], str):
f['filename'] += static_files['']
else:
f['filename'] += static_files['']['filename']
if 'content_type' in static_files['']:
f['content_type'] = static_files['']['content_type']
else:
f['filename'] += 'index.html'
if 'content_type' not in f:
ext = f['filename'].rsplit('.')[-1]
f['content_type'] = content_types.get(
ext, 'application/octet-stream')
return f
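# Illustrative usage (the mapping below is hypothetical):
#
#   static_files = {'/': 'index.html', '/static': './public/static'}
#   get_static_file('/static/css/site.css', static_files)
#   # -> {'filename': './public/static/css/site.css', 'content_type': 'text/css'}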
| miguelgrinberg/python-engineio | src/engineio/static_files.py | Python | mit | 2,064 |
# GromacsWrapper: test_amber03star.py
# Copyright (c) 2009 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
from __future__ import division, absolute_import, print_function
import pytest
from gromacs.exceptions import GromacsError
from .top import TopologyTest
from ...datafiles import datafile
class TestAmber03star(TopologyTest):
processed = datafile('fileformats/top/amber03star/processed.top')
conf = datafile('fileformats/top/amber03star/conf.gro')
molecules = ['Protein', 'SOL', 'IB+', 'CA', 'CL', 'NA', 'MG', 'K', 'RB', 'CS', 'LI', 'ZN']
| Becksteinlab/GromacsWrapper | tests/fileformats/top/test_amber03star.py | Python | gpl-3.0 | 666 |
import yaml
def get_page_data(path, get, post, variables):
    # Use safe_load (no arbitrary object construction) and a context manager
    # so the file handle is closed promptly.
    with open("static/resume.yml", "r") as resume_file:
        resume_data = yaml.safe_load(resume_file)
for job in resume_data['work']:
job['tasks_html'] = "<ul><li>%s</li></ul>" % "</li><li>".join(job['tasks'])
return resume_data
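# Illustrative shape of static/resume.yml assumed by get_page_data. Only the
# 'work' and 'tasks' keys are required by the code above; the other field
# names are hypothetical:
#
#   work:
#     - employer: Example Corp
#       title: Software Developer
#       tasks:
#         - Built internal tooling
#         - Maintained the CI pipeline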
| ClintonMorrison/personal-website | app/pages/controllers/resume.py | Python | apache-2.0 | 254 |
# -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 Compatible Cloud Storage, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
minio.helpers
This module implements all helper functions.
:copyright: (c) 2015 by Minio, Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import io
import collections
import binascii
import hashlib
import re
import os
import errno
import math
from .compat import urlsplit, urlencode
from .error import (InvalidBucketError, InvalidEndpointError,
InvalidArgumentError)
# Constants
MAX_MULTIPART_COUNT = 10000 # 10000 parts
MAX_MULTIPART_OBJECT_SIZE = 5 * 1024 * 1024 * 1024 * 1024 # 5TiB
MIN_OBJECT_SIZE = 5 * 1024 * 1024 # 5MiB
_VALID_BUCKETNAME_REGEX = re.compile('^[a-z0-9][a-z0-9\\.\\-]+[a-z0-9]$')
_ALLOWED_HOSTNAME_REGEX = re.compile(
'^((?!-)[A-Z\\d-]{1,63}(?<!-)\\.)*((?!-)[A-Z\\d-]{1,63}(?<!-))$',
re.IGNORECASE)
def dump_http(method, url, request_headers, response, output_stream):
"""
    Dump an HTTP request and its response into output_stream.
    :param method: HTTP method of the request.
    :param url: Full URL the request was sent to.
    :param request_headers: Dictionary of HTTP request headers.
    :param response: HTTP response object whose status, headers and, for
        error statuses, body are dumped.
    :param output_stream: Stream where the request is being dumped at.
"""
# Start header.
output_stream.write('---------START-HTTP---------\n')
# Get parsed url.
parsed_url = urlsplit(url)
# Dump all request headers recursively.
http_path = parsed_url.path
if parsed_url.query:
http_path = http_path + '?' + parsed_url.query
output_stream.write('{0} {1} HTTP/1.1\n'.format(method,
http_path))
for k, v in list(request_headers.items()):
        if k == 'authorization':
            # Redact signature header value from trace logs.
            v = re.sub(r'Signature=([0-9a-f]+)', 'Signature=*REDACTED*', v)
output_stream.write('{0}: {1}\n'.format(k.title(), v))
# Write a new line.
output_stream.write('\n')
# Write response status code.
output_stream.write('HTTP/1.1 {0}\n'.format(response.status))
# Dump all response headers recursively.
for k, v in list(response.getheaders().items()):
output_stream.write('{0}: {1}\n'.format(k.title(), v))
# For all errors write all the available response body.
if response.status != 200 and \
response.status != 204 and response.status != 206:
output_stream.write('{0}'.format(response.read()))
# End header.
output_stream.write('---------END-HTTP---------\n')
def mkdir_p(path):
"""
Recursively creates parent and sub directories.
:param path:
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class PartMetadata(object):
"""
    Metadata for a single upload part, :class:`PartMetadata <PartMetadata>`.
:param data: Part writer object backed by temporary file.
:param md5digest: Md5sum digest of the part.
:param sha256digest: Sha256sum digest of the part.
:param size: Size of the part.
"""
def __init__(self, data, md5digest, sha256digest, size):
self.data = data
self.md5digest = md5digest
self.sha256digest = sha256digest
self.size = size
def parts_manager(data, part_size=5*1024*1024):
"""
    Reads up to part_size bytes from data and buffers them in memory.
    :param data: Input reader object which needs to be saved.
    :param part_size: Maximum size of an individual part, defaults to 5MiB.
    :return: Returns :class:`PartMetadata <PartMetadata>` for the part read.
"""
tmpdata = io.BytesIO()
md5hasher = hashlib.md5()
sha256hasher = hashlib.sha256()
total_read = 0
while total_read < part_size:
current_data = data.read(1024)
        if not current_data:
break
tmpdata.write(current_data)
md5hasher.update(current_data)
sha256hasher.update(current_data)
total_read = total_read + len(current_data)
return PartMetadata(tmpdata, md5hasher.digest(),
sha256hasher.digest(), total_read)
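# Usage sketch (hypothetical input; any binary file-like object works the same way):
#
#   import io
#   reader = io.BytesIO(b'x' * (6 * 1024 * 1024))          # 6MiB of data
#   part = parts_manager(reader, part_size=5*1024*1024)
#   # part.size == 5*1024*1024; a second call drains the remaining 1MiB.
#   # part.data is an io.BytesIO positioned at its end, so seek(0) before reuse.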
def ignore_headers(headers_to_sign):
"""
Ignore headers.
"""
# Excerpts from @lsegal -
# https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
#
# User-Agent:
#
# This is ignored from signing because signing this causes problems
# with generating pre-signed URLs (that are executed by other agents)
# or when customers pass requests through proxies, which may modify
# the user-agent.
#
# Content-Length:
#
# This is ignored from signing because generating a pre-signed URL
# should not provide a content-length constraint, specifically when
# vending a S3 pre-signed PUT URL. The corollary to this is that when
# sending regular requests (non-pre-signed), the signature contains
# a checksum of the body, which implicitly validates the payload
# length (since changing the number of bytes would change the
# checksum) and therefore this header is not valuable in the
# signature.
#
# Content-Type:
#
# Signing this header causes quite a number of problems in browser
# environments, where browsers like to modify and normalize the
# content-type header in different ways. There is more information
# on this in https://github.com/aws/aws-sdk-js/issues/244. Avoiding
# this field simplifies logic and reduces the possibility of bugs.
#
# Authorization:
#
# Is skipped for obvious reasons
ignored_headers = ['Authorization', 'Content-Length',
'Content-Type', 'User-Agent']
for ignored_header in ignored_headers:
if ignored_header in headers_to_sign:
del headers_to_sign[ignored_header]
return headers_to_sign
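# Usage sketch (hypothetical header dict; matching is by exact header name):
#
#   headers = {'Host': 'play.min.io', 'Content-Type': 'text/plain',
#              'User-Agent': 'minio-py', 'X-Amz-Date': '20150830T123600Z'}
#   ignore_headers(headers)
#   # -> {'Host': 'play.min.io', 'X-Amz-Date': '20150830T123600Z'}
#   # (the dict is modified in place and returned)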
AWS_S3_ENDPOINT_MAP = {
'us-east-1': 's3.amazonaws.com',
'us-west-2': 's3-us-west-2.amazonaws.com',
'us-west-1': 's3-us-west-1.amazonaws.com',
'eu-west-1': 's3-eu-west-1.amazonaws.com',
'sa-east-1': 's3-sa-east-1.amazonaws.com',
'eu-central-1': 's3-eu-central-1.amazonaws.com',
'ap-southeast-1': 's3-ap-southeast-1.amazonaws.com',
'ap-northeast-1': 's3-ap-northeast-1.amazonaws.com',
'ap-northeast-2': 's3-ap-northeast-2.amazonaws.com',
}
def get_s3_endpoint(region):
if region in AWS_S3_ENDPOINT_MAP:
return AWS_S3_ENDPOINT_MAP[region]
return 's3.amazonaws.com'
def get_target_url(endpoint_url, bucket_name=None, object_name=None,
bucket_region='us-east-1', query=None):
"""
Construct final target url.
:param endpoint_url: Target endpoint url where request is served to.
:param bucket_name: Bucket component for the target url.
:param object_name: Object component for the target url.
:param bucket_region: Bucket region for the target url.
:param query: Query parameters as a *dict* for the target url.
:return: Returns final target url as *str*.
"""
# New url
url = None
# Parse url
parsed_url = urlsplit(endpoint_url)
# Get new host, scheme.
host = parsed_url.netloc
if 's3.amazonaws.com' in host:
host = get_s3_endpoint(bucket_region)
scheme = parsed_url.scheme
url = scheme + '://' + host
if bucket_name:
        # Check whether the target url should address the bucket
        # in virtual-host style.
is_virtual_host_style = is_virtual_host(endpoint_url,
bucket_name)
if is_virtual_host_style:
url = (scheme + '://' + bucket_name + '.' + host)
else:
url = (scheme + '://' + host + '/' + bucket_name)
url_components = [url]
url_components.append('/')
if object_name:
object_name = encode_object_name(object_name)
url_components.append(object_name)
if query:
ordered_query = collections.OrderedDict(sorted(query.items()))
query_components = []
for component_key in ordered_query:
single_component = [component_key]
if ordered_query[component_key] is not None:
single_component.append('=')
encoded_query = urlencode(
str(ordered_query[component_key])).replace(
'/',
'%2F')
single_component.append(encoded_query)
query_components.append(''.join(single_component))
query_string = '&'.join(query_components)
if query_string:
url_components.append('?')
url_components.append(query_string)
return ''.join(url_components)
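# Usage sketch (hypothetical bucket and object names; plain ASCII names are
# assumed to pass through urlencode unchanged):
#
#   get_target_url('https://s3.amazonaws.com',
#                  bucket_name='mybucket',
#                  object_name='photo.jpg',
#                  bucket_region='us-east-1',
#                  query={'uploads': None})
#   # -> 'https://mybucket.s3.amazonaws.com/photo.jpg?uploads'
#   # ('mybucket' contains no dot, so virtual-host style is used over HTTPS)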
def is_valid_endpoint(endpoint):
"""
Verify if endpoint is valid.
:type endpoint: string
    :param endpoint: An endpoint. Must be a hostname, optionally with a port,
        and must not include a scheme.
:return: True if the endpoint is valid. Raise :exc:`InvalidEndpointError`
otherwise.
"""
try:
if urlsplit(endpoint).scheme:
raise InvalidEndpointError('Hostname cannot have a scheme.')
hostname = endpoint.split(':')[0]
        if not hostname:
raise InvalidEndpointError('Hostname cannot be empty.')
if len(hostname) > 255:
raise InvalidEndpointError('Hostname cannot be greater than 255.')
if hostname[-1] == '.':
hostname = hostname[:-1]
if not _ALLOWED_HOSTNAME_REGEX.match(hostname):
raise InvalidEndpointError('Hostname does not meet URL standards.')
if hostname.endswith('.amazonaws.com') and \
(hostname != 's3.amazonaws.com'):
raise InvalidEndpointError('Amazon S3 hostname should be '
's3.amazonaws.com.')
except AttributeError as error:
raise TypeError(error)
return True
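# Usage sketch (hypothetical hostnames):
#
#   is_valid_endpoint('play.min.io')               # True
#   is_valid_endpoint('s3.amazonaws.com')          # True
#   is_valid_endpoint('https://s3.amazonaws.com')  # raises InvalidEndpointError (scheme)
#   is_valid_endpoint('s3-us-west-2.amazonaws.com')  # raises: only s3.amazonaws.com is accepted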
def is_virtual_host(endpoint_url, bucket_name):
"""
Check to see if the ``bucket_name`` can be part of virtual host
style.
:param endpoint_url: Endpoint url which will be used for virtual host.
:param bucket_name: Bucket name to be validated against.
"""
is_valid_bucket_name(bucket_name)
parsed_url = urlsplit(endpoint_url)
    # A valid bucket_name containing '.' would produce a hostname that fails
    # wildcard SSL certificate validation, so do not use virtual-host style
    # for such buckets over HTTPS.
if 'https' in parsed_url.scheme and '.' in bucket_name:
return False
if 's3.amazonaws.com' in parsed_url.netloc:
return True
return False
def is_valid_bucket_name(bucket_name):
"""
Check to see if the ``bucket_name`` complies with the
restricted DNS naming conventions necessary to allow
access via virtual-hosting style.
:param bucket_name: Bucket name in *str*.
:return: True if the bucket is valid. Raise :exc:`InvalidBucketError`
otherwise.
"""
# Verify bucket name length.
if len(bucket_name) < 3:
raise InvalidBucketError('Bucket name cannot be less than'
' 3 characters.')
if len(bucket_name) > 63:
raise InvalidBucketError('Bucket name cannot be more than'
' 63 characters.')
if '..' in bucket_name:
raise InvalidBucketError('Bucket name cannot have successive'
' periods.')
match = _VALID_BUCKETNAME_REGEX.match(bucket_name)
if match is None or match.end() != len(bucket_name):
raise InvalidBucketError('Bucket name does not follow S3 standards.'
' Bucket: {0}'.format(bucket_name))
return True
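# Usage sketch (hypothetical names):
#
#   is_valid_bucket_name('my-bucket-2020')   # True
#   is_valid_bucket_name('ab')               # raises: fewer than 3 characters
#   is_valid_bucket_name('my..bucket')       # raises: successive periods
#   is_valid_bucket_name('MyBucket')         # raises: fails the lowercase DNS pattern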
def is_non_empty_string(input_string):
"""
    Validate that the input is a non-empty string.
    :param input_string: Input is a *str*.
    :return: True if input is a non-empty string.
        Raise :exc:`ValueError` (empty) or :exc:`TypeError` (not a string) otherwise.
"""
try:
if not input_string.strip():
raise ValueError()
except AttributeError as error:
raise TypeError(error)
return True
def encode_object_name(object_name):
"""
URL encode input object name.
:param object_name: Un-encoded object name.
:return: URL encoded input object name.
"""
is_non_empty_string(object_name)
return urlencode(object_name)
def get_sha256(content):
"""
Calculate sha256 digest of input byte array.
:param content: Input byte array.
:return: sha256 digest of input byte array.
"""
if len(content) == 0:
content = b''
hasher = hashlib.sha256()
hasher.update(content)
return hasher.digest()
def get_md5(content):
"""
Calculate md5 digest of input byte array.
:param content: Input byte array.
:return: md5 digest of input byte array.
"""
if len(content) == 0:
content = b''
hasher = hashlib.md5()
hasher.update(content)
return hasher.digest()
def encode_to_base64(content):
"""
Calculate base64 of input byte array.
:param content: Input byte array.
:return: base64 encoding of input byte array.
"""
return binascii.b2a_base64(content).strip().decode('utf-8')
def encode_to_hex(content):
"""
Calculate hex for input byte array.
:param content: Input byte array.
:return: hexlified input byte array.
"""
return binascii.hexlify(content)
def optimal_part_info(length):
"""
Calculate optimal part size for multipart uploads.
:param length: Input length to calculate part size of.
    :return: Tuple of (total parts count, part size, last part size).
"""
    # If object size is -1 (unknown), assume the 5TiB maximum.
if length == -1:
length = MAX_MULTIPART_OBJECT_SIZE
if length > MAX_MULTIPART_OBJECT_SIZE:
raise InvalidArgumentError('Input content size is bigger '
' than allowed maximum of 5TiB.')
# Use floats for part size for all calculations to avoid
# overflows during float64 to int64 conversions.
    part_size_float = math.ceil(length / float(MAX_MULTIPART_COUNT))
part_size_float = (math.ceil(part_size_float/MIN_OBJECT_SIZE)
* MIN_OBJECT_SIZE)
# Total parts count.
total_parts_count = int(math.ceil(length/part_size_float))
# Part size.
part_size = int(part_size_float)
# Last part size.
last_part_size = length - int(total_parts_count-1)*part_size
return total_parts_count, part_size, last_part_size
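# Worked sketch (the numbers follow from the constants defined above):
#
#   optimal_part_info(1024 * 1024 * 1024)   # a 1GiB object
#   # ceil(1GiB / 10000 parts) rounded up to a 5MiB multiple -> 5MiB part size
#   # -> (205, 5242880, 4194304): 204 full 5MiB parts plus a final 4MiB part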
| harshavardhana/minio-py | minio/helpers.py | Python | apache-2.0 | 15,117 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'InfraStatistics.Interfaces.Interface.Cache.Protocols.Protocol' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Cache.Protocols.Protocol',
False,
[
_MetaInfoClassMember('protocol-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Name of the protocol
''',
'protocol_name',
'Cisco-IOS-XR-infra-statsd-oper', True),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input data rate in 1000's of bps
''',
'input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input packets per second
''',
'input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output data rate in 1000's of bps
''',
'output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output packets per second
''',
'output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Protocol number
''',
'protocol',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'protocol',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Cache.Protocols' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Cache.Protocols',
False,
[
_MetaInfoClassMember('protocol', REFERENCE_LIST, 'Protocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Cache.Protocols.Protocol',
[], [],
''' Interface counters per protocol
''',
'protocol',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'protocols',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Cache.InterfacesMibCounters' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Cache.InterfacesMibCounters',
False,
[
_MetaInfoClassMember('applique', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Applique
''',
'applique',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('availability-flag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Availability bit mask
''',
'availability_flag',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets received
''',
'broadcast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets sent
''',
'broadcast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('carrier-transitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Carrier transitions
''',
'carrier_transitions',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('crc-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input CRC errors
''',
'crc_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('framing-errors-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Framing-errors received
''',
'framing_errors_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('giant-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received giant packets
''',
'giant_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-aborts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input aborts
''',
'input_aborts',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input drops
''',
'input_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input errors
''',
'input_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-ignored-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input ignored packets
''',
'input_ignored_packets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-overruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input overruns
''',
'input_overruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input queue drops
''',
'input_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-discontinuity-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SysUpTime when counters were last reset (in
seconds)
''',
'last_discontinuity_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets received
''',
'multicast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets sent
''',
'multicast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffer-failures', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffer failures
''',
'output_buffer_failures',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffers-swapped-out', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffers swapped out
''',
'output_buffers_swapped_out',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output drops
''',
'output_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output errors
''',
'output_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output queue drops
''',
'output_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-underruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output underruns
''',
'output_underruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('parity-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received parity packets
''',
'parity_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('resets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of board resets
''',
'resets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('runt-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received runt packets
''',
'runt_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-last-clear-counters', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of seconds since last clear counters
''',
'seconds_since_last_clear_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet received
''',
'seconds_since_packet_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-sent', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet sent
''',
'seconds_since_packet_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('throttled-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received throttled packets
''',
'throttled_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('unknown-protocol-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Unknown protocol packets received
''',
'unknown_protocol_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'interfaces-mib-counters',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Cache.DataRate' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Cache.DataRate',
False,
[
_MetaInfoClassMember('bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Bandwidth (in kbps)
''',
'bandwidth',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input data rate in 1000's of bps
''',
'input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-load', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Input load as fraction of 255
''',
'input_load',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input packets per second
''',
'input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('load-interval', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of 30-sec intervals less one
''',
'load_interval',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output data rate in 1000's of bps
''',
'output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-load', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Output load as fraction of 255
''',
'output_load',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output packets per second
''',
'output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak input data rate
''',
'peak_input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak input packet rate
''',
'peak_input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak output data rate
''',
'peak_output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak output packet rate
''',
'peak_output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('reliability', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Reliability coefficient
''',
'reliability',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'data-rate',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Cache.GenericCounters' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Cache.GenericCounters',
False,
[
_MetaInfoClassMember('applique', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Applique
''',
'applique',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('availability-flag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Availability bit mask
''',
'availability_flag',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets received
''',
'broadcast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets sent
''',
'broadcast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('carrier-transitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Carrier transitions
''',
'carrier_transitions',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('crc-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input CRC errors
''',
'crc_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('framing-errors-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Framing-errors received
''',
'framing_errors_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('giant-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received giant packets
''',
'giant_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-aborts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input aborts
''',
'input_aborts',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input drops
''',
'input_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input errors
''',
'input_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-ignored-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input ignored packets
''',
'input_ignored_packets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-overruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input overruns
''',
'input_overruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input queue drops
''',
'input_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-discontinuity-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SysUpTime when counters were last reset (in
seconds)
''',
'last_discontinuity_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets received
''',
'multicast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets sent
''',
'multicast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffer-failures', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffer failures
''',
'output_buffer_failures',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffers-swapped-out', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffers swapped out
''',
'output_buffers_swapped_out',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output drops
''',
'output_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output errors
''',
'output_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output queue drops
''',
'output_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-underruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output underruns
''',
'output_underruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('parity-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received parity packets
''',
'parity_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('resets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of board resets
''',
'resets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('runt-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received runt packets
''',
'runt_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-last-clear-counters', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of seconds since last clear counters
''',
'seconds_since_last_clear_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet received
''',
'seconds_since_packet_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-sent', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet sent
''',
'seconds_since_packet_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('throttled-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received throttled packets
''',
'throttled_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('unknown-protocol-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Unknown protocol packets received
''',
'unknown_protocol_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'generic-counters',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Cache' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Cache',
False,
[
_MetaInfoClassMember('data-rate', REFERENCE_CLASS, 'DataRate' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Cache.DataRate',
[], [],
''' Datarate information
''',
'data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('generic-counters', REFERENCE_CLASS, 'GenericCounters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Cache.GenericCounters',
[], [],
''' Generic set of interface counters
''',
'generic_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('interfaces-mib-counters', REFERENCE_CLASS, 'InterfacesMibCounters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Cache.InterfacesMibCounters',
[], [],
''' Set of interface counters as displayed by the
InterfacesMIB
''',
'interfaces_mib_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('protocols', REFERENCE_CLASS, 'Protocols' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Cache.Protocols',
[], [],
''' List of protocols
''',
'protocols',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'cache',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Latest.Protocols.Protocol' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Latest.Protocols.Protocol',
False,
[
_MetaInfoClassMember('protocol-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Name of the protocol
''',
'protocol_name',
'Cisco-IOS-XR-infra-statsd-oper', True),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input data rate in 1000's of bps
''',
'input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input packets per second
''',
'input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output data rate in 1000's of bps
''',
'output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output packets per second
''',
'output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Protocol number
''',
'protocol',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'protocol',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Latest.Protocols' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Latest.Protocols',
False,
[
_MetaInfoClassMember('protocol', REFERENCE_LIST, 'Protocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Latest.Protocols.Protocol',
[], [],
''' Interface counters per protocol
''',
'protocol',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'protocols',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Latest.InterfacesMibCounters' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Latest.InterfacesMibCounters',
False,
[
_MetaInfoClassMember('applique', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Applique
''',
'applique',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('availability-flag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Availability bit mask
''',
'availability_flag',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets received
''',
'broadcast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets sent
''',
'broadcast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('carrier-transitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Carrier transitions
''',
'carrier_transitions',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('crc-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input CRC errors
''',
'crc_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('framing-errors-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Framing-errors received
''',
'framing_errors_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('giant-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received giant packets
''',
'giant_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-aborts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input aborts
''',
'input_aborts',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input drops
''',
'input_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input errors
''',
'input_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-ignored-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input ignored packets
''',
'input_ignored_packets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-overruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input overruns
''',
'input_overruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input queue drops
''',
'input_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-discontinuity-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SysUpTime when counters were last reset (in
seconds)
''',
'last_discontinuity_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets received
''',
'multicast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets sent
''',
'multicast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffer-failures', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffer failures
''',
'output_buffer_failures',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffers-swapped-out', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffers swapped out
''',
'output_buffers_swapped_out',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output drops
''',
'output_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output errors
''',
'output_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output queue drops
''',
'output_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-underruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output underruns
''',
'output_underruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('parity-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received parity packets
''',
'parity_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('resets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of board resets
''',
'resets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('runt-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received runt packets
''',
'runt_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-last-clear-counters', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of seconds since last clear counters
''',
'seconds_since_last_clear_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet received
''',
'seconds_since_packet_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-sent', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet sent
''',
'seconds_since_packet_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('throttled-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received throttled packets
''',
'throttled_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('unknown-protocol-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Unknown protocol packets received
''',
'unknown_protocol_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'interfaces-mib-counters',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Latest.DataRate' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Latest.DataRate',
False,
[
_MetaInfoClassMember('bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Bandwidth (in kbps)
''',
'bandwidth',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input data rate in 1000's of bps
''',
'input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-load', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Input load as fraction of 255
''',
'input_load',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input packets per second
''',
'input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('load-interval', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of 30-sec intervals less one
''',
'load_interval',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output data rate in 1000's of bps
''',
'output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-load', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Output load as fraction of 255
''',
'output_load',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output packets per second
''',
'output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak input data rate
''',
'peak_input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak input packet rate
''',
'peak_input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak output data rate
''',
'peak_output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak output packet rate
''',
'peak_output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('reliability', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Reliability coefficient
''',
'reliability',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'data-rate',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Latest.GenericCounters' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Latest.GenericCounters',
False,
[
_MetaInfoClassMember('applique', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Applique
''',
'applique',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('availability-flag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Availability bit mask
''',
'availability_flag',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets received
''',
'broadcast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets sent
''',
'broadcast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('carrier-transitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Carrier transitions
''',
'carrier_transitions',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('crc-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input CRC errors
''',
'crc_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('framing-errors-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Framing-errors received
''',
'framing_errors_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('giant-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received giant packets
''',
'giant_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-aborts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input aborts
''',
'input_aborts',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input drops
''',
'input_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input errors
''',
'input_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-ignored-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input ignored packets
''',
'input_ignored_packets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-overruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input overruns
''',
'input_overruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input queue drops
''',
'input_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-discontinuity-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SysUpTime when counters were last reset (in
seconds)
''',
'last_discontinuity_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets received
''',
'multicast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets sent
''',
'multicast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffer-failures', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffer failures
''',
'output_buffer_failures',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffers-swapped-out', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffers swapped out
''',
'output_buffers_swapped_out',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output drops
''',
'output_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output errors
''',
'output_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output queue drops
''',
'output_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-underruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output underruns
''',
'output_underruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('parity-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received parity packets
''',
'parity_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('resets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of board resets
''',
'resets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('runt-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received runt packets
''',
'runt_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-last-clear-counters', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of seconds since last clear counters
''',
'seconds_since_last_clear_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet received
''',
'seconds_since_packet_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-sent', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet sent
''',
'seconds_since_packet_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('throttled-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received throttled packets
''',
'throttled_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('unknown-protocol-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Unknown protocol packets received
''',
'unknown_protocol_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'generic-counters',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Latest' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Latest',
False,
[
_MetaInfoClassMember('data-rate', REFERENCE_CLASS, 'DataRate' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Latest.DataRate',
[], [],
''' Datarate information
''',
'data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('generic-counters', REFERENCE_CLASS, 'GenericCounters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Latest.GenericCounters',
[], [],
''' Generic set of interface counters
''',
'generic_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('interfaces-mib-counters', REFERENCE_CLASS, 'InterfacesMibCounters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Latest.InterfacesMibCounters',
[], [],
''' Set of interface counters as displayed by the
InterfacesMIB
''',
'interfaces_mib_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('protocols', REFERENCE_CLASS, 'Protocols' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Latest.Protocols',
[], [],
''' List of protocols
''',
'protocols',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'latest',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Total.Protocols.Protocol' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Total.Protocols.Protocol',
False,
[
_MetaInfoClassMember('protocol-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Name of the protocol
''',
'protocol_name',
'Cisco-IOS-XR-infra-statsd-oper', True),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input data rate in 1000's of bps
''',
'input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input packets per second
''',
'input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output data rate in 1000's of bps
''',
'output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output packets per second
''',
'output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Protocol number
''',
'protocol',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'protocol',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Total.Protocols' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Total.Protocols',
False,
[
_MetaInfoClassMember('protocol', REFERENCE_LIST, 'Protocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Total.Protocols.Protocol',
[], [],
''' Interface counters per protocol
''',
'protocol',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'protocols',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Total.InterfacesMibCounters' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Total.InterfacesMibCounters',
False,
[
_MetaInfoClassMember('applique', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Applique
''',
'applique',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('availability-flag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Availability bit mask
''',
'availability_flag',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets received
''',
'broadcast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets sent
''',
'broadcast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('carrier-transitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Carrier transitions
''',
'carrier_transitions',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('crc-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input CRC errors
''',
'crc_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('framing-errors-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Framing-errors received
''',
'framing_errors_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('giant-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received giant packets
''',
'giant_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-aborts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input aborts
''',
'input_aborts',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input drops
''',
'input_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input errors
''',
'input_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-ignored-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input ignored packets
''',
'input_ignored_packets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-overruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input overruns
''',
'input_overruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input queue drops
''',
'input_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-discontinuity-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SysUpTime when counters were last reset (in
seconds)
''',
'last_discontinuity_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets received
''',
'multicast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets sent
''',
'multicast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffer-failures', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffer failures
''',
'output_buffer_failures',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffers-swapped-out', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffers swapped out
''',
'output_buffers_swapped_out',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output drops
''',
'output_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output errors
''',
'output_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output queue drops
''',
'output_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-underruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output underruns
''',
'output_underruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('parity-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received parity packets
''',
'parity_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('resets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of board resets
''',
'resets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('runt-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received runt packets
''',
'runt_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-last-clear-counters', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of seconds since last clear counters
''',
'seconds_since_last_clear_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet received
''',
'seconds_since_packet_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-sent', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet sent
''',
'seconds_since_packet_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('throttled-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received throttled packets
''',
'throttled_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('unknown-protocol-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Unknown protocol packets received
''',
'unknown_protocol_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'interfaces-mib-counters',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Total.DataRate' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Total.DataRate',
False,
[
_MetaInfoClassMember('bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Bandwidth (in kbps)
''',
'bandwidth',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input data rate in 1000's of bps
''',
'input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-load', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Input load as fraction of 255
''',
'input_load',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input packets per second
''',
'input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('load-interval', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of 30-sec intervals less one
''',
'load_interval',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output data rate in 1000's of bps
''',
'output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-load', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Output load as fraction of 255
''',
'output_load',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output packets per second
''',
'output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak input data rate
''',
'peak_input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak input packet rate
''',
'peak_input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak output data rate
''',
'peak_output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak output packet rate
''',
'peak_output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('reliability', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Reliability coefficient
''',
'reliability',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'data-rate',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Total.GenericCounters' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Total.GenericCounters',
False,
[
_MetaInfoClassMember('applique', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Applique
''',
'applique',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('availability-flag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Availability bit mask
''',
'availability_flag',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets received
''',
'broadcast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets sent
''',
'broadcast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('carrier-transitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Carrier transitions
''',
'carrier_transitions',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('crc-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input CRC errors
''',
'crc_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('framing-errors-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Framing-errors received
''',
'framing_errors_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('giant-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received giant packets
''',
'giant_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-aborts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input aborts
''',
'input_aborts',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input drops
''',
'input_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input errors
''',
'input_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-ignored-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input ignored packets
''',
'input_ignored_packets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-overruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input overruns
''',
'input_overruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input queue drops
''',
'input_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-discontinuity-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SysUpTime when counters were last reset (in
seconds)
''',
'last_discontinuity_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets received
''',
'multicast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets sent
''',
'multicast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffer-failures', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffer failures
''',
'output_buffer_failures',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffers-swapped-out', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffers swapped out
''',
'output_buffers_swapped_out',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output drops
''',
'output_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output errors
''',
'output_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output queue drops
''',
'output_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-underruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output underruns
''',
'output_underruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('parity-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received parity packets
''',
'parity_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('resets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of board resets
''',
'resets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('runt-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received runt packets
''',
'runt_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-last-clear-counters', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of seconds since last clear counters
''',
'seconds_since_last_clear_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet received
''',
'seconds_since_packet_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-sent', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet sent
''',
'seconds_since_packet_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('throttled-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received throttled packets
''',
'throttled_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('unknown-protocol-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Unknown protocol packets received
''',
'unknown_protocol_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'generic-counters',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Total' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Total',
False,
[
_MetaInfoClassMember('data-rate', REFERENCE_CLASS, 'DataRate' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Total.DataRate',
[], [],
''' Datarate information
''',
'data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('generic-counters', REFERENCE_CLASS, 'GenericCounters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Total.GenericCounters',
[], [],
''' Generic set of interface counters
''',
'generic_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('interfaces-mib-counters', REFERENCE_CLASS, 'InterfacesMibCounters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Total.InterfacesMibCounters',
[], [],
''' Set of interface counters as displayed by the
InterfacesMIB
''',
'interfaces_mib_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('protocols', REFERENCE_CLASS, 'Protocols' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Total.Protocols',
[], [],
''' List of protocols
''',
'protocols',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'total',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Protocols.Protocol' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Protocols.Protocol',
False,
[
_MetaInfoClassMember('protocol-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Name of the protocol
''',
'protocol_name',
'Cisco-IOS-XR-infra-statsd-oper', True),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input data rate in 1000's of bps
''',
'input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input packets per second
''',
'input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output data rate in 1000's of bps
''',
'output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output packets per second
''',
'output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('protocol', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Protocol number
''',
'protocol',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'protocol',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.Protocols' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.Protocols',
False,
[
_MetaInfoClassMember('protocol', REFERENCE_LIST, 'Protocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Protocols.Protocol',
[], [],
''' Interface counters per protocol
''',
'protocol',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'protocols',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.InterfacesMibCounters' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.InterfacesMibCounters',
False,
[
_MetaInfoClassMember('applique', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Applique
''',
'applique',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('availability-flag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Availability bit mask
''',
'availability_flag',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets received
''',
'broadcast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets sent
''',
'broadcast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('carrier-transitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Carrier transitions
''',
'carrier_transitions',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('crc-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input CRC errors
''',
'crc_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('framing-errors-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Framing-errors received
''',
'framing_errors_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('giant-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received giant packets
''',
'giant_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-aborts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input aborts
''',
'input_aborts',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input drops
''',
'input_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input errors
''',
'input_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-ignored-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input ignored packets
''',
'input_ignored_packets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-overruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input overruns
''',
'input_overruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input queue drops
''',
'input_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-discontinuity-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SysUpTime when counters were last reset (in
seconds)
''',
'last_discontinuity_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets received
''',
'multicast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets sent
''',
'multicast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffer-failures', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffer failures
''',
'output_buffer_failures',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffers-swapped-out', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffers swapped out
''',
'output_buffers_swapped_out',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output drops
''',
'output_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output errors
''',
'output_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output queue drops
''',
'output_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-underruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output underruns
''',
'output_underruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('parity-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received parity packets
''',
'parity_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('resets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of board resets
''',
'resets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('runt-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received runt packets
''',
'runt_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-last-clear-counters', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of seconds since last clear counters
''',
'seconds_since_last_clear_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet received
''',
'seconds_since_packet_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-sent', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet sent
''',
'seconds_since_packet_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('throttled-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received throttled packets
''',
'throttled_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('unknown-protocol-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Unknown protocol packets received
''',
'unknown_protocol_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'interfaces-mib-counters',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.DataRate' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.DataRate',
False,
[
_MetaInfoClassMember('bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Bandwidth (in kbps)
''',
'bandwidth',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input data rate in 1000's of bps
''',
'input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-load', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Input load as fraction of 255
''',
'input_load',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Input packets per second
''',
'input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('load-interval', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of 30-sec intervals less one
''',
'load_interval',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output data rate in 1000's of bps
''',
'output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-load', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Output load as fraction of 255
''',
'output_load',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Output packets per second
''',
'output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-input-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak input data rate
''',
'peak_input_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-input-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak input packet rate
''',
'peak_input_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-output-data-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak output data rate
''',
'peak_output_data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('peak-output-packet-rate', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Peak output packet rate
''',
'peak_output_packet_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('reliability', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Reliability coefficient
''',
'reliability',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'data-rate',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface.GenericCounters' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface.GenericCounters',
False,
[
_MetaInfoClassMember('applique', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Applique
''',
'applique',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('availability-flag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Availability bit mask
''',
'availability_flag',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets received
''',
'broadcast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('broadcast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Broadcast packets sent
''',
'broadcast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes received
''',
'bytes_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('bytes-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Bytes sent
''',
'bytes_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('carrier-transitions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Carrier transitions
''',
'carrier_transitions',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('crc-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input CRC errors
''',
'crc_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('framing-errors-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Framing-errors received
''',
'framing_errors_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('giant-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received giant packets
''',
'giant_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-aborts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input aborts
''',
'input_aborts',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input drops
''',
'input_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total input errors
''',
'input_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-ignored-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input ignored packets
''',
'input_ignored_packets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-overruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input overruns
''',
'input_overruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('input-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input queue drops
''',
'input_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-data-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Time when counters were last written (in
seconds)
''',
'last_data_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('last-discontinuity-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' SysUpTime when counters were last reset (in
seconds)
''',
'last_discontinuity_time',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets received
''',
'multicast_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('multicast-packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Multicast packets sent
''',
'multicast_packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffer-failures', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffer failures
''',
'output_buffer_failures',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-buffers-swapped-out', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output buffers swapped out
''',
'output_buffers_swapped_out',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output drops
''',
'output_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-errors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total output errors
''',
'output_errors',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-queue-drops', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output queue drops
''',
'output_queue_drops',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('output-underruns', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output underruns
''',
'output_underruns',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets received
''',
'packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('packets-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets sent
''',
'packets_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('parity-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received parity packets
''',
'parity_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('resets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of board resets
''',
'resets',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('runt-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received runt packets
''',
'runt_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-last-clear-counters', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of seconds since last clear counters
''',
'seconds_since_last_clear_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet received
''',
'seconds_since_packet_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('seconds-since-packet-sent', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Seconds since packet sent
''',
'seconds_since_packet_sent',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('throttled-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Received throttled packets
''',
'throttled_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('unknown-protocol-packets-received', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Unknown protocol packets received
''',
'unknown_protocol_packets_received',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'generic-counters',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of the interface
''',
'interface_name',
'Cisco-IOS-XR-infra-statsd-oper', True),
_MetaInfoClassMember('cache', REFERENCE_CLASS, 'Cache' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Cache',
[], [],
''' Cached stats data of interfaces
''',
'cache',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('data-rate', REFERENCE_CLASS, 'DataRate' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.DataRate',
[], [],
''' Datarate information
''',
'data_rate',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('generic-counters', REFERENCE_CLASS, 'GenericCounters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.GenericCounters',
[], [],
''' Generic set of interface counters
''',
'generic_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('interfaces-mib-counters', REFERENCE_CLASS, 'InterfacesMibCounters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.InterfacesMibCounters',
[], [],
''' Set of interface counters as displayed by the
InterfacesMIB
''',
'interfaces_mib_counters',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('latest', REFERENCE_CLASS, 'Latest' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Latest',
[], [],
''' Latest stats data of interfaces
''',
'latest',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('protocols', REFERENCE_CLASS, 'Protocols' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Protocols',
[], [],
''' List of protocols
''',
'protocols',
'Cisco-IOS-XR-infra-statsd-oper', False),
_MetaInfoClassMember('total', REFERENCE_CLASS, 'Total' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface.Total',
[], [],
''' Total stats data of interfaces
''',
'total',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics.Interfaces' : {
'meta_info' : _MetaInfoClass('InfraStatistics.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces.Interface',
[], [],
''' Statistics of an interface
''',
'interface',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
'InfraStatistics' : {
'meta_info' : _MetaInfoClass('InfraStatistics',
False,
[
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper', 'InfraStatistics.Interfaces',
[], [],
''' List of interfaces
''',
'interfaces',
'Cisco-IOS-XR-infra-statsd-oper', False),
],
'Cisco-IOS-XR-infra-statsd-oper',
'infra-statistics',
_yang_ns._namespaces['Cisco-IOS-XR-infra-statsd-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_statsd_oper'
),
},
}
_meta_table['InfraStatistics.Interfaces.Interface.Cache.Protocols.Protocol']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Cache.Protocols']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Cache.Protocols']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Cache']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Cache.InterfacesMibCounters']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Cache']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Cache.DataRate']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Cache']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Cache.GenericCounters']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Cache']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Latest.Protocols.Protocol']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Latest.Protocols']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Latest.Protocols']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Latest']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Latest.InterfacesMibCounters']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Latest']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Latest.DataRate']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Latest']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Latest.GenericCounters']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Latest']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Total.Protocols.Protocol']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Total.Protocols']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Total.Protocols']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Total']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Total.InterfacesMibCounters']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Total']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Total.DataRate']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Total']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Total.GenericCounters']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Total']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Protocols.Protocol']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface.Protocols']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Cache']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Latest']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Total']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.Protocols']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.InterfacesMibCounters']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.DataRate']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface.GenericCounters']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces.Interface']['meta_info']
_meta_table['InfraStatistics.Interfaces.Interface']['meta_info'].parent =_meta_table['InfraStatistics.Interfaces']['meta_info']
_meta_table['InfraStatistics.Interfaces']['meta_info'].parent =_meta_table['InfraStatistics']['meta_info']
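# This table is machine-generated metadata consumed by the YDK runtime and is not meant
# to be edited by hand. A rough sketch of how the corresponding model is typically read
# over NETCONF (the CRUD pattern shown is the usual ydk-py idiom; the connection values
# and attribute traversal below are illustrative assumptions, not taken from this file):
#
#   from ydk.services import CRUDService
#   from ydk.providers import NetconfServiceProvider
#   from ydk.models.cisco_ios_xr import Cisco_IOS_XR_infra_statsd_oper as xr_statsd
#
#   provider = NetconfServiceProvider(address="192.0.2.1", username="admin", password="admin")
#   crud = CRUDService()
#   stats = crud.read(provider, xr_statsd.InfraStatistics())
#   for intf in stats.interfaces.interface:
#       print(intf.interface_name, intf.latest.generic_counters.packets_received)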
| 111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_infra_statsd_oper.py | Python | apache-2.0 | 145,349 |
#!/usr/bin/env python
# @author: Yannick Dayer <yannick.dayer@idiap.ch>
# @date: Wed 16 Jun 2021 17:21:47 UTC+02
import csv
import logging
from pathlib import Path
import click
from tqdm import tqdm
import bob.io.audio
import bob.io.base
from bob.bio.base.database import CSVDataset
from bob.bio.base.database import CSVToSampleLoaderBiometrics
from bob.extension import rc
from bob.extension.download import download_and_unzip
from bob.extension.download import get_file
from bob.extension.download import search_file
from bob.extension.scripts.click_helper import verbosity_option
logger = logging.getLogger(__name__)
def get_voxforge_protocol_file():
"""Returns the protocol definition archive, downloading it if necessary.
Looks for the file in the ``datasets`` folder of ``bob_data_folder`` and
downloads it from https://www.idiap.ch/software/bob/data/bob/bob.bio.spear/ if
needed.
"""
proto_def_hash = "dc84ac65"
proto_def_name = f"database-protocols-voxforge-{proto_def_hash}.tar.gz"
proto_def_urls = [
f"https://www.idiap.ch/software/bob/data/bob/bob.bio.spear/{proto_def_name}",
f"http://www.idiap.ch/software/bob/data/bob/bob.bio.spear/{proto_def_name}",
]
logger.info(f"Retrieving protocol definition file '{proto_def_name}'.")
return get_file(
filename=proto_def_name,
urls=proto_def_urls,
file_hash=proto_def_hash,
cache_subdir="datasets",
)
def VoxforgeBioDatabase(
protocol="Default", dataset_protocol_path=None, data_path=None, **kwargs
):
"""Database interface for the VoxForge dataset subset for speaker recognition.
This database interface is meant to be used with the vanilla-biometrics pipeline.
Given a series of CSV files (or after downloading them from the bob data server), it
creates the Sample objects for each role needed by the pipeline (enroll, probe),
for different groups (dev, eval).
`protocol definition` files are not the `data` files:
- `protocol definition` files are a list of paths and their corresponding reference
names. They are available on the bob data server.
- `data` files are the actual files of the dataset (pointed to by the definition
files). They are not provided by bob.
Although not provided by bob, the VoxForge data is freely available online.
If you don't already have the data, download it and set the bob configuration using
the following commands:
``$ bob db download-voxforge -d your_path_to_data``
``$ bob config set bob.db.voxforge.directory your_path_to_data``
Parameters
----------
protocol: str
Name of the protocol to use (subfolder in protocol definition).
dataset_protocol_path: str or None
Path to an existing protocol definition folder structure.
If None: will download the definition files to the path pointed by the
``bob_data_folder`` config (see :py:func:`bob.extension.download.get_file`).
data_path: str or None
Path to the data files of VoxForge.
If None: will use the path in the ``bob.db.voxforge.directory`` config.
"""
if dataset_protocol_path is None:
dataset_protocol_path = get_voxforge_protocol_file()
if data_path is None:
data_path = rc.get("bob.db.voxforge.directory")
if data_path is None:
logger.warning(
"No data path was provided! Either set "
"'bob.db.voxforge.directory' with the 'bob config set' command, or "
"provide a 'data_path' to VoxforgeBioDatabase."
)
logger.info(
f"Database: Will read the CSV protocol definitions in '{dataset_protocol_path}'."
)
logger.info(f"Database: Will read raw data files in '{data_path}'.")
return CSVDataset(
name="VoxForge",
protocol=protocol,
dataset_protocol_path=dataset_protocol_path,
csv_to_sample_loader=CSVToSampleLoaderBiometrics(
data_loader=bob.io.base.load,
dataset_original_directory=data_path,
extension=".wav",
),
allow_scoring_with_all_biometric_references=True,
**kwargs,
)
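# A minimal usage sketch (values are illustrative; in practice the data path usually
# comes from the 'bob.db.voxforge.directory' config rather than being hard-coded):
#
#   database = VoxforgeBioDatabase(
#       protocol="Default",
#       data_path="/path/to/voxforge/data",
#   )
#   # The returned CSVDataset exposes the usual vanilla-biometrics accessors,
#   # e.g. database.references(group="dev") and database.probes(group="dev").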
@click.command(
epilog="""Examples:
\b
$ bob db download-voxforge ./data/
\b
$ bob db download-voxforge --protocol-definition bio-spear-voxforge.tar ./data/
""",
)
@click.option(
"--protocol-definition",
"-p",
default=None,
help=(
"A path to a the protocol definition file of VoxForge. "
"If omitted, will use the default protocol definition file at "
"`https://www.idiap.ch/software/bob/data/bob/bob.bio.spear`."
),
)
@click.option(
"--force-download",
"-f",
is_flag=True,
help="Download a file even if it already exists locally.",
)
@click.argument("destination")
@verbosity_option()
def download_voxforge(protocol_definition, destination, force_download, **kwargs):
"""Downloads a series of VoxForge data files from their repository and untar them.
The files will be downloaded and saved in the `destination` folder then extracted.
The list of URLs is provided in the protocol definition file of Voxforge.
"""
destination = Path(destination)
destination.mkdir(exist_ok=True)
if protocol_definition is None:
protocol_definition = get_voxforge_protocol_file()
# Use the `Default` protocol
protocol = "Default"
# Open the list file
list_file = f"{protocol}/data_files_urls.csv"
open_list_file = search_file(protocol_definition, [list_file])
num_files = sum(1 for _ in open_list_file) - 1
open_list_file.seek(0, 0)
logger.info(f"{num_files} files are listed in {list_file}. Downloading...")
csv_list_file = csv.DictReader(open_list_file)
for row in tqdm(csv_list_file, total=num_files):
full_filename = destination / row["filename"]
if force_download or not full_filename.exists():
logger.debug(f"Downloading {row['filename']} from {row['url']}")
download_and_unzip(urls=[row["url"]], filename=full_filename)
logger.debug(f"Downloaded to {full_filename}")
logger.info(f"Download of {num_files} files completed.")
open_list_file.close()
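# --- Illustrative usage sketch ---
# A minimal example of constructing the database interface from a script, assuming
# the VoxForge data has already been downloaded. The data path below is a placeholder,
# not a real default; in practice you would point it at your local copy or rely on the
# ``bob.db.voxforge.directory`` config instead.
if __name__ == "__main__":
    demo_database = VoxforgeBioDatabase(
        protocol="Default",
        data_path="/path/to/voxforge/data",  # placeholder path (assumption)
    )
    print(demo_database)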
| bioidiap/bob.bio.spear | bob/bio/spear/database/voxforge.py | Python | gpl-3.0 | 6,256 |
"""Utility file for generating synthetic phrases from input phrases"""
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import torch
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
class UtteranceGenerator:
"""Class to generate synthetic phrases from user defined phrases"""
def __init__(self):
model_name = "tuner007/pegasus_paraphrase"
self.torch_device = "cuda" if torch.cuda.is_available() else "cpu"
self.tokenizer = PegasusTokenizer.from_pretrained(model_name)
self.model = PegasusForConditionalGeneration.from_pretrained(
model_name
).to(self.torch_device)
def get_response(
self,
input_text,
num_return_sequences,
num_beams,
max_length,
truncation,
temperature,
):
"""Individual instance of model to generate synthetic phrases"""
batch = self.tokenizer(
[input_text],
truncation=truncation,
padding="longest",
max_length=max_length,
return_tensors="pt",
).to(self.torch_device)
translated = self.model.generate(
**batch,
max_length=max_length,
num_beams=num_beams,
num_return_sequences=num_return_sequences,
temperature=temperature,
)
tgt_text = self.tokenizer.batch_decode(
translated, skip_special_tokens=True
)
return tgt_text
def generate_utterances(
self,
origin_utterances: pd.DataFrame,
synthetic_instances: int = None,
max_length: int = 60,
truncation: bool = True,
temperature: float = 1.5,
):
"""Make new phrases from a dataframe of existing ones.
Args:
origin_utterances: dataframe specifying the phrases
to generate synthetic phrases from
Columns:
              training_phrase: phrase to generate synthetic phrases from
          synthetic_instances (optional): int number of synthetic phrases to
            generate for each base phrase; if not set as a column of the
            dataframe, it must be set while calling this function and will
            be applied to all phrases
          max_length (optional): int
          truncation (optional): boolean
          temperature (optional): float
Returns:
synthetic_phrases_df: dataframe with new synthetic phrases.
"""
synthetic_phrases_df = pd.DataFrame()
if (
synthetic_instances
and "synthetic_instances" not in origin_utterances.columns
):
origin_utterances["synthetic_instances"] = synthetic_instances
origin_utterances = origin_utterances.reset_index(drop=True)
origin_utterances.insert(0, "id", origin_utterances.index)
for _, row in origin_utterances.iterrows():
iter_frame = pd.DataFrame()
num_beams = int(row["synthetic_instances"])
num_return_sequences = int(row["synthetic_instances"])
utterance = row["training_phrase"]
synthetic_phrases = self.get_response(
utterance,
num_return_sequences,
num_beams,
max_length=max_length,
temperature=temperature,
truncation=truncation,
)
iter_frame["synthetic_phrases"] = synthetic_phrases
for col in origin_utterances.columns:
iter_frame[col] = row[col]
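            # Note: DataFrame.append was deprecated and later removed in pandas 2.x;
            # pd.concat([synthetic_phrases_df, iter_frame]) is the modern equivalent.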
synthetic_phrases_df = synthetic_phrases_df.append(iter_frame)
ordered_cols = [
"id",
"synthetic_instances",
"training_phrase",
"synthetic_phrases",
]
remainder_cols = list(
set(origin_utterances.columns) - set(ordered_cols)
)
column_ordering = ordered_cols + remainder_cols
return synthetic_phrases_df[column_ordering]
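# --- Illustrative usage sketch ---
# Shows the expected input shape for generate_utterances: a DataFrame with a
# "training_phrase" column, with the per-phrase count supplied through the
# synthetic_instances argument. The example phrases are made up for illustration.
if __name__ == "__main__":
    generator = UtteranceGenerator()
    seed_phrases = pd.DataFrame(
        {
            "training_phrase": [
                "I want to reset my password",
                "where can I see my order status",
            ]
        }
    )
    expanded = generator.generate_utterances(seed_phrases, synthetic_instances=3)
    print(expanded.head())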
| GoogleCloudPlatform/dfcx-scrapi | src/dfcx_scrapi/core_ml/utterance_generator.py | Python | apache-2.0 | 4,665 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NetkitFtp(AutotoolsPackage):
"""netkit-ftp is the original file transfer client program for Linux."""
homepage = "http://ftp.uk.linux.org/pub/linux/Networking/netkit"
git = "https://github.com/mmaraya/netkit-ftp.git"
version('master', branch='master')
def install(self, spec, prefix):
mkdirp(prefix.bin)
mkdirp(prefix.man.man1)
make('install')
| LLNL/spack | var/spack/repos/builtin/packages/netkit-ftp/package.py | Python | lgpl-2.1 | 622 |
"""
Tests for Course Blocks forms
"""
import ddt
from django.http import Http404, QueryDict
from urllib import urlencode
from rest_framework.exceptions import PermissionDenied
from opaque_keys.edx.locator import CourseLocator
from openedx.core.djangoapps.util.test_forms import FormTestMixin
from student.models import CourseEnrollment
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..forms import BlockListGetForm
@ddt.ddt
class TestBlockListGetForm(FormTestMixin, SharedModuleStoreTestCase):
"""
Tests for BlockListGetForm
"""
FORM_CLASS = BlockListGetForm
@classmethod
def setUpClass(cls):
super(TestBlockListGetForm, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestBlockListGetForm, self).setUp()
self.student = UserFactory.create()
self.student2 = UserFactory.create()
self.staff = UserFactory.create(is_staff=True)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory.create(user=self.student2, course_id=self.course.id)
usage_key = self.course.location
self.initial = {'requesting_user': self.student}
self.form_data = QueryDict(
urlencode({
'username': self.student.username,
'usage_key': unicode(usage_key),
}),
mutable=True,
)
self.cleaned_data = {
'all_blocks': None,
'block_counts': set(),
'depth': 0,
'nav_depth': None,
'return_type': 'dict',
'requested_fields': {'display_name', 'type'},
'student_view_data': set(),
'usage_key': usage_key,
'username': self.student.username,
'user': self.student,
}
def assert_raises_permission_denied(self):
"""
Fail unless permission is denied to the form
"""
with self.assertRaises(PermissionDenied):
self.get_form(expected_valid=False)
def assert_raises_not_found(self):
"""
Fail unless a 404 occurs
"""
with self.assertRaises(Http404):
self.get_form(expected_valid=False)
def assert_equals_cleaned_data(self):
"""
Check that the form returns the expected data
"""
form = self.get_form(expected_valid=True)
self.assertDictEqual(form.cleaned_data, self.cleaned_data)
def test_basic(self):
self.assert_equals_cleaned_data()
#-- usage key
def test_no_usage_key_param(self):
self.form_data.pop('usage_key')
self.assert_error('usage_key', "This field is required.")
def test_invalid_usage_key(self):
self.form_data['usage_key'] = 'invalid_usage_key'
self.assert_error('usage_key', "'invalid_usage_key' is not a valid usage key.")
def test_non_existent_usage_key(self):
self.form_data['usage_key'] = self.store.make_course_usage_key(CourseLocator('non', 'existent', 'course'))
self.assert_raises_permission_denied()
#-- user
@ddt.data("True", "true", True)
def test_no_user_all_blocks_true(self, all_blocks_value):
self.initial = {'requesting_user': self.staff}
self.form_data.pop('username')
self.form_data['all_blocks'] = all_blocks_value
self.get_form(expected_valid=True)
@ddt.data("False", "false", False)
def test_no_user_all_blocks_false(self, all_blocks_value):
self.initial = {'requesting_user': self.staff}
self.form_data.pop('username')
self.form_data['all_blocks'] = all_blocks_value
self.assert_error('username', "This field is required unless all_blocks is requested.")
def test_no_user_all_blocks_none(self):
self.initial = {'requesting_user': self.staff}
self.form_data.pop('username')
self.assert_error('username', "This field is required unless all_blocks is requested.")
def test_no_user_non_staff(self):
self.form_data.pop('username')
self.form_data['all_blocks'] = True
self.assert_raises_permission_denied()
def test_nonexistent_user_by_student(self):
self.form_data['username'] = 'non_existent_user'
self.assert_raises_permission_denied()
def test_nonexistent_user_by_staff(self):
self.initial = {'requesting_user': self.staff}
self.form_data['username'] = 'non_existent_user'
self.assert_raises_not_found()
def test_other_user_by_student(self):
self.form_data['username'] = self.student2.username
self.assert_raises_permission_denied()
def test_other_user_by_staff(self):
self.initial = {'requesting_user': self.staff}
self.get_form(expected_valid=True)
def test_unenrolled_student(self):
CourseEnrollment.unenroll(self.student, self.course.id)
self.assert_raises_permission_denied()
def test_unenrolled_staff(self):
CourseEnrollment.unenroll(self.staff, self.course.id)
self.initial = {'requesting_user': self.staff}
self.form_data['username'] = self.staff.username
self.get_form(expected_valid=True)
def test_unenrolled_student_by_staff(self):
CourseEnrollment.unenroll(self.student, self.course.id)
self.initial = {'requesting_user': self.staff}
self.get_form(expected_valid=True)
#-- depth
def test_depth_integer(self):
self.form_data['depth'] = 3
self.cleaned_data['depth'] = 3
self.assert_equals_cleaned_data()
def test_depth_all(self):
self.form_data['depth'] = 'all'
self.cleaned_data['depth'] = None
self.assert_equals_cleaned_data()
def test_depth_invalid(self):
self.form_data['depth'] = 'not_an_integer'
self.assert_error('depth', "'not_an_integer' is not a valid depth value.")
#-- nav depth
def test_nav_depth(self):
self.form_data['nav_depth'] = 3
self.cleaned_data['nav_depth'] = 3
self.cleaned_data['requested_fields'] |= {'nav_depth'}
self.assert_equals_cleaned_data()
def test_nav_depth_invalid(self):
self.form_data['nav_depth'] = 'not_an_integer'
self.assert_error('nav_depth', "Enter a whole number.")
def test_nav_depth_negative(self):
self.form_data['nav_depth'] = -1
self.assert_error('nav_depth', "Ensure this value is greater than or equal to 0.")
#-- return_type
def test_return_type(self):
self.form_data['return_type'] = 'list'
self.cleaned_data['return_type'] = 'list'
self.assert_equals_cleaned_data()
def test_return_type_invalid(self):
self.form_data['return_type'] = 'invalid_return_type'
self.assert_error(
'return_type',
"Select a valid choice. invalid_return_type is not one of the available choices."
)
#-- requested fields
def test_requested_fields(self):
self.form_data.setlist('requested_fields', ['graded', 'nav_depth', 'some_other_field'])
self.cleaned_data['requested_fields'] |= {'graded', 'nav_depth', 'some_other_field'}
self.assert_equals_cleaned_data()
@ddt.data('block_counts', 'student_view_data')
def test_higher_order_field(self, field_name):
field_value = {'block_type1', 'block_type2'}
self.form_data.setlist(field_name, field_value)
self.cleaned_data[field_name] = field_value
self.cleaned_data['requested_fields'].add(field_name)
self.assert_equals_cleaned_data()
def test_combined_fields(self):
# add requested fields
self.form_data.setlist('requested_fields', ['field1', 'field2'])
# add higher order fields
block_types_list = {'block_type1', 'block_type2'}
for field_name in ['block_counts', 'student_view_data']:
self.form_data.setlist(field_name, block_types_list)
self.cleaned_data[field_name] = block_types_list
# verify the requested_fields in cleaned_data includes all fields
self.cleaned_data['requested_fields'] |= {'field1', 'field2', 'student_view_data', 'block_counts'}
self.assert_equals_cleaned_data()
| franosincic/edx-platform | lms/djangoapps/course_api/blocks/tests/test_forms.py | Python | agpl-3.0 | 8,417 |
__author__ = 'student'
a = 1
while a <= 100:
if a % 3 == 0:
if a % 5 != 0:
print('Fizz')
else:
print('FizzBuzz')
elif a % 5 == 0:
print('Buzz')
else:
print(a)
a += 1 | YellowNettle/Labs-16 | 3_2.py | Python | gpl-3.0 | 237 |
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from ...common.ConfigGroup import ConfigGroup
class CGImage(ConfigGroup):
cp_Href = str()
| sbbic/core | wizards/com/sun/star/wizards/web/data/CGImage.py | Python | gpl-3.0 | 894 |
# This work is licensed under the GNU GPLv2 or later.
# See the COPYING file in the top-level directory.
import tempfile
import pytest
import bugzilla
import tests
import tests.mockbackend
import tests.utils
#################################
# 'bugzilla login' mock testing #
#################################
def test_login(run_cli):
cmd = "bugzilla login FOO BAR"
fakebz = tests.mockbackend.make_bz(
user_login_args="data/mockargs/test_login.txt",
user_login_return=RuntimeError("TEST ERROR"))
out = run_cli(cmd, fakebz, expectfail=True)
assert "Login failed: TEST ERROR" in out
fakebz = tests.mockbackend.make_bz(
user_login_args="data/mockargs/test_login.txt",
user_login_return={})
out = run_cli(cmd, fakebz)
assert "Login successful" in out
cmd = "bugzilla --restrict-login --user FOO --password BAR login"
fakebz = tests.mockbackend.make_bz(
user_login_args="data/mockargs/test_login-restrict.txt",
user_login_return={})
out = run_cli(cmd, fakebz)
assert "Login successful" in out
cmd = "bugzilla --ensure-logged-in --user FOO --password BAR login"
# Raises raw error trying to see if we aren't logged in
with pytest.raises(NotImplementedError):
fakebz = tests.mockbackend.make_bz(
user_login_args="data/mockargs/test_login.txt",
user_login_return={},
user_get_args=None,
user_get_return=NotImplementedError())
out = run_cli(cmd, fakebz)
# Errors with expected code
cmd = "bugzilla --ensure-logged-in --user FOO --password BAR login"
fakebz = tests.mockbackend.make_bz(
user_login_args="data/mockargs/test_login.txt",
user_login_return={},
user_get_args=None,
user_get_return=bugzilla.BugzillaError("TESTMESSAGE", code=505))
out = run_cli(cmd, fakebz, expectfail=True)
assert "--ensure-logged-in passed but you" in out
# Returns success for logged_in check and hits a tokenfile line
cmd = "bugzilla --ensure-logged-in "
cmd += "login FOO BAR"
fakebz = tests.mockbackend.make_bz(
bz_kwargs={"use_creds": True},
user_login_args="data/mockargs/test_login.txt",
user_login_return={'id': 1234, 'token': 'my-fake-token'},
user_get_args=None,
user_get_return={})
out = run_cli(cmd, fakebz)
assert "Token cache saved" in out
assert fakebz.tokenfile in out
assert "Consider using bugzilla API" in out
def test_interactive_login(monkeypatch, run_cli):
bz = tests.mockbackend.make_bz(
user_login_args="data/mockargs/test_interactive_login.txt",
user_login_return={},
user_logout_args=None,
user_logout_return={},
user_get_args=None,
user_get_return={})
tests.utils.monkeypatch_getpass(monkeypatch)
cmd = "bugzilla login"
fakestdin = "fakeuser\nfakepass\n"
out = run_cli(cmd, bz, stdin=fakestdin)
assert "Bugzilla Username:" in out
assert "Bugzilla Password:" in out
# API key prompting and saving
tmp = tempfile.NamedTemporaryFile()
bz.configpath = [tmp.name]
bz.url = "https://example.com"
cmd = "bugzilla login --api-key"
fakestdin = "MY-FAKE-KEY\n"
out = run_cli(cmd, bz, stdin=fakestdin)
assert "API Key:" in out
assert tmp.name in out
tests.utils.diff_compare(open(tmp.name).read(),
"data/clioutput/test_interactive_login_apikey_rcfile.txt")
# Check that we don't attempt to log in if API key is configured
assert bz.api_key
cmd = "bugzilla login"
out = run_cli(cmd, bz)
assert "already have an API" in out
| wgwoods/python-bugzilla | tests/test_cli_login.py | Python | gpl-2.0 | 3,667 |
# -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Views for schemas."""
from flask import Blueprint, abort, jsonify, request
from flask.views import MethodView
from flask_login import current_user
from invenio_db import db
from invenio_jsonschemas.errors import JSONSchemaNotFound
from jsonref import JsonRefError
from sqlalchemy.exc import IntegrityError
from cap.modules.access.utils import login_required
from .models import Schema
from .permissions import AdminSchemaPermission, ReadSchemaPermission
from .serializers import schema_serializer, update_schema_serializer
from .utils import get_indexed_schemas_for_user, get_schemas_for_user
blueprint = Blueprint(
'cap_schemas',
__name__,
url_prefix='/jsonschemas',
)
class SchemaAPI(MethodView):
"""CRUD views for Schema model."""
decorators = [login_required]
def get(self, name=None, version=None):
"""Get all schemas that user has access to."""
resolve = request.args.get('resolve', False)
latest = request.args.get('latest', False)
if name:
try:
if version:
schema = Schema.get(name, version)
else:
schema = Schema.get_latest(name)
except JSONSchemaNotFound:
abort(404)
if not ReadSchemaPermission(schema).can():
abort(403)
try:
response = schema.serialize(resolve=resolve)
except JsonRefError:
abort(404)
else:
schemas = get_schemas_for_user(latest=latest)
response = [
schema.serialize(resolve=resolve) for schema in schemas
]
return jsonify(response)
def post(self):
"""Create new schema."""
data = request.get_json()
serialized_data, errors = schema_serializer.load(data)
if errors:
raise abort(400, errors)
try:
with db.session.begin_nested():
with db.session.begin_nested():
schema = Schema(**serialized_data)
db.session.add(schema)
schema.give_admin_access_for_user(current_user)
except IntegrityError:
            raise abort(400, 'Error occurred while saving schema in the db.')
return jsonify(schema.serialize())
def put(self, name, version):
"""Update schema."""
try:
schema = Schema.get(name, version)
except JSONSchemaNotFound:
abort(404)
with AdminSchemaPermission(schema).require(403):
data = request.get_json()
serialized_data, errors = update_schema_serializer.load(
data, partial=True)
if errors:
raise abort(400, errors)
schema.update(**serialized_data)
db.session.commit()
return jsonify(schema.serialize())
def delete(self, name, version):
"""Delete schema."""
try:
schema = Schema.get(name, version)
except JSONSchemaNotFound:
abort(404)
with AdminSchemaPermission(schema).require(403):
db.session.delete(schema)
db.session.commit()
return 'Schema deleted.', 204
schema_view_func = SchemaAPI.as_view('schemas')
blueprint.add_url_rule('/', view_func=schema_view_func, methods=[
'GET',
])
blueprint.add_url_rule('/', view_func=schema_view_func, methods=[
'POST',
])
blueprint.add_url_rule('/<string:name>',
view_func=schema_view_func,
methods=[
'GET',
])
blueprint.add_url_rule('/<string:name>/<string:version>',
view_func=schema_view_func,
methods=['GET', 'PUT', 'DELETE'])
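# --- Route summary (derived from the registrations above) ---
# With the blueprint's '/jsonschemas' prefix, the URL rules expose:
#   GET    /jsonschemas/                   -> list schemas visible to the user
#   POST   /jsonschemas/                   -> create a new schema
#   GET    /jsonschemas/<name>             -> latest version of a named schema
#   GET    /jsonschemas/<name>/<version>   -> a specific schema version
#   PUT    /jsonschemas/<name>/<version>   -> update (admin permission required)
#   DELETE /jsonschemas/<name>/<version>   -> delete (admin permission required)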
| cernanalysispreservation/analysis-preservation.cern.ch | cap/modules/schemas/views.py | Python | gpl-2.0 | 4,908 |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from workflows.models.workflow import Workflow
class UserProfile(models.Model):
user = models.OneToOneField(User, related_name="userprofile")
active_workflow = models.ForeignKey(Workflow, related_name="users", null=True, blank=True,
on_delete=models.SET_NULL)
def __str__(self):
return str(self.user)
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
def create_user_profile(sender, instance, created, **kwargs):
profile_set = UserProfile.objects.filter(user__id=instance.id)
if created and not profile_set.exists():
UserProfile.objects.create(user=instance)
# ensure that when a User is created, the matching UserProfile is created automatically as well
post_save.connect(create_user_profile, sender=User) | xflows/clowdflows-backend | workflows/models/user_profile.py | Python | mit | 1,084 |
from __future__ import print_function
import sys
#
"""
Utility methods for CRUD of alarms
"""
def get_state(mon_client, alarm_id):
result = get(mon_client, alarm_id)
return result['state']
def get(mon_client, alarm_id):
result = mon_client.alarms.get(**{'alarm_id': alarm_id})
return result
def disable(mon_client, alarm_id):
patch(mon_client, alarm_id, {'actions_enabled': False})
def enable(mon_client, alarm_id):
patch(mon_client, alarm_id, {'actions_enabled': True})
def set_state(mon_client, alarm_id, state):
patch(mon_client, alarm_id, {'state': state})
new_state = get_state(mon_client, alarm_id)
if new_state != state:
print('Expected new state %s but found %s' %
(state, new_state), file=sys.stderr)
return False
return True
def patch(mon_client, alarm_id, fields):
fields['alarm_id'] = alarm_id
mon_client.alarms.patch(**fields)
def set_optional_field(name, value, fields):
if value is not None:
fields[name] = value
def create(mon_client, name, description, expression, ok_actions=None,
alarm_actions=None, undetermined_actions=None):
fields = {}
fields['name'] = name
fields['expression'] = expression
set_optional_field('description', description, fields)
set_optional_field('ok_actions', ok_actions, fields)
set_optional_field('alarm_actions', alarm_actions, fields)
set_optional_field('undetermined_actions', undetermined_actions, fields)
result = mon_client.alarms.create(**fields)
return result['id']
def find_alarm_byname(mon_client, alarm_name):
alarms = mon_client.alarms.list(**{})
for alarm in alarms:
if alarm['name'] == alarm_name:
return alarm
return None
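# --- Illustrative usage sketch ---
# Shows the intended call pattern for the helpers above. build_monasca_client() is a
# hypothetical stand-in for however your deployment constructs an authenticated
# monascaclient instance; the alarm name and expression are made-up examples.
if __name__ == "__main__":
    mon_client = build_monasca_client()  # hypothetical helper, not defined here
    alarm_id = create(mon_client, 'demo-alarm', 'demo description',
                      'avg(cpu.user_perc) > 90')
    print('created %s, state=%s' % (alarm_id, get_state(mon_client, alarm_id)))
    set_state(mon_client, alarm_id, 'OK')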
| srsakhamuri/monasca-ansible-scripts | tests/alarm.py | Python | apache-2.0 | 1,773 |
from sklearn.preprocessing import LabelEncoder
from splearn.preprocessing import SparkLabelEncoder
from splearn.utils.testing import SplearnTestCase, assert_array_equal
class TestSparkLabelEncoder(SplearnTestCase):
def test_same_fit_transform(self):
Y, Y_rdd = self.make_dense_randint_rdd(low=0, high=10, shape=(1000,))
local = LabelEncoder()
dist = SparkLabelEncoder()
assert_array_equal(local.fit_transform(Y),
dist.fit_transform(Y_rdd).toarray())
def test_same_classes(self):
Y, Y_rdd = self.make_dense_randint_rdd(low=0, high=10, shape=(1000,))
local = LabelEncoder().fit(Y)
dist = SparkLabelEncoder().fit(Y_rdd)
assert_array_equal(local.classes_, dist.classes_)
def test_same_inverse_transform(self):
Y, Y_rdd = self.make_dense_randint_rdd(low=0, high=10, shape=(1000,))
local = LabelEncoder().fit(Y)
dist = SparkLabelEncoder().fit(Y_rdd)
assert_array_equal(local.inverse_transform(Y),
dist.inverse_transform(Y_rdd).toarray())
| lensacom/sparkit-learn | splearn/preprocessing/tests/test_label.py | Python | apache-2.0 | 1,105 |
import sys
import unittest
from pytos.common.base_types import Service_Type, Single_Service_Type, \
Any_Service_Type, Range_Service_Type, Group_Service_Type, Service_Set
__author__ = 'saar.katz'
service_port = {'http': 80, 'https': 443, 'ftp': 21, 'gopher': 70, 'smtp': 25,
'imap': 143, 'imaps': 993, 'pop3': 110, 'pop3s': 995}
ip_protocol = {'icmp': 1, 'udp': 17, 'tcp': 6}
class Test_Service_Types(unittest.TestCase):
def test_service_type(self):
# Assertions for get_valid_port.
# By name.
assert Service_Type.get_valid_port('http') == service_port['http']
assert Service_Type.get_valid_port('https') == service_port['https']
with self.assertRaises(ValueError) as ex:
Service_Type.get_valid_port('not_exists')
assert "Service for port 'not_exists' not found." in str(ex.exception)
# By number.
assert Service_Type.get_valid_port(5) == 5
assert Service_Type.get_valid_port(65535) == 65535
with self.assertRaises(ValueError) as ex:
Service_Type.get_valid_port(65536)
assert "Port must be between 0 and 65535." in str(ex.exception)
# Neither name nor number.
with self.assertRaises(ValueError) as ex:
Service_Type.get_valid_port([80, 443, 25])
assert "Invalid port '[80, 443, 25]'." in str(ex.exception)
# Assertions for get_valid_protocol.
# By name.
assert Service_Type.get_valid_protocol('udp') == ip_protocol['udp']
assert Service_Type.get_valid_protocol('tcp') == ip_protocol['tcp']
with self.assertRaises(ValueError) as ex:
Service_Type.get_valid_protocol('not_exists')
assert "Protocol 'not_exists' not found." in str(ex.exception)
# By number.
with self.assertRaises(ValueError) as ex:
Service_Type.get_valid_protocol(-1)
assert "Protocol must be between 0 and 255." in str(ex.exception)
# Neither name nor number.
with self.assertRaises(ValueError) as ex:
Service_Type.get_valid_protocol({'icmp': 1})
assert "Invalid IP protocol '{'icmp': 1}'." in str(ex.exception)
def test_single_service_type(self):
single_service_type1 = Single_Service_Type(17, 'https')
single_service_type2 = Single_Service_Type('udp', 443)
single_service_type3 = Single_Service_Type('tcp', 'http')
single_service_type4 = Single_Service_Type('icmp', 'imaps')
# Assertions for __eq__ and __contains__
assert single_service_type1 == single_service_type2
assert single_service_type1 in single_service_type2
assert single_service_type1 != single_service_type3
assert single_service_type3 not in single_service_type4
assert not single_service_type1 == 443
# Assertions for __hash__
assert hash(single_service_type1) == hash(single_service_type2)
assert hash(single_service_type3) != hash(single_service_type4)
# Assertions for __lt__
assert single_service_type1 > single_service_type3
with self.assertRaises(AssertionError):
assert not single_service_type1 > single_service_type4
assert not single_service_type1 < single_service_type4
# Assertion for __repr__
assert single_service_type1 == eval(repr(single_service_type1))
def test_range_service_type(self):
range_service_type1 = Range_Service_Type('tcp', 'http', 443)
range_service_type2 = Range_Service_Type(6, 80, 'https')
range_service_type3 = Range_Service_Type('udp', 81, 150)
range_service_type4 = Range_Service_Type('tcp', 250, 443)
range_service_type5 = Range_Service_Type('icmp', 21, 151)
# Assertion for __eq__
assert range_service_type1 == range_service_type2
assert range_service_type3 != 17
assert range_service_type5 != range_service_type4
# Assertions for __contains__
assert range_service_type1 in range_service_type2
assert range_service_type4 in range_service_type2
assert range_service_type1 not in range_service_type4
assert range_service_type3 not in range_service_type2
# Assertions for __hash__
assert hash(range_service_type1) == hash(range_service_type2)
assert hash(range_service_type3) != hash(range_service_type4)
# Assertions for __lt__
assert range_service_type5 < range_service_type2
assert range_service_type5 < range_service_type4
assert not range_service_type1 < range_service_type2
assert not range_service_type3 < range_service_type4
# Assertion for __repr__
assert range_service_type1 == eval(repr(range_service_type1))
def test_group_service_type(self):
single_service_type1 = Single_Service_Type('tcp', 80)
single_service_type2 = Single_Service_Type('tcp', 70)
single_service_type3 = Single_Service_Type('udp', 443)
range_service_type1 = Range_Service_Type('tcp', 80, 100)
range_service_type2 = Range_Service_Type('tcp', 85, 95)
range_service_type3 = Range_Service_Type('tcp', 70, 90)
group_service_type1 = Group_Service_Type([single_service_type3])
assert single_service_type3 in group_service_type1
assert single_service_type1 not in group_service_type1
group_service_type1.append(range_service_type1)
assert single_service_type1 in group_service_type1
assert single_service_type2 not in group_service_type1
assert range_service_type2 in group_service_type1
assert range_service_type3 not in group_service_type1
group_service_type1.append(single_service_type2)
assert range_service_type3 not in group_service_type1
group_service_type2 = Group_Service_Type([single_service_type2])
assert group_service_type2 in group_service_type1
group_service_type2 = Group_Service_Type([single_service_type1,
single_service_type2,
range_service_type2])
assert group_service_type2 in group_service_type1
group_service_type2.append(range_service_type3)
assert group_service_type2 not in group_service_type1
assert len(group_service_type1) == 3
# Assertion for __repr__
assert group_service_type1 in eval(repr(group_service_type1))
def test_service_set(self):
single_service_type1 = Single_Service_Type('tcp', 80)
single_service_type2 = Single_Service_Type('udp', 443)
range_service_type1 = Range_Service_Type('tcp', 80, 100)
range_service_type2 = Range_Service_Type('tcp', 85, 95)
range_service_type3 = Range_Service_Type('tcp', 70, 90)
group_service_type1 = Group_Service_Type([single_service_type2])
service_set1 = Service_Set(group_service_type1)
assert single_service_type1 not in service_set1
assert range_service_type2 not in service_set1
assert single_service_type2 in service_set1
assert group_service_type1 in service_set1
service_set1.add(range_service_type1)
assert single_service_type1 in service_set1
assert range_service_type2 in service_set1
service_set2 = Service_Set([single_service_type1])
service_set2.add(range_service_type2)
assert service_set2.issubset(service_set1)
service_set2.add(range_service_type3)
assert not service_set2.issubset(service_set1)
assert len(service_set1) == 2
service_set1.add(single_service_type2)
assert len(service_set1) == 2
service_set1.add(Any_Service_Type())
assert len(service_set1) == 3
service_set1.add(Any_Service_Type())
assert len(service_set1) == 3
# Assertion for copy
assert service_set1 in service_set1.copy()
# Assertion for __repr__
assert service_set2 in eval(repr(service_set2))
def test_lt_in_between_service_types(self):
single_service_type1 = Single_Service_Type('tcp', 80)
range_service_type1 = Range_Service_Type('tcp', 80, 100)
group_service_type1 = Group_Service_Type([range_service_type1])
any_service_type = Any_Service_Type()
assert single_service_type1 < any_service_type
assert single_service_type1 < range_service_type1
assert not single_service_type1 > range_service_type1
assert not single_service_type1 < group_service_type1
assert not single_service_type1 > any_service_type
assert single_service_type1 > group_service_type1
assert range_service_type1 < any_service_type
assert not range_service_type1 < group_service_type1
assert not range_service_type1 > any_service_type
assert range_service_type1 > group_service_type1
assert group_service_type1 < any_service_type
assert not group_service_type1 > any_service_type
if __name__ == '__main__':
unittest.main() | Tufin/pytos | tests/common_test/test_service_types_unittest.py | Python | apache-2.0 | 9,119 |