| code (string, lengths 3 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M) |
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-03 20:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=255, unique=True, verbose_name='Назва')),
('image', models.ImageField(upload_to='albums/', verbose_name='Головне зображення')),
('description', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Заголовок')),
('description', models.TextField(verbose_name='Опис')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Дата створення')),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='gallary.Album')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to=settings.AUTH_USER_MODEL, verbose_name='Завантажено')),
],
),
]
| aodarc/flowers_room | apps/gallary/migrations/0001_initial.py | Python | mit | 1,751 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Satellite Functions
^^^^^^^^^^^^^^^^^^^
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
__all__ = ["correct_parallax", "dist_from_orbit"]
__doc__ = __doc__.format("\n ".join(__all__))
import numpy as np
def correct_parallax(sr_xy, nbin, drt, alpha):
"""Adjust the geo-locations of the SR pixels
With *SR*, we refer to precipitation radars based on space-borne platforms
such as TRMM or GPM.
The `sr_xy` coordinates of the SR beam footprints need to be in the
azimuthal equidistant projection of the ground radar. This ensures that the
ground radar is fixed at xy-coordinate (0, 0), and every SR bin has its
relative xy-coordinates with respect to the ground radar site.
Parameters
----------
sr_xy : :class:`numpy:numpy.ndarray`
Array of xy-coordinates of shape (nscans, nbeams, 2)
nbin : int
Number of bins along SR beam.
drt : float
Gate length of SR in meters.
alpha: :class:`numpy:numpy.ndarray`
Array of local zenith angles of the SR beams
with shape (nscans, nbeams).
Returns
-------
sr_xyp : :class:`numpy:numpy.ndarray`
Array of parallax corrected coordinates
of shape (nscans, nbeams, nbins, 2).
r_sr_inv : :class:`numpy:numpy.ndarray`
Array of ranges from ground to SR platform of shape (nbins).
z_sr : :class:`numpy:numpy.ndarray`
Array of SR bin altitudes of shape (nscans, nbeams, nbins).
"""
# get x,y-grids
sr_x = sr_xy[..., 0]
sr_y = sr_xy[..., 1]
# create range array from ground to satellite
r_sr_inv = np.arange(nbin) * drt
# calculate height of bin
z_sr = r_sr_inv * np.cos(np.deg2rad(alpha))[..., np.newaxis]
# calculate bin ground xy-displacement length
ds = r_sr_inv * np.sin(np.deg2rad(alpha))[..., np.newaxis]
# calculate x,y-differences between ground coordinate
# and center ground coordinate [25th element]
center = int(np.floor(len(sr_x[-1]) / 2.0))
xdiff = sr_x - sr_x[:, center][:, np.newaxis]
ydiff = sr_y - sr_y[:, center][:, np.newaxis]
# treating ydiff and xdiff as the adjacent and opposite sides of a
# triangle, this calculates the xy-angle of the SR scan
ang = np.arctan2(ydiff, xdiff)
# calculate displacement dx, dy from displacement length
dx = ds * np.cos(ang)[..., np.newaxis]
dy = ds * np.sin(ang)[..., np.newaxis]
# subtract displacement from SR ground coordinates
sr_xp = sr_x[..., np.newaxis] - dx
sr_yp = sr_y[..., np.newaxis] - dy
return np.stack((sr_xp, sr_yp), axis=3), r_sr_inv, z_sr
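# A minimal usage sketch (added for illustration, not part of the original
# module): the array sizes below are invented, loosely assuming a GPM-like
# geometry with a 125 m gate length, and only demonstrate the expected
# input/output shapes of correct_parallax.
#
#     nscans, nbeams, nbins = 10, 49, 176
#     sr_xy = np.zeros((nscans, nbeams, 2))   # footprint xy in GR projection [m]
#     alpha = np.zeros((nscans, nbeams))      # local zenith angles [deg]
#     sr_xyp, r_sr_inv, z_sr = correct_parallax(sr_xy, nbins, 125.0, alpha)
#     # sr_xyp.shape == (nscans, nbeams, nbins, 2)
#     # z_sr.shape == (nscans, nbeams, nbins)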
def dist_from_orbit(sr_alt, alpha, beta, r_sr_inv, re):
"""Returns range distances of SR bins (in meters) as seen from the orbit
With *SR*, we refer to precipitation radars based on space-borne platforms
such as TRMM or GPM.
Parameters
----------
sr_alt : float
SR orbit height in meters.
alpha: :class:`numpy:numpy.ndarray`
Array of local zenith angles of the SR beams
with shape (nscans, nbeams).
beta: :class:`numpy:numpy.ndarray`
Off-Nadir scan angle with shape (nbeams).
r_sr_inv : :class:`numpy:numpy.ndarray`
Array of ranges from ground to SR platform of shape (nbins).
re : float
Earth radius [m]
Returns
-------
ranges : :class:`numpy:numpy.ndarray`
Array of shape (nscans, nbeams, nbins) of SR bin range distances from
SR platform in orbit.
"""
ro = (
(re + sr_alt) * np.cos(np.radians(alpha - beta[np.newaxis, :])) - re
) / np.cos(np.radians(alpha))
return ro[..., np.newaxis] - r_sr_inv
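# A minimal usage sketch (added for illustration, not part of the original
# module), assuming a GPM-like orbit height of roughly 407 km and a mean
# Earth radius of 6371 km; alpha and r_sr_inv are as used/returned above.
#
#     ranges = dist_from_orbit(407000.0, alpha, np.zeros(nbeams), r_sr_inv,
#                              re=6371000.0)
#     # ranges.shape == (nscans, nbeams, nbins)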
| wradlib/wradlib | wradlib/georef/satellite.py | Python | mit | 3,810 |
# 2014-01 Jason Roebuck
# Product of work for GEOG 590 @ Portland State University
# May be used for whatever!
# github.com/jtroe/GEOG-590 - Fork me on github!
def main():
# Declare a good, old fashioned greeting.
greeting = 'Hello, Portland!'
print greeting
# print a separator
print '======'
# prints every character from 'Hello, Portland!' on its very own line!
for char in greeting:
print char
print '======'
# should print 'Hell Portland!'
print greeting[0:4], greeting[7:]
print '======'
# declare a list of smurf strings
mySmurfList = ['Papa', 'Smurfette', 'Hefty', 'Brainy', 'Grouchy', 'Clumsy']
for smurf in mySmurfList:
# if string length is greater than 4, print it! Sorry, papa.
if len(smurf) > 4:
print smurf
print '======'
# equivalent of the more traditional for loop.
# instead of getting the actual object of the list, gets the index
# for(int i = 0; i < mySmurfList.Length; i++) <= C# equivalent
for i in range(len(mySmurfList)):
print mySmurfList[i]
if __name__ == "__main__":
main() | jtroe/GEOG-590 | Assignment1/helloworld.py | Python | unlicense | 1,136 |
from django.conf.urls import patterns, url
from web import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^login', views.cosergate_login, name='login'),
url(r'^logout', views.cosergate_logout, name='logout'),
url(r'^signup', views.cosergate_signup, name='signup'),
url(r'^home', views.home, name='home'),
url(r'^account', views.account, name='account')
)
| tapionx/cosergate | web/urls.py | Python | agpl-3.0 | 397 |
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
def _batch_renormalization(expander, gamma, beta, x, mean, var, eps, test,
r, d):
mean = mean[expander]
if test:
std = numpy.sqrt(var[expander])
r, d = 1, 0
else:
std = numpy.sqrt(var[expander] + eps)
y_expect = gamma * ((x - mean) / std * r + d) + beta
return y_expect
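# For reference: in training mode the tests below compute the renormalization
# correction terms as
#     r = clip(sigma_batch / running_sigma, 1 / rmax, rmax)
#     d = clip((mean_batch - running_mean) / running_sigma, -dmax, dmax)
# and the helper above expects
#     y = gamma * ((x - mean_batch) / sqrt(var_batch + eps) * r + d) + beta,
# which reduces to plain batch normalization using the running statistics
# (r = 1, d = 0) in test mode.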
@testing.parameterize(*(testing.product({
'test': [True, False],
'ndim': [0, 1, 2, 3],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
class BatchRenormalizationTest(unittest.TestCase):
def setUp(self):
self.expander = (None, Ellipsis) + (None,) * self.ndim
self.aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))
self.rmax = self.dtype(3)
self.dmax = self.dtype(5)
self.link = links.BatchRenormalization(3, rmax=self.rmax,
dmax=self.dmax,
dtype=self.dtype)
gamma = self.link.gamma.data
gamma[...] = numpy.random.uniform(.5, 1, gamma.shape)
beta = self.link.beta.data
beta[...] = numpy.random.uniform(-1, 1, beta.shape)
self.link.cleargrads()
self.gamma = gamma.copy()[self.expander] # fixed on CPU
self.beta = beta.copy()[self.expander] # fixed on CPU
shape = (5, 3) + (2,) * self.ndim
self.x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
if self.test:
self.mean = numpy.random.uniform(-1, 1, (3,)).astype(self.dtype)
self.var = numpy.random.uniform(0.5, 1, (3,)).astype(self.dtype)
self.link.avg_mean[...] = self.mean
self.link.avg_var[...] = self.var
self.running_mean = self.mean
self.running_var = self.var
else:
self.mean = self.x.mean(axis=self.aggr_axes)
self.var = self.x.var(axis=self.aggr_axes)
# Need to add some noise to running_mean and running_var,
# otherwise we will always get r=1, d=0
self.running_mean = self.mean + numpy.random.uniform(
-1, 1, self.mean.shape).astype(self.dtype)
self.running_var = numpy.abs(self.var + numpy.random.uniform(
-1, 1, self.var.shape).astype(self.dtype))
self.link.avg_mean[...] = self.running_mean
self.link.avg_var[...] = self.running_var
self.check_forward_optionss = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_optionss = {'atol': 1e-4, 'rtol': 1e-3}
if self.dtype == numpy.float16:
self.check_forward_optionss = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_optionss = {'atol': 5e-1, 'rtol': 1e-1}
def check_forward(self, x_data):
with chainer.using_config('train', not self.test):
x = chainer.Variable(x_data)
y = self.link(x)
self.assertEqual(y.data.dtype, self.dtype)
sigma_batch = numpy.sqrt(self.var)
running_sigma = numpy.sqrt(self.running_var)
r = numpy.clip(sigma_batch / running_sigma, 1.0 / self.rmax, self.rmax)
d = numpy.clip((self.mean - self.running_mean) / running_sigma,
-self.dmax, self.dmax)
y_expect = _batch_renormalization(
self.expander, self.gamma, self.beta, self.x, self.mean,
self.var, self.link.eps, self.test,
r[self.expander], d[self.expander])
testing.assert_allclose(
y_expect, y.data, **self.check_forward_optionss)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.link.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
@attr.multi_gpu(2)
@condition.retry(3)
def test_forward_multi_gpu(self):
with cuda.get_device_from_id(1):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
with cuda.get_device_from_id(0):
self.check_forward(x)
@testing.parameterize(
{'nx': 10, 'ny': 10},
# TODO(Kenta Oono)
# Pass the case below (this test does not pass when nx != ny).
# {'nx': 10, 'ny': 15}
)
class TestPopulationStatistics(unittest.TestCase):
def setUp(self):
self.decay = 0.9
self.size = 3
self.link = links.BatchRenormalization(self.size, self.decay)
self.x = numpy.random.uniform(
-1, 1, (self.nx, self.size)).astype(numpy.float32)
self.y = numpy.random.uniform(
-1, 1, (self.ny, self.size)).astype(numpy.float32)
def check_statistics(self, x, y):
x = chainer.Variable(x)
self.link(x, finetune=True)
mean = self.x.mean(axis=0)
testing.assert_allclose(mean, self.link.avg_mean)
unbiased_var = self.x.var(axis=0) * self.nx / (self.nx - 1)
testing.assert_allclose(unbiased_var, self.link.avg_var)
with chainer.using_config('train', False):
y = chainer.Variable(y)
self.link(y, finetune=True)
testing.assert_allclose(mean, self.link.avg_mean)
testing.assert_allclose(unbiased_var, self.link.avg_var)
@condition.retry(3)
def test_statistics_cpu(self):
self.check_statistics(self.x, self.y)
@attr.gpu
@condition.retry(3)
def test_statistics_gpu(self):
self.link.to_gpu()
self.check_statistics(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
def check_statistics2(self, x, y):
x = chainer.Variable(x)
y = chainer.Variable(y)
self.link(x, finetune=True)
self.link(y, finetune=True)
mean = (self.x.sum(axis=0) + self.y.sum(axis=0)) / (self.nx + self.ny)
var = (self.x.var(axis=0) * self.nx +
self.y.var(axis=0) * self.ny) / (self.nx + self.ny)
# TODO(Kenta Oono)
# Fix the estimate of the unbiased variance.
# Unbiased variance should be (nx + ny) / (nx + ny - 1) times of
# the variance.
# But the multiplier is ny / (ny - 1) in current implementation
# these two values are different when nx is not equal to ny.
unbiased_var = var * self.ny / (self.ny - 1)
testing.assert_allclose(mean, self.link.avg_mean)
testing.assert_allclose(unbiased_var, self.link.avg_var)
@condition.retry(3)
def test_statistics2_cpu(self):
self.check_statistics2(self.x, self.y)
@attr.gpu
@condition.retry(3)
def test_statistics2_gpu(self):
self.link.to_gpu()
self.check_statistics2(
cuda.to_gpu(self.x),
cuda.to_gpu(self.y))
testing.run_module(__name__, __file__)
| rezoo/chainer | tests/chainer_tests/links_tests/normalization_tests/test_batch_renormalization.py | Python | mit | 6,996 |
# IDLEX EXTENSION
from __future__ import print_function
## """
## Copyright(C) 2011-2012 The Board of Trustees of the University of Illinois.
## All rights reserved.
##
## Developed by: Roger D. Serwy
## University of Illinois
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal with the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## + Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimers.
## + Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimers in the
## documentation and/or other materials provided with the distribution.
## + Neither the names of Roger D. Serwy, the University of Illinois, nor
## the names of its contributors may be used to endorse or promote
## products derived from this Software without specific prior written
## permission.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR
## ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
## CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
## THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
##
## """
config_extension_def = """
[idlexManager]
enable=1
missing=
"""
version = '???' # updated from idlexMain.py
import sys
import os
import re
import imp
import __main__
if sys.version < '3':
from StringIO import StringIO
from Tkinter import *
import tkFileDialog
import tkMessageBox
else:
from io import StringIO
from tkinter import *
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
StandardError = Exception
from idlelib.configHandler import idleConf, IdleConfParser
import idlelib.textView as textView
import webbrowser
def update_globals(): # for calling from idlex.py
global IDLEX_URL
global UPDATE_URL
global DEV_EMAIL
IDLEX_URL = 'http://idlex.sourceforge.net/'
UPDATE_URL = '%supdate.html?version=%s' % (IDLEX_URL, version)
DEV_EMAIL = 'serwy@illinois.edu'
update_globals()
PATCH_MENU = False # Patch "Configure Extensions..." into options menu
class idlexManager(object):
if not PATCH_MENU:
menudefs = [
('options',
[('Configure Extensions...', '<<idlex-configure>>'),
None,
]),
]
else:
menudefs = []
menudefs.append( ('help',
[None,
('About Idle_X', '<<idlex-about>>'),
('Check for IdleX Updates', '<<idlex-update>>')]))
def __init__(self, editwin):
self.editwin = editwin
editwin.text.bind('<<idlex-configure>>',
self.idlex_configure_event)
editwin.text.bind('<<idlex-about>>',
self.idlex_about_event)
editwin.text.bind('<<idlex-update>>',
self.idlex_update_event)
if PATCH_MENU:
self.patch_menu()
def close(self):
idleConf.SaveUserCfgFiles()
def idlex_update_event(self, ev=None):
idlexUpdate(self.editwin.text)
def idlex_configure_event(self, ev=None):
idlexConfig(self.editwin.top)
def idlex_about_event(self, ev=None):
a = idlexAbout(self.editwin.top)
def patch_menu(self):
# patch "Configure Extensions" into the Options menu
e = self.editwin
f = e.menudict['options']
text = e.text
eventname = '<<idlex-configure>>'
def command(text=text, eventname=eventname):
text.event_generate(eventname)
f.insert_command(2, label="Configure Extensions...", command=command)
class idlexAbout(Toplevel):
# some code borrowed from aboutDialog.py, covered by the PSF License.
def __init__(self, parent):
Toplevel.__init__(self, parent)
title = 'About IdleX'# (version %s)' % __version__
self.configure(borderwidth=5)
self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
parent.winfo_rooty()+30))
self.CreateWidgets()
self.resizable(height=FALSE, width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.close)
self.parent = parent
self.buttonOk.focus_set()
self.bind('<Return>',self.close)
self.bind('<Escape>',self.close)
self.wait_window()
def CreateWidgets(self):
frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
frameButtons = Frame(self)
frameButtons.pack(side=BOTTOM, fill=X)
frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
self.buttonUpdate = Button(frameButtons, text='Check for Updates',
command=self.check_updates)
self.buttonUpdate.pack(padx=5, pady=5, side=LEFT)
self.buttonOk = Button(frameButtons, text='Close',
command=self.close)
self.buttonOk.pack(padx=5, pady=5, side=RIGHT)
t = Text(frameMain)
t.configure(width=40, height=15,
bg="#6091b0",
fg="#FFFFFF",
padx=10,
pady=10,
wrap=WORD,
borderwidth=0)
t.pack(expand=TRUE, fill=BOTH, side=LEFT)
vbar = Scrollbar(frameMain, name='vbar')
vbar.pack(side=RIGHT, fill=Y)
vbar['command'] = t.yview
t['yscrollcommand'] = vbar.set
t.tag_configure('CAP',
font=('courier', 24, 'bold'))
t.tag_configure('MINICAP',
font=('courier', 20, 'bold'))
t.tag_configure('TITLE',
background="#b37900",
foreground="#ffcf61",
relief=RIDGE,
borderwidth=5,
justify=LEFT,
)
# make IdleX title
t.insert('insert', ' I', 'CAP TITLE')
t.insert('insert', 'DLE', 'MINICAP TITLE')
t.insert('insert', 'X ', 'CAP TITLE')
t.insert('insert', '\n'*1)
# make message
msg = ['IDLE Extensions for Python',
'version %s' % version,
'',
'email: %s' % DEV_EMAIL,
'www: %s' % IDLEX_URL,
'',
'Copyright(C) 2011-2012 The Board of Trustees of the University of Illinois.',
'All rights reserved.',
'',
'See LICENSE.txt for more details',
'',
'IdleX includes some third-party extensions that are covered by their respective licenses and copyrights as found in the "license" directory.',
'',
'SearchBar.py and Squeezer.py',
'Copyright (c) 2011 Tal Einat',
'All rights reserved.',
'',
'IDLE2HTML.py',
'Copyright (c) 2001-2010 Python Software Foundation; All Rights Reserved.',
'\n'*4,
]
t.insert('insert', '\n'.join(msg))
t.config(state=DISABLED)
def close(self, event=None):
self.destroy()
def check_updates(self, ev=None):
idlexUpdate(self.parent)
return "break"
class idlexUpdate:
def __init__(self, text):
if sys.platform[:3] == 'win':
try:
os.startfile(UPDATE_URL)
except WindowsError as why:
tkMessageBox.showerror(title='Unable to load IdleX update page.',
message=str(why), parent=text)
else:
webbrowser.open(UPDATE_URL)
class idlexConfig(Toplevel):
def __init__(self, parent):
Toplevel.__init__(self, parent)
self.restart = False # Flag for displaying restart dialog
self.protocol("WM_DELETE_WINDOW", self.close)
self.gui = {}
self.parent = parent
self.build_gui()
self.populate_gui()
def close(self, ev=None):
if self.restart:
self.recommend_restart()
self.destroy()
def build_gui(self):
top = self
top.title('IdleX Extension Manager')
top.configure(borderwidth=5)
parent = self.parent
top.geometry("=+%d+%d" % (parent.winfo_rootx()+20,
parent.winfo_rooty()+30))
mainFrame = LabelFrame(top, borderwidth=0)
mainFrame.pack(side=TOP, fill=BOTH, expand=True, padx=3, pady=3)
### gui for enable/disable extension
f2 = LabelFrame(mainFrame, borderwidth=2, relief=GROOVE,
text='Enable/Disable Extensions:')
lbframe = Frame(f2, borderwidth=0)
scrollbar = Scrollbar(lbframe, orient=VERTICAL)
lb = Listbox(lbframe, yscrollcommand=scrollbar.set)
scrollbar.config(command=lb.yview)
scrollbar.pack(side=RIGHT, fill=Y)
lb.pack(side=TOP, fill=BOTH, expand=True)
lbframe.pack(side=TOP, fill=BOTH, padx=6, pady=6, expand=True)
lb.bind("<Double-Button-1>", self.toggle)
tog_B = Button(f2, text='Enable/Disable', command=self.toggle)
tog_B.pack(side=LEFT, padx=6, pady=3)
clear_B = Button(f2, text='Use Extension Defaults',
command=self.clear_custom)
clear_B.pack(side=LEFT, padx=6, pady=3)
f2.pack(side=TOP, fill=BOTH, expand=True)
self.gui['extension_list'] = lb
### dialog
close_B = Button(mainFrame, text='Close',
command=self.close)
close_B.pack(side=RIGHT)
def populate_gui(self):
IDLE_DEFAULT_EXT = extensionManager.IDLE_EXTENSIONS
ext_list = idleConf.GetExtensions(active_only=False)
ext_list.sort(key=str.lower)
if 'idlexManager' in ext_list:
ext_list.remove('idlexManager') # idlex enabled by default.
lb = self.gui['extension_list']
lb.delete(0, END) # reset the list
for item in ext_list:
ext_found = True
try:
extensionManager.find_extension(item)
except ImportError:
ext_found = False
en = idleConf.GetOption('extensions', item, 'enable', type='int')
info = ''
if item in IDLE_DEFAULT_EXT:
info += ' (built-in) '
if not ext_found:
if item not in IDLE_DEFAULT_EXT:
if sys.modules.get('idlexlib.extensions.%s' % item) is not None:
info += ' (RESTART TO UNLOAD) '
else:
info += ' (NOT FOUND IN PATH) '
if en:
enstr = '1'
else:
enstr = '0'
text = ' [%s] %s %s' % (enstr, item, info)
lb.insert(END, text)
self.extensions = ext_list
def get_sel(self):
LB = self.gui['extension_list']
sel = LB.curselection()
if not sel:
return None
else:
return int(sel[0])
def toggle(self, ev=None):
""" Toggle the selected extension's enable status """
sel = self.get_sel()
if sel is None: return
item = self.extensions[sel]
en = not idleConf.GetOption('extensions', item, 'enable',
type='bool', default=True)
en = int(en)
idleConf.SetOption('extensions', item, 'enable', '%s' % en)
idleConf.SaveUserCfgFiles()
self.repopulate_list()
self.restart = True
def repopulate_list(self):
sel = self.get_sel()
# remember the list box settings
lb = self.gui['extension_list']
y = lb.yview()
self.populate_gui()
if sel > lb.index(END):
sel = lb.index(END)
# restore the list box settings
lb.yview_moveto(y[0])
lb.activate(sel)
lb.select_set(sel)
lb.focus_set()
def clear_custom(self):
""" Delete the configuration for an extension from the
user configuration found in .idlerc/config-extensions.cfg """
sel = self.get_sel()
if sel is None: return
ext_name = self.extensions[sel]
idleConf.userCfg['extensions'].remove_section(ext_name)
idleConf.userCfg['extensions'].remove_section(ext_name + '_cfgBindings')
# reload this extension config
extensionManager.reload_cfg(ext_name)
self.repopulate_list()
def recommend_restart(self):
msg = """The extension configuration has changed. Changes
will take effect on newly opened editors and shells.
A restart is recommended, but not required.
"""
msg = re.sub(r"[\s]{2,}", " ", msg)
tkMessageBox.showinfo(parent=self,
title="IDLE Restart Recommended",
message=msg)
################################################################
##
##def _about():
## root = Tk()
## idlexAbout(root)
## root.mainloop()
##
##if __name__ == '__main__':
## _about()
| technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/idlexlib/extensions/idlexManager.py | Python | gpl-3.0 | 13,899 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from Tkinter import FLAT
from ttk import Style
########################################################################
class TkStyles(object):
#----------------------------------------------------------------------
@classmethod
def create_styles(self):
""""""
styles = Style()
styles.configure("TNotebook", background="#afc8e1", borderwidth=0, relief=FLAT, highlightthickness=0)
#styles.configure("Treeview", borderwidth=0, relief=FLAT, width=100)
#styles.configure("TSeparator")
| PinguinoIDE/pinguino-ide-tk | tkgui/ide/styles.py | Python | gpl-2.0 | 582 |
"""
The minimum needed to take a response and render a response
- url mapper utility
- wsgiwrapper
"""
import json
from werkzeug.wrappers import Response
def add_ressource_uri(response, obj):
obj["ressource_uri"] = "/{0}/{1}/".format(
response.ressource_name,
obj[response.model.pk_field.name])
return obj
class JsonResponse(object):
"""
A werkzeug Response rendering a json representation of the object(s)
This class is callable. You should do:
.. code-block:: python
view = JsonResponse(model, ressource_name, formaters=formaters,
**options)
return view(objects)
"""
render_format = "json"
def __init__(self, model, ressource_name,
formaters=["add_ressource_uri"], **options):
self.model = model
self.ressource_name = ressource_name
self.formaters = formaters
def __call__(self, *args, **kwargs):
"""
Return a response object
"""
meta = None
if "meta" in kwargs:
meta = kwargs.pop("meta")
if "objs" in kwargs:
objs = self.format(kwargs.pop('objs'))
if meta:
response = {"meta": meta,
"object_list": objs}
else:
response = objs
return Response(json.dumps(response),
mimetype="application/json",
**kwargs)
else:
response = ""
if args:
response = json.dumps(*args)
return Response(response,
mimetype="application/json",
**kwargs)
def format(self, objs):
"""
Format the output using formaters listed in self.formaters
"""
if isinstance(objs, list):
for elem in objs:
for formater in self.formaters:
if hasattr(formater, '__call__'):
elem = formater(self, elem)
else:
elem = globals()[formater](self, elem)
if isinstance(objs, dict):
for formater in self.formaters:
if hasattr(formater, '__call__'):
objs = formater(self, objs)
else:
objs = globals()[formater](self, objs)
return objs
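# A minimal sketch of a custom formater (hypothetical, not part of this
# module). A formater is either a callable or the name of a module-level
# function; it receives the view instance and a single object dict and
# returns the (possibly modified) dict, mirroring add_ressource_uri above.
#
#     def hide_internal_fields(view, obj):
#         # drop a hypothetical internal field before serialization
#         obj.pop("_internal", None)
#         return obj
#
#     view = JsonResponse(model, "users",
#                         formaters=["add_ressource_uri", hide_internal_fields])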
| boblefrag/python-rest-api-framework | rest_api_framework/views.py | Python | mit | 2,433 |
# coding: utf-8
from setuptools import setup, find_packages
setup(
name='tc_librato',
version="0.0.1",
description='Thumbor Librato extensions',
author='Peter Schröder, Sebastian Eichner',
author_email='peter.schroeder@jimdo.com, sebastian.eichner@jimdo.com',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
install_requires=[
'thumbor',
'librato-metrics',
]
)
| thumbor-community/librato | setup.py | Python | mit | 441 |
"""Chatroom game."""
import logging
from dallinger import networks
from dallinger.compat import unicode
from dallinger.config import get_config
from dallinger.experiment import Experiment
from dallinger.nodes import Agent
try:
from .bots import Bot
Bot = Bot # Make name "Bot" importable without triggering style warnings
except ImportError:
pass
logger = logging.getLogger(__file__)
def extra_parameters():
config = get_config()
config.register("network", unicode)
config.register("repeats", int)
config.register("n", int)
class CoordinationChatroom(Experiment):
"""Define the structure of the experiment."""
def __init__(self, session=None):
"""Initialize the experiment."""
super(CoordinationChatroom, self).__init__(session)
if session:
self.setup()
def configure(self):
config = get_config()
self.experiment_repeats = repeats = config.get("repeats")
self.network_class = config.get("network")
self.quorum = config.get("n")
# Recruit for all networks at once
self.initial_recruitment_size = repeats * self.quorum
def create_network(self):
"""Create a new network by reading the configuration file."""
class_ = getattr(networks, self.network_class)
return class_(max_size=self.quorum)
def choose_network(self, networks, participant):
# Choose first available network rather than random
return networks[0]
def info_post_request(self, node, info):
"""Run when a request to create an info is complete."""
for agent in node.neighbors():
node.transmit(what=info, to_whom=agent)
def create_node(self, participant, network):
"""Create a node for a participant."""
return Agent(network=network, participant=participant)
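# Recruitment sizing note (illustrative numbers only): with, say, repeats = 2
# and n = 3 in the config, configure() sets quorum = 3, create_network()
# builds each chatroom with max_size = 3, and initial_recruitment_size
# becomes repeats * quorum = 6, i.e. enough participants to fill every
# network at once.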
| Dallinger/Dallinger | demos/dlgr/demos/chatroom/experiment.py | Python | mit | 1,854 |
from __future__ import print_function, absolute_import, division
import re
import copy
import operator
import itertools
import warnings
import mmap
from distutils.version import LooseVersion
import sys
import pytest
import astropy
from astropy import stats
from astropy.io import fits
from astropy import units as u
from astropy.wcs import WCS
from astropy.wcs import _wcs
from astropy.tests.helper import assert_quantity_allclose
from astropy.convolution import Gaussian2DKernel, Tophat2DKernel
from astropy.utils.exceptions import AstropyWarning
import numpy as np
from .. import (BooleanArrayMask,
FunctionMask, LazyMask, CompositeMask)
from ..spectral_cube import (OneDSpectrum, Projection,
VaryingResolutionOneDSpectrum,
LowerDimensionalObject)
from ..np_compat import allbadtonan
from .. import spectral_axis
from .. import base_class
from .. import utils
from .. import SpectralCube, VaryingResolutionSpectralCube, DaskSpectralCube
from . import path
from .helpers import assert_allclose, assert_array_equal
try:
import casatools
ia = casatools.image()
casaOK = True
except ImportError:
try:
from taskinit import ia
casaOK = True
except ImportError:
casaOK = False
WINDOWS = sys.platform == "win32"
# needed to test for warnings later
warnings.simplefilter('always', UserWarning)
warnings.simplefilter('error', utils.UnsupportedIterationStrategyWarning)
warnings.simplefilter('error', utils.NotImplementedWarning)
warnings.simplefilter('error', utils.WCSMismatchWarning)
warnings.simplefilter('error', FutureWarning)
warnings.filterwarnings(action='ignore', category=FutureWarning,
module='reproject')
try:
import yt
YT_INSTALLED = True
YT_LT_301 = LooseVersion(yt.__version__) < LooseVersion('3.0.1')
except ImportError:
YT_INSTALLED = False
YT_LT_301 = False
try:
import scipy
scipyOK = True
except ImportError:
scipyOK = False
import os
# if ON_TRAVIS is set, we're on travis.
on_travis = bool(os.environ.get('ON_TRAVIS'))
from radio_beam import Beam, Beams
from radio_beam.utils import BeamError
NUMPY_LT_19 = LooseVersion(np.__version__) < LooseVersion('1.9.0')
def cube_and_raw(filename, use_dask=None):
if use_dask is None:
raise ValueError('use_dask should be explicitly set')
p = path(filename)
if os.path.splitext(p)[-1] == '.fits':
with fits.open(p) as hdulist:
d = hdulist[0].data
c = SpectralCube.read(p, format='fits', mode='readonly', use_dask=use_dask)
elif os.path.splitext(p)[-1] == '.image':
ia.open(p)
d = ia.getchunk()
ia.unlock()
ia.close()
ia.done()
c = SpectralCube.read(p, format='casa_image', use_dask=use_dask)
else:
raise ValueError("Unsupported filetype")
return c, d
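# Typical use in the tests below (sketch): a path fixture points at a small
# FITS or CASA image, and the helper hands back the SpectralCube together
# with the raw array it was built from, e.g.
#     cube, data = cube_and_raw(data_adv, use_dask=False)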
def test_arithmetic_warning(data_vda_jybeam_lower, recwarn, use_dask):
cube, data = cube_and_raw(data_vda_jybeam_lower, use_dask=use_dask)
assert not cube._is_huge
# make sure the small cube raises a warning about loading into memory
with pytest.warns(UserWarning, match='requires loading the entire'):
cube + 5*cube.unit
def test_huge_disallowed(data_vda_jybeam_lower, use_dask):
cube, data = cube_and_raw(data_vda_jybeam_lower, use_dask=use_dask)
assert not cube._is_huge
# We need to reduce the memory threshold rather than use a large cube to
# make sure we don't use too much memory during testing.
from .. import cube_utils
OLD_MEMORY_THRESHOLD = cube_utils.MEMORY_THRESHOLD
try:
cube_utils.MEMORY_THRESHOLD = 10
assert cube._is_huge
with pytest.raises(ValueError, match='entire cube into memory'):
cube + 5*cube.unit
if use_dask:
with pytest.raises(ValueError, match='entire cube into memory'):
cube.mad_std()
else:
with pytest.raises(ValueError, match='entire cube into memory'):
cube.max(how='cube')
cube.allow_huge_operations = True
# just make sure it doesn't fail
cube + 5*cube.unit
finally:
cube_utils.MEMORY_THRESHOLD = OLD_MEMORY_THRESHOLD
del cube
class BaseTest(object):
@pytest.fixture(autouse=True)
def setup_method_fixture(self, request, data_adv, use_dask):
c, d = cube_and_raw(data_adv, use_dask=use_dask)
mask = BooleanArrayMask(d > 0.5, c._wcs)
c._mask = mask
self.c = c
self.mask = mask
self.d = d
class BaseTestMultiBeams(object):
@pytest.fixture(autouse=True)
def setup_method_fixture(self, request, data_adv_beams, use_dask):
c, d = cube_and_raw(data_adv_beams, use_dask=use_dask)
mask = BooleanArrayMask(d > 0.5, c._wcs)
c._mask = mask
self.c = c
self.mask = mask
self.d = d
@pytest.fixture
def filename(request):
return request.getfixturevalue(request.param)
translist = [('data_advs', [0, 1, 2, 3]),
('data_dvsa', [2, 3, 0, 1]),
('data_sdav', [0, 2, 1, 3]),
('data_sadv', [0, 1, 2, 3]),
('data_vsad', [3, 0, 1, 2]),
('data_vad', [2, 0, 1]),
('data_vda', [0, 2, 1]),
('data_adv', [0, 1, 2]),
]
translist_vrsc = [('data_vda_beams', [0, 2, 1])]
class TestSpectralCube(object):
@pytest.mark.parametrize(('filename', 'trans'), translist + translist_vrsc,
indirect=['filename'])
def test_consistent_transposition(self, filename, trans, use_dask):
"""data() should return velocity axis first, then world 1, then world 0"""
c, d = cube_and_raw(filename, use_dask=use_dask)
expected = np.squeeze(d.transpose(trans))
assert_allclose(c._get_filled_data(), expected)
@pytest.mark.parametrize(('filename', 'view'), (
('data_adv', np.s_[:, :,:]),
('data_adv', np.s_[::2, :, :2]),
('data_adv', np.s_[0]),
), indirect=['filename'])
def test_world(self, filename, view, use_dask):
p = path(filename)
# d = fits.getdata(p)
# wcs = WCS(p)
# c = SpectralCube(d, wcs)
c = SpectralCube.read(p)
wcs = c.wcs
# shp = d.shape
# inds = np.indices(d.shape)
shp = c.shape
inds = np.indices(c.shape)
pix = np.column_stack([i.ravel() for i in inds[::-1]])
world = wcs.all_pix2world(pix, 0).T
world = [w.reshape(shp) for w in world]
world = [w[view] * u.Unit(wcs.wcs.cunit[i])
for i, w in enumerate(world)][::-1]
w2 = c.world[view]
for result, expected in zip(w2, world):
assert_allclose(result, expected)
# Test world_flattened here, too
w2_flat = c.flattened_world(view=view)
for result, expected in zip(w2_flat, world):
print(result.shape, expected.flatten().shape)
assert_allclose(result, expected.flatten())
@pytest.mark.parametrize('view', (np.s_[:, :,:],
np.s_[:2, :3, ::2]))
def test_world_transposes_3d(self, view, data_adv, data_vad, use_dask):
c1, d1 = cube_and_raw(data_adv, use_dask=use_dask)
c2, d2 = cube_and_raw(data_vad, use_dask=use_dask)
for w1, w2 in zip(c1.world[view], c2.world[view]):
assert_allclose(w1, w2)
@pytest.mark.parametrize('view',
(np.s_[:, :,:],
np.s_[:2, :3, ::2],
np.s_[::3, ::2, :1],
np.s_[:], ))
def test_world_transposes_4d(self, view, data_advs, data_sadv, use_dask):
c1, d1 = cube_and_raw(data_advs, use_dask=use_dask)
c2, d2 = cube_and_raw(data_sadv, use_dask=use_dask)
for w1, w2 in zip(c1.world[view], c2.world[view]):
assert_allclose(w1, w2)
@pytest.mark.parametrize(('filename','masktype','unit','suffix'),
itertools.product(('data_advs', 'data_dvsa', 'data_sdav', 'data_sadv', 'data_vsad', 'data_vad', 'data_adv',),
(BooleanArrayMask, LazyMask, FunctionMask, CompositeMask),
('Hz', u.Hz),
('.fits', '.image') if casaOK else ('.fits',)
),
indirect=['filename'])
def test_with_spectral_unit(self, filename, masktype, unit, suffix, use_dask):
if suffix == '.image':
if not use_dask:
pytest.skip()
import casatasks
filename = str(filename)
casatasks.importfits(filename, filename.replace('.fits', '.image'))
filename = filename.replace('.fits', '.image')
cube, data = cube_and_raw(filename, use_dask=use_dask)
cube_freq = cube.with_spectral_unit(unit)
if masktype == BooleanArrayMask:
# don't use data here:
# data haven't necessarily been rearranged to the correct shape by
# cube_utils.orient
mask = BooleanArrayMask(cube.filled_data[:].value>0,
wcs=cube._wcs)
elif masktype == LazyMask:
mask = LazyMask(lambda x: x>0, cube=cube)
elif masktype == FunctionMask:
mask = FunctionMask(lambda x: x>0)
elif masktype == CompositeMask:
mask1 = FunctionMask(lambda x: x>0)
mask2 = LazyMask(lambda x: x>0, cube)
mask = CompositeMask(mask1, mask2)
cube2 = cube.with_mask(mask)
cube_masked_freq = cube2.with_spectral_unit(unit)
if suffix == '.fits':
assert cube_freq._wcs.wcs.ctype[cube_freq._wcs.wcs.spec] == 'FREQ-W2F'
assert cube_masked_freq._wcs.wcs.ctype[cube_masked_freq._wcs.wcs.spec] == 'FREQ-W2F'
assert cube_masked_freq._mask._wcs.wcs.ctype[cube_masked_freq._mask._wcs.wcs.spec] == 'FREQ-W2F'
elif suffix == '.image':
# this is *not correct* but it's a known failure in CASA: CASA's
# image headers don't support any of the FITS spectral standard, so
# it just ends up as 'FREQ'. This isn't on us to fix so this is
# really an "xfail" that we hope will change...
assert cube_freq._wcs.wcs.ctype[cube_freq._wcs.wcs.spec] == 'FREQ'
assert cube_masked_freq._wcs.wcs.ctype[cube_masked_freq._wcs.wcs.spec] == 'FREQ'
assert cube_masked_freq._mask._wcs.wcs.ctype[cube_masked_freq._mask._wcs.wcs.spec] == 'FREQ'
# values taken from header
rest = 1.42040571841E+09*u.Hz
crval = -3.21214698632E+05*u.m/u.s
outcv = crval.to(u.m, u.doppler_optical(rest)).to(u.Hz, u.spectral())
assert_allclose(cube_freq._wcs.wcs.crval[cube_freq._wcs.wcs.spec],
outcv.to(u.Hz).value)
assert_allclose(cube_masked_freq._wcs.wcs.crval[cube_masked_freq._wcs.wcs.spec],
outcv.to(u.Hz).value)
assert_allclose(cube_masked_freq._mask._wcs.wcs.crval[cube_masked_freq._mask._wcs.wcs.spec],
outcv.to(u.Hz).value)
@pytest.mark.parametrize(('operation', 'value'),
((operator.mul, 0.5*u.K),
(operator.truediv, 0.5*u.K),
))
def test_apply_everywhere(self, operation, value, data_advs, use_dask):
c1, d1 = cube_and_raw(data_advs, use_dask=use_dask)
# append 'o' to indicate that it has been operated on
c1o = c1._apply_everywhere(operation, value, check_units=True)
d1o = operation(u.Quantity(d1, u.K), value)
assert np.all(d1o == c1o.filled_data[:])
# allclose fails on identical data?
#assert_allclose(d1o, c1o.filled_data[:])
@pytest.mark.parametrize(('operation', 'value'), ((operator.add, 0.5*u.K),
(operator.sub, 0.5*u.K),))
def test_apply_everywhere_plusminus(self, operation, value, data_advs, use_dask):
c1, d1 = cube_and_raw(data_advs, use_dask=use_dask)
assert c1.unit == value.unit
# append 'o' to indicate that it has been operated on
# value.value: the __add__ function explicitly drops the units
c1o = c1._apply_everywhere(operation, value.value, check_units=False)
d1o = operation(u.Quantity(d1, u.K), value)
assert c1o.unit == c1.unit
assert c1o.unit == value.unit
assert np.all(d1o == c1o.filled_data[:])
del c1
del c1o
# This test appears to leave things open even if we delete variables
#@pytest.mark.parametrize(('operation', 'value'),
# ((operator.div if hasattr(operator,'div') else operator.floordiv, 0.5*u.K),))
#def test_apply_everywhere_floordivide(self, operation, value, data_advs, use_dask):
# c1, d1 = cube_and_raw(data_advs, use_dask=use_dask)
# # floordiv doesn't work, which is why it's NotImplemented
# with pytest.raises(u.UnitCoversionError):
# c1o = c1._apply_everywhere(operation, value)
# del c1
@pytest.mark.parametrize(('filename', 'trans'), translist, indirect=['filename'])
def test_getitem(self, filename, trans, use_dask):
c, d = cube_and_raw(filename, use_dask=use_dask)
expected = np.squeeze(d.transpose(trans))
assert_allclose(c[0,:,:].value, expected[0,:,:])
assert_allclose(c[:,:,0].value, expected[:,:,0])
assert_allclose(c[:,0,:].value, expected[:,0,:])
# Not implemented:
#assert_allclose(c[0,0,:].value, expected[0,0,:])
#assert_allclose(c[0,:,0].value, expected[0,:,0])
assert_allclose(c[:,0,0].value, expected[:,0,0])
assert_allclose(c[1,:,:].value, expected[1,:,:])
assert_allclose(c[:,:,1].value, expected[:,:,1])
assert_allclose(c[:,1,:].value, expected[:,1,:])
# Not implemented:
#assert_allclose(c[1,1,:].value, expected[1,1,:])
#assert_allclose(c[1,:,1].value, expected[1,:,1])
assert_allclose(c[:,1,1].value, expected[:,1,1])
c2 = c.with_spectral_unit(u.km/u.s, velocity_convention='radio')
assert_allclose(c2[0,:,:].value, expected[0,:,:])
assert_allclose(c2[:,:,0].value, expected[:,:,0])
assert_allclose(c2[:,0,:].value, expected[:,0,:])
# Not implemented:
#assert_allclose(c2[0,0,:].value, expected[0,0,:])
#assert_allclose(c2[0,:,0].value, expected[0,:,0])
assert_allclose(c2[:,0,0].value, expected[:,0,0])
assert_allclose(c2[1,:,:].value, expected[1,:,:])
assert_allclose(c2[:,:,1].value, expected[:,:,1])
assert_allclose(c2[:,1,:].value, expected[:,1,:])
# Not implemented:
#assert_allclose(c2[1,1,:].value, expected[1,1,:])
#assert_allclose(c2[1,:,1].value, expected[1,:,1])
assert_allclose(c2[:,1,1].value, expected[:,1,1])
@pytest.mark.parametrize(('filename', 'trans'), translist_vrsc, indirect=['filename'])
def test_getitem_vrsc(self, filename, trans, use_dask):
c, d = cube_and_raw(filename, use_dask=use_dask)
expected = np.squeeze(d.transpose(trans))
# No pv slices for VRSC.
assert_allclose(c[0,:,:].value, expected[0,:,:])
# Not implemented:
#assert_allclose(c[0,0,:].value, expected[0,0,:])
#assert_allclose(c[0,:,0].value, expected[0,:,0])
assert_allclose(c[:,0,0].value, expected[:,0,0])
assert_allclose(c[1,:,:].value, expected[1,:,:])
# Not implemented:
#assert_allclose(c[1,1,:].value, expected[1,1,:])
#assert_allclose(c[1,:,1].value, expected[1,:,1])
assert_allclose(c[:,1,1].value, expected[:,1,1])
c2 = c.with_spectral_unit(u.km/u.s, velocity_convention='radio')
assert_allclose(c2[0,:,:].value, expected[0,:,:])
# Not implemented:
#assert_allclose(c2[0,0,:].value, expected[0,0,:])
#assert_allclose(c2[0,:,0].value, expected[0,:,0])
assert_allclose(c2[:,0,0].value, expected[:,0,0])
assert_allclose(c2[1,:,:].value, expected[1,:,:])
# Not implemented:
#assert_allclose(c2[1,1,:].value, expected[1,1,:])
#assert_allclose(c2[1,:,1].value, expected[1,:,1])
assert_allclose(c2[:,1,1].value, expected[:,1,1])
class TestArithmetic(object):
# FIXME: in the tests below we need to manually do self.c1 = self.d1 = None
# because if we try and do this in a teardown method, the open-files check
# gets done first. This is an issue that should be resolved in pytest-openfiles.
@pytest.fixture(autouse=True)
def setup_method_fixture(self, request, data_adv_simple, use_dask):
self.c1, self.d1 = cube_and_raw(data_adv_simple, use_dask=use_dask)
@pytest.mark.parametrize(('value'),(1,1.0,2,2.0))
def test_add(self,value):
d2 = self.d1 + value
c2 = self.c1 + value*u.K
assert np.all(d2 == c2.filled_data[:].value)
assert c2.unit == u.K
with pytest.raises(ValueError,
match="Can only add cube objects from SpectralCubes or Quantities with a unit attribute."):
# c1 is something with Kelvin units, but you can't add a scalar
_ = self.c1 + value
with pytest.raises(u.UnitConversionError,
match=re.escape("'Jy' (spectral flux density) and 'K' (temperature) are not convertible")):
# c1 is something with Kelvin units, but you can't add a scalar
_ = self.c1 + value*u.Jy
# cleanup
self.c1 = self.d1 = None
def test_add_cubes(self):
d2 = self.d1 + self.d1
c2 = self.c1 + self.c1
assert np.all(d2 == c2.filled_data[:].value)
assert c2.unit == u.K
self.c1 = self.d1 = None
@pytest.mark.parametrize(('value'),(1,1.0,2,2.0))
def test_subtract(self, value):
d2 = self.d1 - value
c2 = self.c1 - value*u.K
assert np.all(d2 == c2.filled_data[:].value)
assert c2.unit == u.K
# regression test #251: the _data attribute must not be a quantity
assert not hasattr(c2._data, 'unit')
self.c1 = self.d1 = None
def test_subtract_cubes(self):
d2 = self.d1 - self.d1
c2 = self.c1 - self.c1
assert np.all(d2 == c2.filled_data[:].value)
assert np.all(c2.filled_data[:].value == 0)
assert c2.unit == u.K
# regression test #251: the _data attribute must not be a quantity
assert not hasattr(c2._data, 'unit')
self.c1 = self.d1 = None
@pytest.mark.parametrize(('value'),(1,1.0,2,2.0))
def test_mul(self, value):
d2 = self.d1 * value
c2 = self.c1 * value
assert np.all(d2 == c2.filled_data[:].value)
assert c2.unit == u.K
self.c1 = self.d1 = None
def test_mul_cubes(self):
d2 = self.d1 * self.d1
c2 = self.c1 * self.c1
assert np.all(d2 == c2.filled_data[:].value)
assert c2.unit == u.K**2
self.c1 = self.d1 = None
@pytest.mark.parametrize(('value'),(1,1.0,2,2.0))
def test_div(self, value):
d2 = self.d1 / value
c2 = self.c1 / value
assert np.all(d2 == c2.filled_data[:].value)
assert c2.unit == u.K
self.c1 = self.d1 = None
def test_div_cubes(self):
d2 = self.d1 / self.d1
c2 = self.c1 / self.c1
assert np.all((d2 == c2.filled_data[:].value) | (np.isnan(c2.filled_data[:])))
assert np.all((c2.filled_data[:] == 1) | (np.isnan(c2.filled_data[:])))
assert c2.unit == u.one
self.c1 = self.d1 = None
@pytest.mark.parametrize(('value'),(1,1.0,2,2.0))
def test_floordiv(self, value):
with pytest.raises(NotImplementedError,
match=re.escape("Floor-division (division with truncation) "
"is not supported.")):
c2 = self.c1 // value
self.c1 = self.d1 = None
@pytest.mark.parametrize(('value'),(1,1.0,2,2.0)*u.K)
def test_floordiv_fails(self, value):
with pytest.raises(NotImplementedError,
match=re.escape("Floor-division (division with truncation) "
"is not supported.")):
c2 = self.c1 // value
self.c1 = self.d1 = None
def test_floordiv_cubes(self):
with pytest.raises(NotImplementedError,
match=re.escape("Floor-division (division with truncation) "
"is not supported.")):
c2 = self.c1 // self.c1
self.c1 = self.d1 = None
@pytest.mark.parametrize(('value'),
(1,1.0,2,2.0))
def test_pow(self, value):
d2 = self.d1 ** value
c2 = self.c1 ** value
assert np.all(d2 == c2.filled_data[:].value)
assert c2.unit == u.K**value
self.c1 = self.d1 = None
def test_cube_add(self):
c2 = self.c1 + self.c1
d2 = self.d1 + self.d1
assert np.all(d2 == c2.filled_data[:].value)
assert c2.unit == u.K
self.c1 = self.d1 = None
class TestFilters(BaseTest):
def test_mask_data(self):
c, d = self.c, self.d
expected = np.where(d > .5, d, np.nan)
assert_allclose(c._get_filled_data(), expected)
expected = np.where(d > .5, d, 0)
assert_allclose(c._get_filled_data(fill=0), expected)
self.c = self.d = None
@pytest.mark.parametrize('operation', (operator.lt, operator.gt, operator.le, operator.ge))
def test_mask_comparison(self, operation):
c, d = self.c, self.d
dmask = operation(d, 0.6) & self.c.mask.include()
cmask = operation(c, 0.6*u.K)
assert (self.c.mask.include() & cmask.include()).sum() == dmask.sum()
assert np.all(c.with_mask(cmask).mask.include() == dmask)
np.testing.assert_almost_equal(c.with_mask(cmask).sum().value,
d[dmask].sum())
self.c = self.d = None
def test_flatten(self):
c, d = self.c, self.d
expected = d[d > 0.5]
assert_allclose(c.flattened(), expected)
self.c = self.d = None
def test_flatten_weights(self):
c, d = self.c, self.d
expected = d[d > 0.5] ** 2
assert_allclose(c.flattened(weights=d), expected)
self.c = self.d = None
def test_slice(self):
c, d = self.c, self.d
expected = d[:3, :2, ::2]
expected = expected[expected > 0.5]
assert_allclose(c[0:3, 0:2, 0::2].flattened(), expected)
self.c = self.d = None
class TestNumpyMethods(BaseTest):
def _check_numpy(self, cubemethod, array, func):
for axis in [None, 0, 1, 2]:
for how in ['auto', 'slice', 'cube', 'ray']:
expected = func(array, axis=axis)
actual = cubemethod(axis=axis)
assert_allclose(actual, expected)
def test_sum(self):
d = np.where(self.d > 0.5, self.d, np.nan)
self._check_numpy(self.c.sum, d, allbadtonan(np.nansum))
# Need a secondary check to make sure it works with no
# axis keyword being passed (regression test for issue introduced in
# 150)
assert np.all(self.c.sum().value == np.nansum(d))
self.c = self.d = None
def test_max(self):
d = np.where(self.d > 0.5, self.d, np.nan)
self._check_numpy(self.c.max, d, np.nanmax)
self.c = self.d = None
def test_min(self):
d = np.where(self.d > 0.5, self.d, np.nan)
self._check_numpy(self.c.min, d, np.nanmin)
self.c = self.d = None
def test_argmax(self):
d = np.where(self.d > 0.5, self.d, -10)
self._check_numpy(self.c.argmax, d, np.nanargmax)
self.c = self.d = None
def test_argmin(self):
d = np.where(self.d > 0.5, self.d, 10)
self._check_numpy(self.c.argmin, d, np.nanargmin)
self.c = self.d = None
@pytest.mark.parametrize('iterate_rays', (True,False))
def test_median(self, iterate_rays, use_dask):
# Make sure that medians ignore empty/bad/NaN values
m = np.empty(self.d.shape[1:])
for y in range(m.shape[0]):
for x in range(m.shape[1]):
ray = self.d[:, y, x]
# the cube mask is for values >0.5
ray = ray[ray > 0.5]
m[y, x] = np.median(ray)
if use_dask:
if iterate_rays:
self.c = self.d = None
pytest.skip()
else:
scmed = self.c.median(axis=0)
else:
scmed = self.c.median(axis=0, iterate_rays=iterate_rays)
assert_allclose(scmed, m)
assert not np.any(np.isnan(scmed.value))
assert scmed.unit == self.c.unit
self.c = self.d = None
@pytest.mark.skipif('NUMPY_LT_19')
def test_bad_median_apply(self):
# this is a test for manually-applied numpy medians, which are different
# from the cube.median method that does "the right thing"
#
# for regular median, we expect a failure, which is why we don't use
# regular median.
scmed = self.c.apply_numpy_function(np.median, axis=0)
# this checks whether numpy <=1.9.3 has a bug?
# as far as I can tell, np==1.9.3 no longer has this bug/feature
#if LooseVersion(np.__version__) <= LooseVersion('1.9.3'):
# # print statements added so we get more info in the travis builds
# print("Numpy version is: {0}".format(LooseVersion(np.__version__)))
# assert np.count_nonzero(np.isnan(scmed)) == 5
#else:
# print("Numpy version is: {0}".format(LooseVersion(np.__version__)))
assert np.count_nonzero(np.isnan(scmed)) == 6
scmed = self.c.apply_numpy_function(np.nanmedian, axis=0)
assert np.count_nonzero(np.isnan(scmed)) == 0
# use a more aggressive mask to force there to be some all-nan axes
m2 = self.c>0.74*self.c.unit
scmed = self.c.with_mask(m2).apply_numpy_function(np.nanmedian, axis=0)
assert np.count_nonzero(np.isnan(scmed)) == 1
self.c = self.d = None
@pytest.mark.parametrize('iterate_rays', (True,False))
def test_bad_median(self, iterate_rays, use_dask):
# This should have the same result as np.nanmedian, though it might be
# faster if bottleneck loads
if use_dask:
if iterate_rays:
self.c = self.d = None
pytest.skip()
else:
scmed = self.c.median(axis=0)
else:
scmed = self.c.median(axis=0, iterate_rays=iterate_rays)
assert np.count_nonzero(np.isnan(scmed)) == 0
m2 = self.c>0.74*self.c.unit
if use_dask:
scmed = self.c.with_mask(m2).median(axis=0)
else:
scmed = self.c.with_mask(m2).median(axis=0, iterate_rays=iterate_rays)
assert np.count_nonzero(np.isnan(scmed)) == 1
self.c = self.d = None
@pytest.mark.parametrize(('pct', 'iterate_rays'),
(zip((3,25,50,75,97)*2,(True,)*5 + (False,)*5)))
def test_percentile(self, pct, iterate_rays, use_dask):
m = np.empty(self.d.sum(axis=0).shape)
for y in range(m.shape[0]):
for x in range(m.shape[1]):
ray = self.d[:, y, x]
ray = ray[ray > 0.5]
m[y, x] = np.percentile(ray, pct)
if use_dask:
if iterate_rays:
self.c = self.d = None
pytest.skip()
else:
scpct = self.c.percentile(pct, axis=0)
else:
scpct = self.c.percentile(pct, axis=0, iterate_rays=iterate_rays)
assert_allclose(scpct, m)
assert not np.any(np.isnan(scpct.value))
assert scpct.unit == self.c.unit
self.c = self.d = None
@pytest.mark.parametrize('method', ('sum', 'min', 'max', 'std', 'mad_std',
'median', 'argmin', 'argmax'))
def test_transpose(self, method, data_adv, data_vad, use_dask):
c1, d1 = cube_and_raw(data_adv, use_dask=use_dask)
c2, d2 = cube_and_raw(data_vad, use_dask=use_dask)
for axis in [None, 0, 1, 2]:
assert_allclose(getattr(c1, method)(axis=axis),
getattr(c2, method)(axis=axis))
if not use_dask:
# check that all these accept progressbar kwargs
assert_allclose(getattr(c1, method)(axis=axis, progressbar=True),
getattr(c2, method)(axis=axis, progressbar=True))
self.c = self.d = None
@pytest.mark.parametrize('method', ('argmax_world', 'argmin_world'))
def test_transpose_arg_world(self, method, data_adv, data_vad, use_dask):
c1, d1 = cube_and_raw(data_adv, use_dask=use_dask)
c2, d2 = cube_and_raw(data_vad, use_dask=use_dask)
# The spectral axis should work in all of these test cases.
axis = 0
assert_allclose(getattr(c1, method)(axis=axis),
getattr(c2, method)(axis=axis))
if not use_dask:
# check that all these accept progressbar kwargs
assert_allclose(getattr(c1, method)(axis=axis, progressbar=True),
getattr(c2, method)(axis=axis, progressbar=True))
# But the spatial axes should fail since the pixel axes are correlated to
# the WCS celestial axes. Currently this will happen for ALL celestial axes.
for axis in [1, 2]:
with pytest.raises(utils.WCSCelestialError,
match=re.escape(f"{method} requires the celestial axes")):
assert_allclose(getattr(c1, method)(axis=axis),
getattr(c2, method)(axis=axis))
self.c = self.d = None
@pytest.mark.parametrize('method', ('argmax_world', 'argmin_world'))
def test_arg_world(self, method, data_adv, use_dask):
c1, d1 = cube_and_raw(data_adv, use_dask=use_dask)
# Pixel operation is same name with "_world" removed.
arg0_pixel = getattr(c1, method.split("_")[0])(axis=0)
arg0_world = np.take_along_axis(c1.spectral_axis[:, np.newaxis, np.newaxis],
arg0_pixel[np.newaxis, :, :], axis=0).squeeze()
assert_allclose(getattr(c1, method)(axis=0), arg0_world)
self.c = self.d = None
class TestSlab(BaseTest):
def test_closest_spectral_channel(self):
c = self.c
ms = u.m / u.s
assert c.closest_spectral_channel(-321214.698632 * ms) == 0
assert c.closest_spectral_channel(-319926.48366321 * ms) == 1
assert c.closest_spectral_channel(-318638.26869442 * ms) == 2
assert c.closest_spectral_channel(-320000 * ms) == 1
assert c.closest_spectral_channel(-340000 * ms) == 0
assert c.closest_spectral_channel(0 * ms) == 3
self.c = self.d = None
def test_spectral_channel_bad_units(self):
with pytest.raises(u.UnitsError,
match=re.escape("'value' should be in frequency equivalent or velocity units (got s)")):
self.c.closest_spectral_channel(1 * u.s)
with pytest.raises(u.UnitsError,
match=re.escape("Spectral axis is in velocity units and 'value' is in frequency-equivalent units - use SpectralCube.with_spectral_unit first to convert the cube to frequency-equivalent units, or search for a velocity instead")):
self.c.closest_spectral_channel(1. * u.Hz)
self.c = self.d = None
def test_slab(self):
ms = u.m / u.s
c2 = self.c.spectral_slab(-320000 * ms, -318600 * ms)
assert_allclose(c2._data, self.d[1:3])
assert c2._mask is not None
self.c = self.d = None
def test_slab_reverse_limits(self):
ms = u.m / u.s
c2 = self.c.spectral_slab(-318600 * ms, -320000 * ms)
assert_allclose(c2._data, self.d[1:3])
assert c2._mask is not None
self.c = self.d = None
def test_slab_preserves_wcs(self):
# regression test
ms = u.m / u.s
crpix = list(self.c._wcs.wcs.crpix)
self.c.spectral_slab(-318600 * ms, -320000 * ms)
assert list(self.c._wcs.wcs.crpix) == crpix
self.c = self.d = None
class TestSlabMultiBeams(BaseTestMultiBeams, TestSlab):
""" same tests with multibeams """
pass
# class TestRepr(BaseTest):
# def test_repr(self):
# assert repr(self.c) == """
# SpectralCube with shape=(4, 3, 2) and unit=K:
# n_x: 2 type_x: RA---SIN unit_x: deg range: 24.062698 deg: 24.063349 deg
# n_y: 3 type_y: DEC--SIN unit_y: deg range: 29.934094 deg: 29.935209 deg
# n_s: 4 type_s: VOPT unit_s: km / s range: -321.215 km / s: -317.350 km / s
# """.strip()
# self.c = self.d = None
# def test_repr_withunit(self):
# self.c._unit = u.Jy
# assert repr(self.c) == """
# SpectralCube with shape=(4, 3, 2) and unit=Jy:
# n_x: 2 type_x: RA---SIN unit_x: deg range: 24.062698 deg: 24.063349 deg
# n_y: 3 type_y: DEC--SIN unit_y: deg range: 29.934094 deg: 29.935209 deg
# n_s: 4 type_s: VOPT unit_s: km / s range: -321.215 km / s: -317.350 km / s
# """.strip()
# self.c = self.d = None
@pytest.mark.skipif('not YT_INSTALLED')
class TestYt():
@pytest.fixture(autouse=True)
def setup_method_fixture(self, request, data_adv, use_dask):
print("HERE")
self.cube = SpectralCube.read(data_adv, use_dask=use_dask)
# Without any special arguments
print(self.cube)
print(self.cube.to_yt)
self.ytc1 = self.cube.to_yt()
# With spectral factor = 0.5
self.spectral_factor = 0.5
self.ytc2 = self.cube.to_yt(spectral_factor=self.spectral_factor)
# With nprocs = 4
self.nprocs = 4
self.ytc3 = self.cube.to_yt(nprocs=self.nprocs)
print("DONE")
def test_yt(self):
# The following assertions just make sure everything is
# kosher with the datasets generated in different ways
ytc1,ytc2,ytc3 = self.ytc1,self.ytc2,self.ytc3
ds1,ds2,ds3 = ytc1.dataset, ytc2.dataset, ytc3.dataset
assert_array_equal(ds1.domain_dimensions, ds2.domain_dimensions)
assert_array_equal(ds2.domain_dimensions, ds3.domain_dimensions)
assert_allclose(ds1.domain_left_edge.value, ds2.domain_left_edge.value)
assert_allclose(ds2.domain_left_edge.value, ds3.domain_left_edge.value)
assert_allclose(ds1.domain_width.value,
ds2.domain_width.value*np.array([1,1,1.0/self.spectral_factor]))
assert_allclose(ds1.domain_width.value, ds3.domain_width.value)
assert self.nprocs == len(ds3.index.grids)
ds1.index
ds2.index
ds3.index
unit1 = ds1.field_info["fits","flux"].units
unit2 = ds2.field_info["fits","flux"].units
unit3 = ds3.field_info["fits","flux"].units
ds1.quan(1.0,unit1)
ds2.quan(1.0,unit2)
ds3.quan(1.0,unit3)
self.cube = self.ytc1 = self.ytc2 = self.ytc3 = None
@pytest.mark.skipif('YT_LT_301', reason='yt 3.0 has a FITS-related bug')
def test_yt_fluxcompare(self):
# Now check that we can compute quantities of the flux
# and that they are equal
ytc1,ytc2,ytc3 = self.ytc1,self.ytc2,self.ytc3
ds1,ds2,ds3 = ytc1.dataset, ytc2.dataset, ytc3.dataset
dd1 = ds1.all_data()
dd2 = ds2.all_data()
dd3 = ds3.all_data()
flux1_tot = dd1.quantities.total_quantity("flux")
flux2_tot = dd2.quantities.total_quantity("flux")
flux3_tot = dd3.quantities.total_quantity("flux")
flux1_min, flux1_max = dd1.quantities.extrema("flux")
flux2_min, flux2_max = dd2.quantities.extrema("flux")
flux3_min, flux3_max = dd3.quantities.extrema("flux")
assert flux1_tot == flux2_tot
assert flux1_tot == flux3_tot
assert flux1_min == flux2_min
assert flux1_min == flux3_min
assert flux1_max == flux2_max
assert flux1_max == flux3_max
self.cube = self.ytc1 = self.ytc2 = self.ytc3 = None
def test_yt_roundtrip_wcs(self):
# Now test round-trip conversions between yt and world coordinates
ytc1,ytc2,ytc3 = self.ytc1,self.ytc2,self.ytc3
ds1,ds2,ds3 = ytc1.dataset, ytc2.dataset, ytc3.dataset
yt_coord1 = ds1.domain_left_edge + np.random.random(size=3)*ds1.domain_width
world_coord1 = ytc1.yt2world(yt_coord1)
assert_allclose(ytc1.world2yt(world_coord1), yt_coord1.value)
yt_coord2 = ds2.domain_left_edge + np.random.random(size=3)*ds2.domain_width
world_coord2 = ytc2.yt2world(yt_coord2)
assert_allclose(ytc2.world2yt(world_coord2), yt_coord2.value)
yt_coord3 = ds3.domain_left_edge + np.random.random(size=3)*ds3.domain_width
world_coord3 = ytc3.yt2world(yt_coord3)
assert_allclose(ytc3.world2yt(world_coord3), yt_coord3.value)
self.cube = self.ytc1 = self.ytc2 = self.ytc3 = None
def test_read_write_rountrip(tmpdir, data_adv, use_dask):
cube = SpectralCube.read(data_adv, use_dask=use_dask)
tmp_file = str(tmpdir.join('test.fits'))
cube.write(tmp_file)
cube2 = SpectralCube.read(tmp_file, use_dask=use_dask)
    assert cube.shape == cube2.shape
assert_allclose(cube._data, cube2._data)
if (((hasattr(_wcs, '__version__')
and LooseVersion(_wcs.__version__) < LooseVersion('5.9'))
or not hasattr(_wcs, '__version__'))):
# see https://github.com/astropy/astropy/pull/3992 for reasons:
# we should upgrade this for 5.10 when the absolute accuracy is
# maximized
assert cube._wcs.to_header_string() == cube2._wcs.to_header_string()
# in 5.11 and maybe even 5.12, the round trip fails. Maybe
# https://github.com/astropy/astropy/issues/4292 will solve it?
@pytest.mark.parametrize(('memmap', 'base'),
((True, mmap.mmap),
(False, None)))
def test_read_memmap(memmap, base, data_adv):
cube = SpectralCube.read(data_adv, memmap=memmap)
bb = cube.base
while hasattr(bb, 'base'):
bb = bb.base
if base is None:
assert bb is None
else:
assert isinstance(bb, base)
def _dummy_cube(use_dask):
data = np.array([[[0, 1, 2, 3, 4]]])
wcs = WCS(naxis=3)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VELO-HEL']
def lower_threshold(data, wcs, view=()):
return data[view] > 0
m1 = FunctionMask(lower_threshold)
cube = SpectralCube(data, wcs=wcs, mask=m1, use_dask=use_dask)
return cube
def test_with_mask(use_dask):
def upper_threshold(data, wcs, view=()):
return data[view] < 3
m2 = FunctionMask(upper_threshold)
cube = _dummy_cube(use_dask)
cube2 = cube.with_mask(m2)
assert_allclose(cube._get_filled_data(), [[[np.nan, 1, 2, 3, 4]]])
assert_allclose(cube2._get_filled_data(), [[[np.nan, 1, 2, np.nan, np.nan]]])
def test_with_mask_with_boolean_array(use_dask):
cube = _dummy_cube(use_dask)
mask = np.random.random(cube.shape) > 0.5
cube2 = cube.with_mask(mask, inherit_mask=False)
assert isinstance(cube2._mask, BooleanArrayMask)
assert cube2._mask._wcs is cube._wcs
assert cube2._mask._mask is mask
def test_with_mask_with_good_array_shape(use_dask):
cube = _dummy_cube(use_dask)
    mask = np.zeros((1, 5), dtype=bool)
cube2 = cube.with_mask(mask, inherit_mask=False)
assert isinstance(cube2._mask, BooleanArrayMask)
np.testing.assert_equal(cube2._mask._mask, mask.reshape((1, 1, 5)))
def test_with_mask_with_bad_array_shape(use_dask):
cube = _dummy_cube(use_dask)
    mask = np.zeros((5, 5), dtype=bool)
with pytest.raises(ValueError) as exc:
cube.with_mask(mask)
assert exc.value.args[0] == ("Mask shape is not broadcastable to data shape: "
"(5, 5) vs (1, 1, 5)")
class TestMasks(BaseTest):
@pytest.mark.parametrize('op', (operator.gt, operator.lt,
operator.le, operator.ge))
def test_operator_threshold(self, op):
# choose thresh to exercise proper equality tests
thresh = self.d.ravel()[0]
m = op(self.c, thresh*u.K)
self.c._mask = m
expected = self.d[op(self.d, thresh)]
actual = self.c.flattened()
assert_allclose(actual, expected)
self.c = self.d = None
def test_preserve_spectral_unit(data_advs, use_dask):
    # astropy.wcs has a tendency to change spectral units from e.g. km/s to
# m/s, so we have a workaround - check that it works.
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube_freq = cube.with_spectral_unit(u.GHz)
assert cube_freq.wcs.wcs.cunit[2] == 'Hz' # check internal
assert cube_freq.spectral_axis.unit is u.GHz
# Check that this preferred unit is propagated
new_cube = cube_freq.with_fill_value(fill_value=3.4)
assert new_cube.spectral_axis.unit is u.GHz
def test_endians(use_dask):
"""
Test that the endianness checking returns something in Native form
(this is only needed for non-numpy functions that worry about the
endianness of their data)
WARNING: Because the endianness is machine-dependent, this may fail on
different architectures! This is because numpy automatically converts
little-endian to native in the dtype parameter; I need a workaround for
this.
"""
pytest.importorskip('bottleneck')
big = np.array([[[1],[2]]], dtype='>f4')
lil = np.array([[[1],[2]]], dtype='<f4')
mywcs = WCS(naxis=3)
mywcs.wcs.ctype[0] = 'RA'
mywcs.wcs.ctype[1] = 'DEC'
mywcs.wcs.ctype[2] = 'VELO'
bigcube = SpectralCube(data=big, wcs=mywcs, use_dask=use_dask)
xbig = bigcube._get_filled_data(check_endian=True)
lilcube = SpectralCube(data=lil, wcs=mywcs, use_dask=use_dask)
xlil = lilcube._get_filled_data(check_endian=True)
assert xbig.dtype.byteorder == '='
assert xlil.dtype.byteorder == '='
xbig = bigcube._get_filled_data(check_endian=False)
xlil = lilcube._get_filled_data(check_endian=False)
assert xbig.dtype.byteorder == '>'
assert xlil.dtype.byteorder == '='
def test_header_naxis(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
assert cube.header['NAXIS'] == 3 # NOT data.ndim == 4
assert cube.header['NAXIS1'] == data.shape[3]
assert cube.header['NAXIS2'] == data.shape[2]
assert cube.header['NAXIS3'] == data.shape[1]
assert 'NAXIS4' not in cube.header
def test_slicing(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask)
# just to check that we're starting in the right place
assert cube.shape == (2,3,4)
sl = cube[:,1,:]
assert sl.shape == (2,4)
v = cube[1:2,:,:]
assert v.shape == (1,3,4)
# make sure this works. Not sure what keys to test for...
v.header
assert cube[:,:,:].shape == (2,3,4)
assert cube[:,:].shape == (2,3,4)
assert cube[:].shape == (2,3,4)
assert cube[:1,:1,:1].shape == (1,1,1)
@pytest.mark.parametrize(('view','naxis'),
[((slice(None), 1, slice(None)), 2),
((1, slice(None), slice(None)), 2),
((slice(None), slice(None), 1), 2),
((slice(None), slice(None), slice(1)), 3),
((slice(1), slice(1), slice(1)), 3),
((slice(None, None, -1), slice(None), slice(None)), 3),
])
def test_slice_wcs(view, naxis, data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
sl = cube[view]
assert sl.wcs.naxis == naxis
# Ensure slices work without a beam
cube._beam = None
sl = cube[view]
assert sl.wcs.naxis == naxis
def test_slice_wcs_reversal(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
view = (slice(None,None,-1), slice(None), slice(None))
rcube = cube[view]
rrcube = rcube[view]
np.testing.assert_array_equal(np.diff(cube.spectral_axis),
-np.diff(rcube.spectral_axis))
np.testing.assert_array_equal(rrcube.spectral_axis.value,
cube.spectral_axis.value)
np.testing.assert_array_equal(rcube.spectral_axis.value,
cube.spectral_axis.value[::-1])
np.testing.assert_array_equal(rrcube.world_extrema.value,
cube.world_extrema.value)
# check that the lon, lat arrays are *entirely* unchanged
np.testing.assert_array_equal(rrcube.spatial_coordinate_map[0].value,
cube.spatial_coordinate_map[0].value)
np.testing.assert_array_equal(rrcube.spatial_coordinate_map[1].value,
cube.spatial_coordinate_map[1].value)
def test_spectral_slice_preserve_units(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube = cube.with_spectral_unit(u.km/u.s)
sl = cube[:,0,0]
assert cube._spectral_unit == u.km/u.s
assert sl._spectral_unit == u.km/u.s
assert cube.spectral_axis.unit == u.km/u.s
assert sl.spectral_axis.unit == u.km/u.s
def test_header_units_consistent(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube_ms = cube.with_spectral_unit(u.m/u.s)
cube_kms = cube.with_spectral_unit(u.km/u.s)
cube_Mms = cube.with_spectral_unit(u.Mm/u.s)
assert cube.header['CUNIT3'] == 'km s-1'
assert cube_ms.header['CUNIT3'] == 'm s-1'
assert cube_kms.header['CUNIT3'] == 'km s-1'
assert cube_Mms.header['CUNIT3'] == 'Mm s-1'
# Wow, the tolerance here is really terrible...
assert_allclose(cube_Mms.header['CDELT3'], cube.header['CDELT3']/1e3,rtol=1e-3,atol=1e-5)
assert_allclose(cube.header['CDELT3'], cube_kms.header['CDELT3'],rtol=1e-2,atol=1e-5)
assert_allclose(cube.header['CDELT3']*1e3, cube_ms.header['CDELT3'],rtol=1e-2,atol=1e-5)
cube_freq = cube.with_spectral_unit(u.Hz)
assert cube_freq.header['CUNIT3'] == 'Hz'
cube_freq_GHz = cube.with_spectral_unit(u.GHz)
assert cube_freq_GHz.header['CUNIT3'] == 'GHz'
def test_spectral_unit_conventions(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube_frq = cube.with_spectral_unit(u.Hz)
cube_opt = cube.with_spectral_unit(u.km/u.s,
rest_value=cube_frq.spectral_axis[0],
velocity_convention='optical')
cube_rad = cube.with_spectral_unit(u.km/u.s,
rest_value=cube_frq.spectral_axis[0],
velocity_convention='radio')
cube_rel = cube.with_spectral_unit(u.km/u.s,
rest_value=cube_frq.spectral_axis[0],
velocity_convention='relativistic')
# should all be exactly 0 km/s
for x in (cube_rel.spectral_axis[0], cube_rad.spectral_axis[0],
cube_opt.spectral_axis[0]):
np.testing.assert_almost_equal(0,x.value)
assert cube_rel.spectral_axis[1] != cube_rad.spectral_axis[1]
assert cube_opt.spectral_axis[1] != cube_rad.spectral_axis[1]
assert cube_rel.spectral_axis[1] != cube_opt.spectral_axis[1]
assert cube_rel.velocity_convention == u.doppler_relativistic
assert cube_rad.velocity_convention == u.doppler_radio
assert cube_opt.velocity_convention == u.doppler_optical
def test_invalid_spectral_unit_conventions(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
with pytest.raises(ValueError,
match=("Velocity convention must be radio, optical, "
"or relativistic.")):
cube.with_spectral_unit(u.km/u.s,
velocity_convention='invalid velocity convention')
@pytest.mark.parametrize('rest', (50, 50*u.K))
def test_invalid_rest(rest, data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
with pytest.raises(ValueError,
match=("Rest value must be specified as an astropy "
"quantity with spectral equivalence.")):
cube.with_spectral_unit(u.km/u.s,
velocity_convention='radio',
rest_value=rest)
def test_airwave_to_wave(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._wcs.wcs.ctype[2] = 'AWAV'
cube._wcs.wcs.cunit[2] = 'm'
cube._spectral_unit = u.m
cube._wcs.wcs.cdelt[2] = 1e-7
cube._wcs.wcs.crval[2] = 5e-7
ax1 = cube.spectral_axis
ax2 = cube.with_spectral_unit(u.m).spectral_axis
np.testing.assert_almost_equal(spectral_axis.air_to_vac(ax1).value,
ax2.value)
@pytest.mark.parametrize(('func','how','axis','filename'),
itertools.product(('sum','std','max','min','mean'),
('slice','cube','auto'),
(0,1,2),
('data_advs', 'data_advs_nobeam'),
), indirect=['filename'])
def test_twod_numpy(func, how, axis, filename, use_dask):
# Check that a numpy function returns the correct result when applied along
# one axis
# This is partly a regression test for #211
cube, data = cube_and_raw(filename, use_dask=use_dask)
cube._meta['BUNIT'] = 'K'
cube._unit = u.K
if use_dask:
if how != 'cube':
pytest.skip()
else:
proj = getattr(cube,func)(axis=axis)
else:
proj = getattr(cube,func)(axis=axis, how=how)
# data has a redundant 1st axis
dproj = getattr(data,func)(axis=(0,axis+1)).squeeze()
assert isinstance(proj, Projection)
np.testing.assert_equal(proj.value, dproj)
assert cube.unit == proj.unit
@pytest.mark.parametrize(('func','how','axis','filename'),
itertools.product(('sum','std','max','min','mean'),
('slice','cube','auto'),
((0,1),(1,2),(0,2)),
('data_advs', 'data_advs_nobeam'),
), indirect=['filename'])
def test_twod_numpy_twoaxes(func, how, axis, filename, use_dask):
# Check that a numpy function returns the correct result when applied along
# one axis
# This is partly a regression test for #211
cube, data = cube_and_raw(filename, use_dask=use_dask)
cube._meta['BUNIT'] = 'K'
cube._unit = u.K
with warnings.catch_warnings(record=True) as wrn:
if use_dask:
if how != 'cube':
pytest.skip()
else:
spec = getattr(cube,func)(axis=axis)
else:
spec = getattr(cube,func)(axis=axis, how=how)
if func == 'mean' and axis != (1,2):
assert 'Averaging over a spatial and a spectral' in str(wrn[-1].message)
# data has a redundant 1st axis
dspec = getattr(data.squeeze(),func)(axis=axis)
if axis == (1,2):
assert isinstance(spec, OneDSpectrum)
assert cube.unit == spec.unit
np.testing.assert_almost_equal(spec.value, dspec)
else:
np.testing.assert_almost_equal(spec, dspec)
def test_preserves_header_values(data_advs, use_dask):
# Check that the non-WCS header parameters are preserved during projection
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._meta['BUNIT'] = 'K'
cube._unit = u.K
cube._header['OBJECT'] = 'TestName'
if use_dask:
proj = cube.sum(axis=0)
else:
proj = cube.sum(axis=0, how='auto')
assert isinstance(proj, Projection)
assert proj.header['OBJECT'] == 'TestName'
assert proj.hdu.header['OBJECT'] == 'TestName'
def test_preserves_header_meta_values(data_advs, use_dask):
# Check that additional parameters in meta are preserved
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube.meta['foo'] = 'bar'
assert cube.header['FOO'] == 'bar'
# check that long keywords are also preserved
cube.meta['too_long_keyword'] = 'too_long_information'
assert 'too_long_keyword=too_long_information' in cube.header['COMMENT']
if use_dask:
proj = cube.sum(axis=0)
else:
proj = cube.sum(axis=0, how='auto')
# Checks that the header is preserved when passed to LDOs
for ldo in (proj, cube[:,0,0]):
assert isinstance(ldo, LowerDimensionalObject)
assert ldo.header['FOO'] == 'bar'
assert ldo.hdu.header['FOO'] == 'bar'
# make sure that the meta preservation works on the LDOs themselves too
ldo.meta['bar'] = 'foo'
assert ldo.header['BAR'] == 'foo'
assert 'too_long_keyword=too_long_information' in ldo.header['COMMENT']
@pytest.mark.parametrize(('func', 'filename'),
itertools.product(('sum','std','max','min','mean'),
('data_advs', 'data_advs_nobeam',),
), indirect=['filename'])
def test_oned_numpy(func, filename, use_dask):
# Check that a numpy function returns an appropriate spectrum
cube, data = cube_and_raw(filename, use_dask=use_dask)
cube._meta['BUNIT'] = 'K'
cube._unit = u.K
spec = getattr(cube,func)(axis=(1,2))
dspec = getattr(data,func)(axis=(2,3)).squeeze()
assert isinstance(spec, (OneDSpectrum, VaryingResolutionOneDSpectrum))
# data has a redundant 1st axis
np.testing.assert_equal(spec.value, dspec)
assert cube.unit == spec.unit
def test_oned_slice(data_advs, use_dask):
# Check that a slice returns an appropriate spectrum
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._meta['BUNIT'] = 'K'
cube._unit = u.K
spec = cube[:,0,0]
assert isinstance(spec, OneDSpectrum)
# data has a redundant 1st axis
np.testing.assert_equal(spec.value, data[0,:,0,0])
assert cube.unit == spec.unit
assert spec.header['BUNIT'] == cube.header['BUNIT']
def test_oned_slice_beams(data_sdav_beams, use_dask):
# Check that a slice returns an appropriate spectrum
cube, data = cube_and_raw(data_sdav_beams, use_dask=use_dask)
cube._meta['BUNIT'] = 'K'
cube._unit = u.K
spec = cube[:,0,0]
assert isinstance(spec, VaryingResolutionOneDSpectrum)
# data has a redundant 1st axis
np.testing.assert_equal(spec.value, data[:,0,0,0])
assert cube.unit == spec.unit
assert spec.header['BUNIT'] == cube.header['BUNIT']
assert hasattr(spec, 'beams')
assert 'BMAJ' in spec.hdulist[1].data.names
def test_subcube_slab_beams(data_sdav_beams, use_dask):
cube, data = cube_and_raw(data_sdav_beams, use_dask=use_dask)
slcube = cube[1:]
assert all(slcube.hdulist[1].data['CHAN'] == np.arange(slcube.shape[0]))
try:
# Make sure Beams has been sliced correctly
assert all(cube.beams[1:] == slcube.beams)
except TypeError:
# in 69eac9241220d3552c06b173944cb7cdebeb47ef, radio_beam switched to
# returning a single value
assert cube.beams[1:] == slcube.beams
# collapsing to one dimension raywise doesn't make sense and is therefore
# not supported.
@pytest.mark.parametrize('how', ('auto', 'cube', 'slice'))
def test_oned_collapse(how, data_advs, use_dask):
# Check that an operation along the spatial dims returns an appropriate
# spectrum
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._meta['BUNIT'] = 'K'
cube._unit = u.K
if use_dask:
if how != 'cube':
pytest.skip()
else:
spec = cube.mean(axis=(1,2))
else:
spec = cube.mean(axis=(1,2), how=how)
assert isinstance(spec, OneDSpectrum)
# data has a redundant 1st axis
np.testing.assert_equal(spec.value, data.mean(axis=(0,2,3)))
assert cube.unit == spec.unit
assert spec.header['BUNIT'] == cube.header['BUNIT']
def test_oned_collapse_beams(data_sdav_beams, use_dask):
# Check that an operation along the spatial dims returns an appropriate
# spectrum
cube, data = cube_and_raw(data_sdav_beams, use_dask=use_dask)
cube._meta['BUNIT'] = 'K'
cube._unit = u.K
spec = cube.mean(axis=(1,2))
assert isinstance(spec, VaryingResolutionOneDSpectrum)
# data has a redundant 1st axis
np.testing.assert_equal(spec.value, data.mean(axis=(1,2,3)))
assert cube.unit == spec.unit
assert spec.header['BUNIT'] == cube.header['BUNIT']
assert hasattr(spec, 'beams')
assert 'BMAJ' in spec.hdulist[1].data.names
def test_preserve_bunit(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
assert cube.header['BUNIT'] == 'K'
hdul = fits.open(data_advs)
hdu = hdul[0]
hdu.header['BUNIT'] = 'Jy'
cube = SpectralCube.read(hdu)
assert cube.unit == u.Jy
assert cube.header['BUNIT'] == 'Jy'
hdul.close()
def test_preserve_beam(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
beam = Beam.from_fits_header(str(data_advs))
assert cube.beam == beam
def test_beam_attach_to_header(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
header = cube._header.copy()
del header["BMAJ"], header["BMIN"], header["BPA"]
newcube = SpectralCube(data=data, wcs=cube.wcs, header=header,
beam=cube.beam)
assert cube.header["BMAJ"] == newcube.header["BMAJ"]
assert cube.header["BMIN"] == newcube.header["BMIN"]
assert cube.header["BPA"] == newcube.header["BPA"]
# Should be in meta too
assert newcube.meta['beam'] == cube.beam
def test_beam_custom(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
header = cube._header.copy()
beam = Beam.from_fits_header(header)
del header["BMAJ"], header["BMIN"], header["BPA"]
newcube = SpectralCube(data=data, wcs=cube.wcs, header=header)
# newcube should now not have a beam
# Should raise exception
    with pytest.raises(utils.NoBeamError):
        newcube.beam
# Attach the beam
newcube = newcube.with_beam(beam=beam)
assert newcube.beam == cube.beam
# Header should be updated
assert cube.header["BMAJ"] == newcube.header["BMAJ"]
assert cube.header["BMIN"] == newcube.header["BMIN"]
assert cube.header["BPA"] == newcube.header["BPA"]
# Should be in meta too
assert newcube.meta['beam'] == cube.beam
# Try changing the beam properties
newbeam = Beam(beam.major * 2)
newcube2 = newcube.with_beam(beam=newbeam)
assert newcube2.beam == newbeam
# Header should be updated
assert newcube2.header["BMAJ"] == newbeam.major.value
assert newcube2.header["BMIN"] == newbeam.minor.value
assert newcube2.header["BPA"] == newbeam.pa.value
# Should be in meta too
assert newcube2.meta['beam'] == newbeam
def test_cube_with_no_beam(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
header = cube._header.copy()
beam = Beam.from_fits_header(header)
del header["BMAJ"], header["BMIN"], header["BPA"]
newcube = SpectralCube(data=data, wcs=cube.wcs, header=header)
# Accessing beam raises an error
    with pytest.raises(utils.NoBeamError):
        newcube.beam
    # But it still has a beam attribute
assert hasattr(newcube, "_beam")
# Attach the beam
newcube = newcube.with_beam(beam=beam)
# But now it should have an accessible beam
try:
newcube.beam
except utils.NoBeamError as exc:
raise exc
def test_multibeam_custom(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
# Make a new set of beams that differs from the original.
new_beams = Beams([1.] * cube.shape[0] * u.deg)
# Attach the beam
newcube = cube.with_beams(new_beams, raise_error_jybm=False)
try:
assert all(new_beams == newcube.beams)
except TypeError:
# in 69eac9241220d3552c06b173944cb7cdebeb47ef, radio_beam switched to
# returning a single value
assert new_beams == newcube.beams
@pytest.mark.openfiles_ignore
@pytest.mark.xfail(raises=ValueError, strict=True)
def test_multibeam_custom_wrongshape(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
# Make a new set of beams that differs from the original.
new_beams = Beams([1.] * cube.shape[0] * u.deg)
# Attach the beam
cube.with_beams(new_beams[:1], raise_error_jybm=False)
@pytest.mark.openfiles_ignore
@pytest.mark.xfail(raises=utils.BeamUnitsError, strict=True)
def test_multibeam_jybm_error(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
# Make a new set of beams that differs from the original.
new_beams = Beams([1.] * cube.shape[0] * u.deg)
# Attach the beam
newcube = cube.with_beams(new_beams, raise_error_jybm=True)
def test_multibeam_slice(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
assert isinstance(cube, VaryingResolutionSpectralCube)
np.testing.assert_almost_equal(cube.beams[0].major.value, 0.4)
np.testing.assert_almost_equal(cube.beams[0].minor.value, 0.1)
np.testing.assert_almost_equal(cube.beams[3].major.value, 0.4)
scube = cube[:2,:,:]
np.testing.assert_almost_equal(scube.beams[0].major.value, 0.4)
np.testing.assert_almost_equal(scube.beams[0].minor.value, 0.1)
np.testing.assert_almost_equal(scube.beams[1].major.value, 0.3)
np.testing.assert_almost_equal(scube.beams[1].minor.value, 0.2)
flatslice = cube[0,:,:]
np.testing.assert_almost_equal(flatslice.header['BMAJ'],
(0.4/3600.))
# Test returning a VRODS
spec = cube[:, 0, 0]
assert (cube.beams == spec.beams).all()
# And make sure that Beams gets slice for part of a spectrum
spec_part = cube[:1, 0, 0]
    assert cube.beams[0] == spec_part.beams[0]
def test_basic_unit_conversion(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
assert cube.unit == u.K
mKcube = cube.to(u.mK)
np.testing.assert_almost_equal(mKcube.filled_data[:].value,
(cube.filled_data[:].value *
1e3))
def test_basic_unit_conversion_beams(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
cube._unit = u.K # want beams, but we want to force the unit to be something non-beamy
cube._meta['BUNIT'] = 'K'
assert cube.unit == u.K
mKcube = cube.to(u.mK)
np.testing.assert_almost_equal(mKcube.filled_data[:].value,
(cube.filled_data[:].value *
1e3))
bunits_list = [u.Jy / u.beam, u.K, u.Jy / u.sr, u.Jy / u.pix, u.Jy / u.arcsec**2,
u.mJy / u.beam, u.mK]
@pytest.mark.parametrize(('init_unit'), bunits_list)
def test_unit_conversions_general(data_advs, use_dask, init_unit):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._meta['BUNIT'] = init_unit.to_string()
cube._unit = init_unit
# Check all unit conversion combos:
for targ_unit in bunits_list:
newcube = cube.to(targ_unit)
if init_unit == targ_unit:
np.testing.assert_almost_equal(newcube.filled_data[:].value,
cube.filled_data[:].value)
else:
roundtrip_cube = newcube.to(init_unit)
np.testing.assert_almost_equal(roundtrip_cube.filled_data[:].value,
cube.filled_data[:].value)
@pytest.mark.parametrize(('init_unit'), bunits_list)
def test_multibeam_unit_conversions_general(data_vda_beams, use_dask, init_unit):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
cube._meta['BUNIT'] = init_unit.to_string()
cube._unit = init_unit
# Check all unit conversion combos:
for targ_unit in bunits_list:
newcube = cube.to(targ_unit)
if init_unit == targ_unit:
np.testing.assert_almost_equal(newcube.filled_data[:].value,
cube.filled_data[:].value)
else:
roundtrip_cube = newcube.to(init_unit)
np.testing.assert_almost_equal(roundtrip_cube.filled_data[:].value,
cube.filled_data[:].value)
def test_beam_jpix_checks_array(data_advs, use_dask):
'''
Ensure round-trip consistency in our defined K -> Jy/pix conversions.
'''
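    # Relations exercised below (restating what the code checks):
    #   Jy/pix = (Jy/beam) / (pixels per beam)
    #   K      = (Jy/pix) * (pixels per beam) * jtok
    # where jtok is the per-channel Jy/beam -> K factor derived from the beam.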
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._meta['BUNIT'] = 'Jy / beam'
cube._unit = u.Jy/u.beam
jtok = cube.beam.jtok(cube.with_spectral_unit(u.GHz).spectral_axis)
pixperbeam = cube.pixels_per_beam * u.pix
cube_jypix = cube.to(u.Jy / u.pix)
np.testing.assert_almost_equal(cube_jypix.filled_data[:].value,
(cube.filled_data[:].value /
pixperbeam).value)
Kcube = cube.to(u.K)
np.testing.assert_almost_equal(Kcube.filled_data[:].value,
(cube_jypix.filled_data[:].value *
jtok[:,None,None] * pixperbeam).value)
# Round trips.
roundtrip_cube = cube_jypix.to(u.Jy / u.beam)
np.testing.assert_almost_equal(cube.filled_data[:].value,
roundtrip_cube.filled_data[:].value)
Kcube_from_jypix = cube_jypix.to(u.K)
np.testing.assert_almost_equal(Kcube.filled_data[:].value,
Kcube_from_jypix.filled_data[:].value)
def test_multibeam_jpix_checks_array(data_vda_beams, use_dask):
'''
Ensure round-trip consistency in our defined K -> Jy/pix conversions.
'''
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
cube._meta['BUNIT'] = 'Jy / beam'
cube._unit = u.Jy/u.beam
# NOTE: We are no longer using jtok_factors for conversions. This may need to be removed
# in the future
jtok = cube.jtok_factors()
pixperbeam = cube.pixels_per_beam * u.pix
cube_jypix = cube.to(u.Jy / u.pix)
np.testing.assert_almost_equal(cube_jypix.filled_data[:].value,
(cube.filled_data[:].value /
pixperbeam[:, None, None]).value)
Kcube = cube.to(u.K)
np.testing.assert_almost_equal(Kcube.filled_data[:].value,
(cube_jypix.filled_data[:].value *
jtok[:,None,None] *
pixperbeam[:, None, None]).value)
# Round trips.
roundtrip_cube = cube_jypix.to(u.Jy / u.beam)
np.testing.assert_almost_equal(cube.filled_data[:].value,
roundtrip_cube.filled_data[:].value)
Kcube_from_jypix = cube_jypix.to(u.K)
np.testing.assert_almost_equal(Kcube.filled_data[:].value,
Kcube_from_jypix.filled_data[:].value)
def test_beam_jtok_array(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
cube._meta['BUNIT'] = 'Jy / beam'
cube._unit = u.Jy/u.beam
jtok = cube.beam.jtok(cube.with_spectral_unit(u.GHz).spectral_axis)
# test that the beam equivalencies are correctly automatically defined
Kcube = cube.to(u.K)
np.testing.assert_almost_equal(Kcube.filled_data[:].value,
(cube.filled_data[:].value *
jtok[:,None,None]).value)
def test_multibeam_jtok_array(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
assert cube.meta['BUNIT'].strip() == 'Jy / beam'
assert cube.unit.is_equivalent(u.Jy/u.beam)
#equiv = [bm.jtok_equiv(frq) for bm, frq in zip(cube.beams, cube.with_spectral_unit(u.GHz).spectral_axis)]
jtok = u.Quantity([bm.jtok(frq) for bm, frq in zip(cube.beams, cube.with_spectral_unit(u.GHz).spectral_axis)])
# don't try this, it's nonsense for the multibeam case
# Kcube = cube.to(u.K, equivalencies=equiv)
# np.testing.assert_almost_equal(Kcube.filled_data[:].value,
# (cube.filled_data[:].value *
# jtok[:,None,None]).value)
# test that the beam equivalencies are correctly automatically defined
Kcube = cube.to(u.K)
np.testing.assert_almost_equal(Kcube.filled_data[:].value,
(cube.filled_data[:].value *
jtok[:,None,None]).value)
def test_beam_jtok(data_advs, use_dask):
# regression test for an error introduced when the previous test was solved
# (the "is this an array?" test used len(x) where x could be scalar)
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
# technically this should be jy/beam, but astropy's equivalency doesn't
# handle this yet
cube._meta['BUNIT'] = 'Jy'
cube._unit = u.Jy
equiv = cube.beam.jtok_equiv(np.median(cube.with_spectral_unit(u.GHz).spectral_axis))
jtok = cube.beam.jtok(np.median(cube.with_spectral_unit(u.GHz).spectral_axis))
Kcube = cube.to(u.K, equivalencies=equiv)
np.testing.assert_almost_equal(Kcube.filled_data[:].value,
(cube.filled_data[:].value *
jtok).value)
def test_varyres_moment(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
assert isinstance(cube, VaryingResolutionSpectralCube)
# the beams are very different, but for this test we don't care
cube.beam_threshold = 1.0
with pytest.warns(UserWarning, match="Arithmetic beam averaging is being performed"):
m0 = cube.moment0()
assert_quantity_allclose(m0.meta['beam'].major, 0.35*u.arcsec)
def test_varyres_unitconversion_roundtrip(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
assert isinstance(cube, VaryingResolutionSpectralCube)
assert cube.unit == u.Jy/u.beam
roundtrip = cube.to(u.mJy/u.beam).to(u.Jy/u.beam)
assert_quantity_allclose(cube.filled_data[:], roundtrip.filled_data[:])
# you can't straightforwardly roundtrip to Jy/beam yet
# it requires a per-beam equivalency, which is why there's
# a specific hack to go from Jy/beam (in each channel) -> K
def test_append_beam_to_hdr(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
orig_hdr = fits.getheader(data_advs)
assert cube.header['BMAJ'] == orig_hdr['BMAJ']
assert cube.header['BMIN'] == orig_hdr['BMIN']
assert cube.header['BPA'] == orig_hdr['BPA']
def test_cube_with_swapped_axes(data_vda, use_dask):
"""
Regression test for #208
"""
cube, data = cube_and_raw(data_vda, use_dask=use_dask)
# Check that masking works (this should apply a lazy mask)
cube.filled_data[:]
def test_jybeam_upper(data_vda_jybeam_upper, use_dask):
cube, data = cube_and_raw(data_vda_jybeam_upper, use_dask=use_dask)
assert cube.unit == u.Jy/u.beam
assert hasattr(cube, 'beam')
np.testing.assert_almost_equal(cube.beam.sr.value,
(((1*u.arcsec/np.sqrt(8*np.log(2)))**2).to(u.sr)*2*np.pi).value)
def test_jybeam_lower(data_vda_jybeam_lower, use_dask):
cube, data = cube_and_raw(data_vda_jybeam_lower, use_dask=use_dask)
assert cube.unit == u.Jy/u.beam
assert hasattr(cube, 'beam')
np.testing.assert_almost_equal(cube.beam.sr.value,
(((1*u.arcsec/np.sqrt(8*np.log(2)))**2).to(u.sr)*2*np.pi).value)
def test_jybeam_whitespace(data_vda_jybeam_whitespace, use_dask):
# Regression test for #257 (https://github.com/radio-astro-tools/spectral-cube/pull/257)
cube, data = cube_and_raw(data_vda_jybeam_whitespace, use_dask=use_dask)
assert cube.unit == u.Jy/u.beam
assert hasattr(cube, 'beam')
np.testing.assert_almost_equal(cube.beam.sr.value,
(((1*u.arcsec/np.sqrt(8*np.log(2)))**2).to(u.sr)*2*np.pi).value)
def test_beam_proj_meta(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
moment = cube.moment0(axis=0)
# regression test for #250
assert 'beam' in moment.meta
assert 'BMAJ' in moment.hdu.header
slc = cube[0,:,:]
assert 'beam' in slc.meta
proj = cube.max(axis=0)
assert 'beam' in proj.meta
def test_proj_meta(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
moment = cube.moment0(axis=0)
assert 'BUNIT' in moment.meta
assert moment.meta['BUNIT'] == 'K'
slc = cube[0,:,:]
assert 'BUNIT' in slc.meta
assert slc.meta['BUNIT'] == 'K'
proj = cube.max(axis=0)
assert 'BUNIT' in proj.meta
assert proj.meta['BUNIT'] == 'K'
def test_pix_sign(data_advs, use_dask):
cube, data = cube_and_raw(data_advs, use_dask=use_dask)
s,y,x = (cube._pix_size_slice(ii) for ii in range(3))
assert s>0
assert y>0
assert x>0
cube.wcs.wcs.cdelt *= -1
s,y,x = (cube._pix_size_slice(ii) for ii in range(3))
assert s>0
assert y>0
assert x>0
cube.wcs.wcs.pc *= -1
s,y,x = (cube._pix_size_slice(ii) for ii in range(3))
assert s>0
assert y>0
assert x>0
def test_varyres_moment_logic_issue364(data_vda_beams, use_dask):
""" regression test for issue364 """
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
assert isinstance(cube, VaryingResolutionSpectralCube)
# the beams are very different, but for this test we don't care
cube.beam_threshold = 1.0
with pytest.warns(UserWarning, match="Arithmetic beam averaging is being performed"):
# note that cube.moment(order=0) is different from cube.moment0()
# because cube.moment0() calls cube.moment(order=0, axis=(whatever)),
# but cube.moment doesn't necessarily have to receive the axis kwarg
m0 = cube.moment(order=0)
# note that this is just a sanity check; one should never use the average beam
assert_quantity_allclose(m0.meta['beam'].major, 0.35*u.arcsec)
@pytest.mark.skipif('not casaOK')
@pytest.mark.parametrize('filename', ['data_vda_beams',
'data_vda_beams_image'],
indirect=['filename'])
def test_mask_bad_beams(filename, use_dask):
"""
Prior to #543, this tested two different scenarios of beam masking. After
that, the tests got mucked up because we can no longer have minor>major in
the beams.
"""
if 'image' in str(filename) and not use_dask:
pytest.skip()
cube, data = cube_and_raw(filename, use_dask=use_dask)
assert isinstance(cube, base_class.MultiBeamMixinClass)
# make sure all of the beams are initially good (finite)
assert np.all(cube.goodbeams_mask)
# make sure cropping the cube maintains the mask
assert np.all(cube[:3].goodbeams_mask)
# middle two beams have same area
masked_cube = cube.mask_out_bad_beams(0.01,
reference_beam=Beam(0.3*u.arcsec,
0.2*u.arcsec,
60*u.deg))
assert np.all(masked_cube.mask.include()[:,0,0] == [False,True,True,False])
assert np.all(masked_cube.goodbeams_mask == [False,True,True,False])
mean = masked_cube.mean(axis=0)
assert np.all(mean == cube[1:3,:,:].mean(axis=0))
#doesn't test anything any more
# masked_cube2 = cube.mask_out_bad_beams(0.5,)
# mean2 = masked_cube2.mean(axis=0)
# assert np.all(mean2 == (cube[2,:,:]+cube[1,:,:])/2)
# assert np.all(masked_cube2.goodbeams_mask == [False,True,True,False])
def test_convolve_to_equal(data_vda, use_dask):
cube, data = cube_and_raw(data_vda, use_dask=use_dask)
convolved = cube.convolve_to(cube.beam)
assert np.all(convolved.filled_data[:].value == cube.filled_data[:].value)
# And one channel
plane = cube[0]
convolved = plane.convolve_to(cube.beam)
assert np.all(convolved.value == plane.value)
# Pass a kwarg to the convolution function
convolved = plane.convolve_to(cube.beam, nan_treatment='fill')
def test_convolve_to(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
convolved = cube.convolve_to(Beam(0.5*u.arcsec))
# Pass a kwarg to the convolution function
convolved = cube.convolve_to(Beam(0.5*u.arcsec),
nan_treatment='fill')
def test_convolve_to_jybeam_onebeam(point_source_5_one_beam, use_dask):
cube, data = cube_and_raw(point_source_5_one_beam, use_dask=use_dask)
convolved = cube.convolve_to(Beam(10*u.arcsec))
# The peak of the point source should remain constant in Jy/beam
np.testing.assert_allclose(convolved[:, 5, 5].value, cube[:, 5, 5].value, atol=1e-5, rtol=1e-5)
assert cube.unit == u.Jy / u.beam
def test_convolve_to_jybeam_multibeams(point_source_5_spectral_beams, use_dask):
cube, data = cube_and_raw(point_source_5_spectral_beams, use_dask=use_dask)
convolved = cube.convolve_to(Beam(10*u.arcsec))
# The peak of the point source should remain constant in Jy/beam
np.testing.assert_allclose(convolved[:, 5, 5].value, cube[:, 5, 5].value, atol=1e-5, rtol=1e-5)
assert cube.unit == u.Jy / u.beam
def test_convolve_to_with_bad_beams(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
convolved = cube.convolve_to(Beam(0.5*u.arcsec))
# From: https://github.com/radio-astro-tools/radio-beam/pull/87
# updated exception to BeamError when the beam cannot be deconvolved.
# BeamError is not new in the radio_beam package, only its use here.
# Keeping the ValueError for testing against <v0.3.3 versions
with pytest.raises((BeamError, ValueError),
match="Beam could not be deconvolved"):
# should not work: biggest beam is 0.4"
convolved = cube.convolve_to(Beam(0.35*u.arcsec))
# middle two beams are smaller than 0.4
masked_cube = cube.mask_channels([False, True, True, False])
# should work: biggest beam is 0.3 arcsec (major)
convolved = masked_cube.convolve_to(Beam(0.35*u.arcsec))
# this is a copout test; should really check for correctness...
assert np.all(np.isfinite(convolved.filled_data[1:3]))
def test_jybeam_factors(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
assert_allclose(cube.jtok_factors(),
[15111171.12641629, 10074201.06746361, 10074287.73828087,
15111561.14508185],
rtol=5e-7
)
def test_channelmask_singlebeam(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
masked_cube = cube.mask_channels([False, True, True, False])
assert np.all(masked_cube.mask.include()[:,0,0] == [False, True, True, False])
def test_mad_std(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
if int(astropy.__version__[0]) < 2:
with pytest.raises(NotImplementedError) as exc:
cube.mad_std()
else:
# mad_std run manually on data
result = np.array([[0.3099842, 0.2576232],
[0.1822292, 0.6101782],
[0.2819404, 0.2084236]])
np.testing.assert_almost_equal(cube.mad_std(axis=0).value, result)
mcube = cube.with_mask(cube < 0.98*u.K)
result2 = np.array([[0.3099842, 0.2576232],
[0.1822292, 0.6101782],
[0.2819404, 0.2084236]])
np.testing.assert_almost_equal(mcube.mad_std(axis=0).value, result2)
def test_mad_std_nan(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
# HACK in a nan
data[1, 1, 0] = np.nan
hdu = copy.copy(cube.hdu)
hdu.data = copy.copy(data)
# use the include-everything mask so we're really testing that nan is
# ignored
oldmask = copy.copy(cube.mask)
if use_dask:
cube = DaskSpectralCube.read(hdu)
else:
cube = SpectralCube.read(hdu)
if int(astropy.__version__[0]) < 2:
with pytest.raises(NotImplementedError) as exc:
cube.mad_std()
else:
# mad_std run manually on data
# (note: would have entry [1,0] = nan in bad case)
result = np.array([[0.30998422, 0.25762317],
[0.24100427, 0.6101782 ],
[0.28194039, 0.20842358]])
resultB = stats.mad_std(data, axis=0, ignore_nan=True)
# this test is to make sure we're testing against the right stuff
np.testing.assert_almost_equal(result, resultB)
assert cube.mask.include().sum() == 23
np.testing.assert_almost_equal(cube.mad_std(axis=0).value, result)
# run the test with the inclusive mask
cube._mask = oldmask
assert cube.mask.include().sum() == 24
np.testing.assert_almost_equal(cube.mad_std(axis=0).value, result)
# try to force closure
del hdu
del cube
del data
del oldmask
del result
def test_mad_std_params(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
# mad_std run manually on data
result = np.array([[0.3099842, 0.2576232],
[0.1822292, 0.6101782],
[0.2819404, 0.2084236]])
if use_dask:
np.testing.assert_almost_equal(cube.mad_std(axis=0).value, result)
cube.mad_std(axis=1)
cube.mad_std(axis=(1, 2))
else:
np.testing.assert_almost_equal(cube.mad_std(axis=0, how='cube').value, result)
np.testing.assert_almost_equal(cube.mad_std(axis=0, how='ray').value, result)
with pytest.raises(NotImplementedError):
cube.mad_std(axis=0, how='slice')
with pytest.raises(NotImplementedError):
cube.mad_std(axis=1, how='slice')
with pytest.raises(NotImplementedError):
cube.mad_std(axis=(1,2), how='ray')
def test_caching(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
assert len(cube._cache) == 0
worldextrema = cube.world_extrema
assert len(cube._cache) == 1
# see https://stackoverflow.com/questions/46181936/access-a-parent-class-property-getter-from-the-child-class
world_extrema_function = base_class.SpatialCoordMixinClass.world_extrema.fget.wrapped_function
assert cube.world_extrema is cube._cache[(world_extrema_function, ())]
np.testing.assert_almost_equal(worldextrema.value,
cube.world_extrema.value)
def test_spatial_smooth_g2d(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
    # Gaussian 2D smoothing test
g2d = Gaussian2DKernel(3)
cube_g2d = cube.spatial_smooth(g2d)
# Check first slice
result0 = np.array([[0.0585795, 0.0588712],
[0.0612525, 0.0614312],
[0.0576757, 0.057723 ]])
np.testing.assert_almost_equal(cube_g2d[0].value, result0)
# Check third slice
result2 = np.array([[0.027322 , 0.027257 ],
[0.0280423, 0.02803 ],
[0.0259688, 0.0260123]])
np.testing.assert_almost_equal(cube_g2d[2].value, result2)
def test_spatial_smooth_preserves_unit(data_adv, use_dask):
"""
Regression test for issue527
"""
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
cube._unit = u.K
    # Gaussian 2D smoothing test
g2d = Gaussian2DKernel(3)
cube_g2d = cube.spatial_smooth(g2d)
assert cube_g2d.unit == u.K
def test_spatial_smooth_t2d(data_adv, use_dask):
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
# Tophat 2D smoothing test
t2d = Tophat2DKernel(3)
cube_t2d = cube.spatial_smooth(t2d)
# Check first slice
result0 = np.array([[0.1265607, 0.1265607],
[0.1265607, 0.1265607],
[0.1265607, 0.1265607]])
np.testing.assert_almost_equal(cube_t2d[0].value, result0)
# Check third slice
result2 = np.array([[0.0585135, 0.0585135],
[0.0585135, 0.0585135],
[0.0585135, 0.0585135]])
np.testing.assert_almost_equal(cube_t2d[2].value, result2)
@pytest.mark.openfiles_ignore
@pytest.mark.parametrize('filename', ['point_source_5_one_beam', 'point_source_5_spectral_beams'],
indirect=['filename'])
@pytest.mark.xfail(raises=utils.BeamUnitsError, strict=True)
def test_spatial_smooth_jybm_error(filename, use_dask):
'''Raise an error when Jy/beam units are getting spatially smoothed. This tests SCs and VRSCs'''
cube, data = cube_and_raw(filename, use_dask=use_dask)
# Tophat 2D smoothing test
t2d = Tophat2DKernel(3)
cube_t2d = cube.spatial_smooth(t2d)
@pytest.mark.openfiles_ignore
@pytest.mark.parametrize('filename', ['point_source_5_one_beam', 'point_source_5_spectral_beams'],
indirect=['filename'])
@pytest.mark.xfail(raises=utils.BeamUnitsError, strict=True)
def test_spatial_smooth_median_jybm_error(filename, use_dask):
'''Raise an error when Jy/beam units are getting spatially median smoothed. This tests SCs and VRSCs'''
cube, data = cube_and_raw(filename, use_dask=use_dask)
cube_median = cube.spatial_smooth_median(3)
def test_spatial_smooth_median(data_adv, use_dask):
pytest.importorskip('scipy.ndimage')
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
cube_median = cube.spatial_smooth_median(3)
# Check first slice
result0 = np.array([[0.8172354, 0.9038805],
[0.7068793, 0.8172354],
[0.7068793, 0.7068793]])
np.testing.assert_almost_equal(cube_median[0].value, result0)
# Check third slice
result2 = np.array([[0.3038468, 0.3038468],
[0.303744 , 0.3038468],
[0.1431722, 0.303744 ]])
np.testing.assert_almost_equal(cube_median[2].value, result2)
@pytest.mark.parametrize('num_cores', (None, 1))
def test_spectral_smooth_median(num_cores, data_adv, use_dask):
pytest.importorskip('scipy.ndimage')
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
cube_spectral_median = cube.spectral_smooth_median(3, num_cores=num_cores)
# Check first slice
result = np.array([0.9038805, 0.1431722, 0.1431722, 0.9662900])
np.testing.assert_almost_equal(cube_spectral_median[:,1,1].value, result)
@pytest.mark.skipif('WINDOWS')
def test_spectral_smooth_median_4cores(data_adv, use_dask):
pytest.importorskip('joblib')
pytest.importorskip('scipy.ndimage')
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
cube_spectral_median = cube.spectral_smooth_median(3, num_cores=4)
# Check first slice
result = np.array([0.9038805, 0.1431722, 0.1431722, 0.9662900])
np.testing.assert_almost_equal(cube_spectral_median[:,1,1].value, result)
def update_function():
print("Update Function Call")
@pytest.mark.skipif('WINDOWS')
def test_smooth_update_function_parallel(capsys, data_adv):
pytest.importorskip('joblib')
pytest.importorskip('scipy.ndimage')
cube, data = cube_and_raw(data_adv, use_dask=False)
# this is potentially a major disaster: if update_function can't be
# pickled, it won't work, which is why update_function is (very badly)
# defined outside of this function
cube_spectral_median = cube.spectral_smooth_median(3, num_cores=4,
update_function=update_function)
sys.stdout.flush()
captured = capsys.readouterr()
assert captured.out == "Update Function Call\n"*6
def test_smooth_update_function_serial(capsys, data_adv):
# This function only makes sense for the plain SpectralCube class
pytest.importorskip('scipy.ndimage')
cube, data = cube_and_raw(data_adv, use_dask=False)
def update_function():
print("Update Function Call")
cube_spectral_median = cube.spectral_smooth_median(3, num_cores=1, parallel=False,
update_function=update_function)
captured = capsys.readouterr()
assert captured.out == "Update Function Call\n"*6
@pytest.mark.skipif('not scipyOK')
def test_parallel_bad_params(data_adv):
# This function only makes sense for the plain SpectralCube class
cube, data = cube_and_raw(data_adv, use_dask=False)
with pytest.raises(ValueError,
match=("parallel execution was not requested, but "
"multiple cores were: these are incompatible "
"options. Either specify num_cores=1 or "
"parallel=True")):
with warnings.catch_warnings():
# FITSFixed warnings can pop up here and break the raises check
warnings.simplefilter('ignore', AstropyWarning)
cube.spectral_smooth_median(3, num_cores=2, parallel=False,
update_function=update_function)
with warnings.catch_warnings(record=True) as wrn:
warnings.simplefilter('ignore', AstropyWarning)
cube.spectral_smooth_median(3, num_cores=1, parallel=True,
update_function=update_function)
assert ("parallel=True was specified but num_cores=1. "
"Joblib will be used to run the task with a "
"single thread.") in str(wrn[-1].message)
def test_initialization_from_units(data_adv, use_dask):
"""
Regression test for issue 447
"""
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
newcube = SpectralCube(data=cube.filled_data[:], wcs=cube.wcs)
assert newcube.unit == cube.unit
def test_varyres_spectra(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
assert isinstance(cube, VaryingResolutionSpectralCube)
sp = cube[:,0,0]
assert isinstance(sp, VaryingResolutionOneDSpectrum)
assert hasattr(sp, 'beams')
sp = cube.mean(axis=(1,2))
assert isinstance(sp, VaryingResolutionOneDSpectrum)
assert hasattr(sp, 'beams')
def test_median_2axis(data_adv, use_dask):
"""
    As of this writing, bottleneck.nanmedian did not accept an axis given as a
    tuple/list, so this test makes sure that case is properly handled.
"""
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
cube_median = cube.median(axis=(1, 2))
# Check first slice
result0 = np.array([0.7620573, 0.3086828, 0.3037954, 0.7455546])
np.testing.assert_almost_equal(cube_median.value, result0)
def test_varyres_mask(data_vda_beams, use_dask):
cube, data = cube_and_raw(data_vda_beams, use_dask=use_dask)
cube._beams.major.value[0] = 0.9
cube._beams.minor.value[0] = 0.05
cube._beams.major.value[3] = 0.6
cube._beams.minor.value[3] = 0.09
    # mask out one beam
goodbeams = cube.identify_bad_beams(0.5, )
assert all(goodbeams == np.array([False, True, True, True]))
mcube = cube.mask_out_bad_beams(0.5)
assert hasattr(mcube, '_goodbeams_mask')
assert all(mcube.goodbeams_mask == goodbeams)
assert len(mcube.beams) == 3
sp_masked = mcube[:,0,0]
assert hasattr(sp_masked, '_goodbeams_mask')
assert all(sp_masked.goodbeams_mask == goodbeams)
assert len(sp_masked.beams) == 3
try:
assert mcube.unmasked_beams == cube.beams
except ValueError:
# older versions of beams
assert np.all(mcube.unmasked_beams == cube.beams)
try:
# check that slicing works too
assert mcube[:5].unmasked_beams == cube[:5].beams
except ValueError:
assert np.all(mcube[:5].unmasked_beams == cube[:5].beams)
def test_mask_none(use_dask):
# Regression test for issues that occur when mask is None
data = np.arange(24).reshape((2, 3, 4))
wcs = WCS(naxis=3)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VELO-HEL']
cube = SpectralCube(data * u.Jy / u.beam, wcs=wcs, use_dask=use_dask)
assert_quantity_allclose(cube[0, :, :],
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] * u.Jy / u.beam)
assert_quantity_allclose(cube[:, 0, 0],
[0, 12] * u.Jy / u.beam)
@pytest.mark.parametrize('filename', ['data_vda', 'data_vda_beams'],
indirect=['filename'])
def test_mask_channels_preserve_mask(filename, use_dask):
# Regression test for a bug that caused the mask to not be preserved.
cube, data = cube_and_raw(filename, use_dask=use_dask)
# Add a mask to the cube
mask = np.ones(cube.shape, dtype=bool)
mask[:, ::2, ::2] = False
cube = cube.with_mask(mask)
# Mask by channels
cube = cube.mask_channels([False, True, False, True])
# Check final mask is a combination of both
expected_mask = mask.copy()
expected_mask[::2] = False
np.testing.assert_equal(cube.mask.include(), expected_mask)
def test_minimal_subcube(use_dask):
if not use_dask:
pytest.importorskip('scipy')
data = np.arange(210, dtype=float).reshape((5, 6, 7))
data[0] = np.nan
data[2] = np.nan
data[4] = np.nan
data[:,0] = np.nan
data[:,3:4] = np.nan
data[:, :, 0:2] = np.nan
data[:, :, 4:7] = np.nan
wcs = WCS(naxis=3)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VELO-HEL']
cube = SpectralCube(data * u.Jy / u.beam, wcs=wcs, use_dask=use_dask)
cube = cube.with_mask(np.isfinite(data))
subcube = cube.minimal_subcube()
assert subcube.shape == (3, 5, 2)
def test_minimal_subcube_nomask(use_dask):
if not use_dask:
pytest.importorskip('scipy')
data = np.arange(210, dtype=float).reshape((5, 6, 7))
wcs = WCS(naxis=3)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VELO-HEL']
cube = SpectralCube(data * u.Jy / u.beam, wcs=wcs, use_dask=use_dask)
# verify that there is no mask
assert cube._mask is None
# this should not raise an Exception
subcube = cube.minimal_subcube()
# shape is unchanged
assert subcube.shape == (5, 6, 7)
def test_regression_719(data_adv, use_dask):
"""
Issue 719: exception raised when checking for beam
"""
cube, data = cube_and_raw(data_adv, use_dask=use_dask)
# force unit for use below
cube._unit = u.Jy/u.beam
assert hasattr(cube, 'beam')
slc = cube[0,:,:]
# check that the hasattr tests work
from .. cube_utils import _has_beam, _has_beams
assert _has_beam(slc)
assert not _has_beams(slc)
# regression test: full example that broke
mx = cube.max(axis=0)
beam = cube.beam
cfrq = 100*u.GHz
# This should not raise an exception
mx_K = (mx*u.beam).to(u.K,
u.brightness_temperature(beam_area=beam,
frequency=cfrq))
| keflavich/spectral-cube | spectral_cube/tests/test_spectral_cube.py | Python | bsd-3-clause | 96,662 |
"""The gearbest component."""
| jnewland/home-assistant | homeassistant/components/gearbest/__init__.py | Python | apache-2.0 | 30 |
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import os.path
from Common import CommonVariables
from ConfigParser import ConfigParser
from ConfigUtil import ConfigUtil
from ConfigUtil import ConfigKeyValuePair
class EncryptionConfig(object):
def __init__(self, encryption_environment,logger):
self.encryptionEnvironment = encryption_environment
self.passphrase_file_name = None
self.bek_filesystem = None
self.volume_type = None
self.secret_id = None
self.encryption_config = ConfigUtil(encryption_environment.encryption_config_file_path,'azure_crypt_config',logger)
def config_file_exists(self):
return self.encryption_config.config_file_exists()
def get_bek_filename(self):
return self.encryption_config.get_config(CommonVariables.PassphraseFileNameKey)
def get_bek_filesystem(self):
return self.encryption_config.get_config(CommonVariables.BekVolumeFileSystemKey)
def get_secret_id(self):
return self.encryption_config.get_config(CommonVariables.SecretUriKey)
def commit(self):
key_value_pairs = []
command = ConfigKeyValuePair(CommonVariables.PassphraseFileNameKey,self.passphrase_file_name)
key_value_pairs.append(command)
bek_file_system = ConfigKeyValuePair(CommonVariables.BekVolumeFileSystemKey,CommonVariables.BekVolumeFileSystem)
key_value_pairs.append(bek_file_system)
parameters = ConfigKeyValuePair(CommonVariables.SecretUriKey,self.secret_id)
key_value_pairs.append(parameters)
self.encryption_config.save_configs(key_value_pairs) | thomas1206/azure-linux-extensions | VMEncryption/main/EncryptionConfig.py | Python | apache-2.0 | 2,233 |
# -*- coding: utf-8 -*-
"""
Pearson Correlation model: Inferring a correlation coefficient.
Chapter 5.1, Bayesian Cognitive Modeling.
Created Aug/2015 by Johannes Keyser <j.keyser@donders.ru.nl>
TODO: Not running yet, because of matrix manipulation mysteries in PyMC3/Theano.
"""
import pymc3 as pm
import numpy as np
import pandas as pd
import theano.tensor as tt
dataset = 1 # choose data set 1 or 2 (where 2 is just the first, twice)
data1 = np.array([[0.8, 102], [1.0, 98], [0.5, 100], [0.9, 105], [0.7, 103],
[0.4, 110], [1.2, 99], [1.4, 87], [0.6, 113], [1.1, 89],
[1.3, 93]])
if dataset == 1:
x = data1
elif dataset == 2:
x = np.vstack((data1, data1))
# from help(tt.stacklists), but doesn't work at all!
#import theano.function
#a, b, c, d = tt.scalars('abcd')
#X = tt.stacklists([[a, b], [c, d]])
#f = theano.function([a, b, c, d], X)
model = pm.Model()
with model:
# priors
mu = pm.Normal('mu', mu=0, tau=1/100**2, shape=(2,1))
lmbda = pm.Gamma('lambda', alpha=0.001, beta=0.001, shape=(2,1))
r = pm.Uniform('r', lower=-1, upper=1)
sigma = pm.Deterministic('sigma', tt.sqrt(1/lmbda))
# Reparameterization
#FIXME: How to create (and then inverse) a simple 2x2 matrix???
T = tt.stacklists([[1/lmbda[0] , r*sigma[0]*sigma[1]],
[r*sigma[0]*sigma[1], 1/lmbda[1]]])
# T = tt.stack([1/lmbda[0] , r*sigma[0]*sigma[1],
# r*sigma[0]*sigma[1], 1/lmbda[1]])
# TI = tt.invert(T)
# TI = tt.matrix(T)
# TODO? Side-step inversion by doing it myself, i.e. 1/det(A)*reshuffle(A)?
testtau = pm.constant(np.eye(2)) # works...
pm.det(testtau) # works...
x = pm.MvNormal('x', mu=0, tau=testtau)
# # Reparameterization
# sigma[1] <- 1/sqrt(lambda[1])
# sigma[2] <- 1/sqrt(lambda[2])
# T[1,1] <- 1/lambda[1]
# T[1,2] <- r*sigma[1]*sigma[2]
# T[2,1] <- r*sigma[1]*sigma[2]
# T[2,2] <- 1/lambda[2]
# TI[1:2,1:2] <- inverse(T[1:2,1:2])
# data come from a Gaussian
# x = pm.Normal('x', mu=mu, sd=sigma, observed=x)
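    # A possible way to realize the "side-step inversion" idea noted above.
    # This is an untested sketch, not the original author's solution: if mu and
    # lmbda are given shape=2 (so sigma[0] and sigma[1] are scalars), the 2x2
    # precision matrix of the bivariate normal has a closed form and no matrix
    # inversion is needed:
    #   prec = 1.0 / (sigma[0]**2 * sigma[1]**2 * (1 - r**2))
    #   TI = tt.stacklists([[prec * sigma[1]**2, -prec * r * sigma[0] * sigma[1]],
    #                       [-prec * r * sigma[0] * sigma[1], prec * sigma[0]**2]])
    #   xobs = pm.MvNormal('xobs', mu=mu, tau=TI, observed=x)
    # ('xobs' is a new name so the observed node does not overwrite the data
    # array x, as the MvNormal line above currently does.)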
# instantiate sampler
stepFunc = pm.Metropolis() # or try pm.NUTS()
# draw posterior samples (in 4 parallel running chains)
Nsample = 1000
Nchains = 2
traces = pm.sample(Nsample, step=stepFunc, njobs=Nchains)
plotVars = ('mu','sigma')
axs = pm.traceplot(traces, vars=plotVars, combined=False)
# plot joint posterior samples
tstr = 'Joint posterior samples'
post = np.vstack([traces['mu'], traces['sigma']])
post = post.transpose()
df = pd.DataFrame(post, columns=plotVars)
ax = df.plot(kind='scatter', x=plotVars[0], y=plotVars[1], alpha=.1, title=tstr) | JoKeyser/BCMinPyMC3 | ch5-1_Correlation1.py | Python | gpl-3.0 | 2,683 |
#! /usr/bin/env python
# do nothing as NMRPipe goes into inf loop.
| google-code-export/nmrglue | tests/pipe_proc_tests/dev.py | Python | bsd-3-clause | 68 |
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import rand
from time import time
"""
INPUT: quantized mains fdiff
OUTPUT: appliance fdiff
Code taken from Lasagne and nolearn!
"""
SEQ_LENGTH = 400
N_HIDDEN = 5
N_SEQ_PER_BATCH = 30 # Number of sequences per batch
LEARNING_RATE = 1e-1 # SGD learning rate
N_ITERATIONS = 100 # Number of training iterations
N_INPUT_FEATURES = 10
N_OUTPUTS = 1
input_shape = (N_SEQ_PER_BATCH, SEQ_LENGTH, N_INPUT_FEATURES)
output_shape = (N_SEQ_PER_BATCH, SEQ_LENGTH, N_OUTPUTS)
############### GENERATE DATA ##############################
def quantized(inp, all_hot=True):
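    """Map each scalar mains value (assumed to lie in [-1, 1]) onto a 10-bin
    code in {-1, +1}: one-hot when all_hot=False; with all_hot=True every bin
    between the middle bin (index 5) and the value's bin is also set, giving a
    thermometer-style encoding.
    """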
N_BINS = 10
out = np.zeros(shape=(N_SEQ_PER_BATCH, SEQ_LENGTH, N_BINS))
for i_batch in range(N_SEQ_PER_BATCH):
for i_element in range(SEQ_LENGTH):
hist, _ = np.histogram(inp[i_batch, i_element, 0], bins=N_BINS,
range=(-1, 1))
if all_hot:
where = np.where(hist==1)[0][0]
if where > 5:
hist[5:where] = 1
elif where < 5:
hist[where:5] = 1
out[i_batch,i_element,:] = hist
return (out * 2) - 1
def gen_single_appliance(power, on_duration, min_off_duration=20, fdiff=True):
length = SEQ_LENGTH + 1 if fdiff else SEQ_LENGTH
appliance_power = np.zeros(length)
i = 0
while i < length:
if np.random.binomial(n=1, p=0.2):
end = min(i + on_duration, length)
appliance_power[i:end] = power
i += on_duration + min_off_duration
else:
i += 1
return np.diff(appliance_power) if fdiff else appliance_power
def gen_batches_of_single_appliance(*args, **kwargs):
batches = np.zeros(shape=(N_SEQ_PER_BATCH, SEQ_LENGTH, 1))
for i in range(N_SEQ_PER_BATCH):
batches[i, :, :] = gen_single_appliance(*args, **kwargs).reshape(SEQ_LENGTH, 1)
return batches
def gen_unquantized_data(n_appliances=2,
appliance_powers=[10,30],
appliance_on_durations=[10,2], validation=False):
    '''Generate simple energy disaggregation data.
:parameters:
:returns:
- X : np.ndarray, shape=(n_batch, length, 1)
Input sequence
- y : np.ndarray, shape=(n_batch, length, 1)
Target sequence, appliance 1
'''
y = gen_batches_of_single_appliance(power=appliance_powers[0],
on_duration=appliance_on_durations[0])
X = y.copy()
    for power, on_duration in list(zip(appliance_powers, appliance_on_durations))[1:]:
X += gen_batches_of_single_appliance(power=power, on_duration=on_duration)
max_power = np.sum(appliance_powers)
return X / max_power, y / max_power
def gen_data(*args, **kwargs):
X, y = gen_unquantized_data(*args, **kwargs)
return quantized(X), y
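# Quick shape check for the generators above (a sketch, mirroring the asserts
# in Net.training_loop):
#   X, y = gen_data()
#   X.shape == (N_SEQ_PER_BATCH, SEQ_LENGTH, 10)  # 10 quantization bins
#   y.shape == (N_SEQ_PER_BATCH, SEQ_LENGTH, 1)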
class ansi:
# from dnouri/nolearn/nolearn/lasagne.py
BLUE = '\033[94m'
GREEN = '\033[32m'
ENDC = '\033[0m'
######################## Neural network class ########################
class Net(object):
# Much of this code is adapted from craffel/nntools/examples/lstm.py
def __init__(self):
print("Initialising network...")
import theano
import theano.tensor as T
import lasagne
from lasagne.layers import (InputLayer, LSTMLayer, ReshapeLayer,
ConcatLayer, DenseLayer)
theano.config.compute_test_value = 'raise'
# Construct LSTM RNN: One LSTM layer and one dense output layer
l_in = InputLayer(shape=input_shape)
# setup fwd and bck LSTM layer.
l_fwd = LSTMLayer(
l_in, N_HIDDEN, backwards=False, learn_init=True, peepholes=True)
l_bck = LSTMLayer(
l_in, N_HIDDEN, backwards=True, learn_init=True, peepholes=True)
# concatenate forward and backward LSTM layers
concat_shape = (N_SEQ_PER_BATCH * SEQ_LENGTH, N_HIDDEN)
l_fwd_reshape = ReshapeLayer(l_fwd, concat_shape)
l_bck_reshape = ReshapeLayer(l_bck, concat_shape)
l_concat = ConcatLayer([l_fwd_reshape, l_bck_reshape], axis=1)
l_recurrent_out = DenseLayer(l_concat, num_units=N_OUTPUTS,
nonlinearity=None)
l_out = ReshapeLayer(l_recurrent_out, output_shape)
input = T.tensor3('input')
target_output = T.tensor3('target_output')
# add test values
input.tag.test_value = rand(
*input_shape).astype(theano.config.floatX)
target_output.tag.test_value = rand(
*output_shape).astype(theano.config.floatX)
print("Compiling Theano functions...")
# Cost = mean squared error
cost = T.mean((l_out.get_output(input) - target_output)**2)
# Use NAG for training
all_params = lasagne.layers.get_all_params(l_out)
updates = lasagne.updates.nesterov_momentum(cost, all_params, LEARNING_RATE)
# Theano functions for training, getting output, and computing cost
self.train = theano.function(
[input, target_output],
cost, updates=updates, on_unused_input='warn',
allow_input_downcast=True)
self.y_pred = theano.function(
[input], l_out.get_output(input), on_unused_input='warn',
allow_input_downcast=True)
self.compute_cost = theano.function(
[input, target_output], cost, on_unused_input='warn',
allow_input_downcast=True)
print("Done initialising network.")
def training_loop(self):
# column 0 = training cost
# column 1 = validation cost
self.costs = np.zeros(shape=(N_ITERATIONS, 2))
self.costs[:,:] = np.nan
# Generate a "validation" sequence whose cost we will compute
X_val, y_val = gen_data(validation=True)
assert X_val.shape == input_shape
assert y_val.shape == output_shape
# Adapted from dnouri/nolearn/nolearn/lasagne.py
print("""
Epoch | Train cost | Valid cost | Train / Val | Dur per epoch
--------|--------------|--------------|---------------|---------------\
""")
# Training loop
for n in range(N_ITERATIONS):
t0 = time() # for calculating training duration
X, y = gen_data()
train_cost = self.train(X, y).flatten()[0]
validation_cost = self.compute_cost(X_val, y_val).flatten()[0]
self.costs[n] = train_cost, validation_cost
if n==N_ITERATIONS-1 or not n % 10:
duration = time() - t0
is_best_train = train_cost == np.nanmin(self.costs[:,0])
is_best_valid = validation_cost == np.nanmin(self.costs[:,1])
print(" {:>5} | {}{:>10.6f}{} | {}{:>10.6f}{} |"
" {:>11.6f} | {:>3.1f}s".format(
n,
ansi.BLUE if is_best_train else "",
train_cost,
ansi.ENDC if is_best_train else "",
ansi.GREEN if is_best_valid else "",
validation_cost,
ansi.ENDC if is_best_valid else "",
train_cost / validation_cost,
duration
))
def plot_costs(self, ax=None):
if ax is None:
ax = plt.gca()
ax.plot(self.costs[:,0], label='training')
ax.plot(self.costs[:,1], label='validation')
ax.set_xlabel('Iteration')
ax.set_ylabel('Cost')
ax.legend()
plt.show()
return ax
def plot_estimates(self, axes=None):
if axes is None:
_, axes = plt.subplots(2, sharex=True)
X, y = gen_unquantized_data()
y_predictions = self.y_pred(quantized(X))
axes[0].set_title('Appliance forward difference')
axes[0].plot(y_predictions[0,:,0], label='Estimates')
axes[0].plot(y[0,:,0], label='Appliance ground truth')
axes[0].legend()
axes[1].set_title('Aggregate')
axes[1].plot(X[0,:,0], label='Fdiff')
axes[1].plot(np.cumsum(X[0,:,0]), label='Cumsum')
axes[1].legend()
plt.show()
if __name__ == "__main__":
net = Net()
net.training_loop()
net.plot_costs()
net.plot_estimates()
| JackKelly/neuralnilm_prototype | scripts/experiment033.py | Python | mit | 8,485 |
'''
Created on Jul 7, 2017
@author: alvarna
'''
from __future__ import print_function
import codecs
import os
import inspect
from sanskrit_parser.lexical_analyzer.sandhi import Sandhi
from sanskrit_parser.base.sanskrit_base import SanskritObject, SLP1
import logging
import re
import six
import json
logger = logging.getLogger(__name__)
def sandhi_join_pass(sandhiobj, split, join):
objs = map(lambda x: SanskritObject(x, encoding=SLP1), split)
joins = sandhiobj.join(*objs)
d = {
"input": map(to_devanagari, split),
"expected": to_devanagari(join),
"file": filename,
"line": linenum}
if joins and join in joins:
res = True
else:
d["actual"] = map(to_devanagari, joins) if joins else None
res = False
s = json.dumps(d, ensure_ascii=False) + "\n"
return (res, s)
def sandhi_split_pass(sandhiobj, split, join):
splits = sandhiobj.split_all(SanskritObject(join, encoding=SLP1))
d = {
"input": to_devanagari(join),
"expected": map(to_devanagari, split),
# "actual": map(to_devanagari, splits) if splits else None,
"file": filename,
"line": linenum}
if splits and split in splits:
res = True
else:
res = False
s = json.dumps(d, ensure_ascii=False) + "\n"
return (res, s)
def load_reference_data():
files = [
'refs.txt',
'2.karnabhara-ext.txt',
'130-short-stories-extracted.txt',
'vetalkatha_ext.txt',
'4.dutaghatotgajam-ext.txt',
'3.dutavakyam-ext.txt',
'madyama_ext.txt',
'vrubhangam_ext.txt',
'balaramayanam_ext.txt',
'5.balacharitham-ext.txt',
'1.abhishakanatakam-ext.txt',
'7.charudattam-ext.txt',
'vinodini-ext.txt',
'astanga-hridayam-sandhi-extract1-27.txt',
'madhavi-ext.txt',
'manjusa-ext.txt',
'tarkabhasha-ext.txt',
'Rajkathakunj_ext.txt',
'Aakhyanvallari_ext.txt',
'sanskritkathashatkam1_ext.txt',
'nyayasara-ext.txt',
'tarkchudamani-ext.txt',
'Sanskritkathakunj_ext.txt',
'agnipuran-1-111-sandhi_ext.txt',
'vyutpattivada-ext.txt'
]
sandhi_references = []
base_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
directory = os.path.join(base_dir, "sandhi_test_data")
for filename in files:
sandhi_references.extend(load_reference_data_from_file(os.path.join(directory, filename)))
return sandhi_references
def clean_references(splits, full):
def _dumpchars(str):
# Remove whitespace characters
s = re.sub(r"\W+", "", str)
# Random characters in UOHD files
for c in ",'-;().?!\"0123456789":
s = s.replace(c, '')
# Some bad visargas
s = s.replace(':', 'H')
# UOHD RHS has word-ending anusvaras
s = re.sub('M$', 'm', s)
return s
full = _dumpchars(full)
splits = list(map(_dumpchars, splits))
if splits[-1] == '':
splits.pop()
if splits[0] == '':
splits.pop(0)
if len(splits) != 2:
return None
# UOHD errors, final visarga is sometimes missing
if len(splits[-1]) > 1 and splits[-1][-2:] == "AH" and \
full[-1] == "A":
full = full + "H"
if len(splits[-1]) > 1 and splits[-1][-2:] == "aH" and \
full[-1] == "a":
full = full + "H"
if splits[-1][-1] == "A" and len(full) > 1 and full[-2:] == "AH":
splits[-1] = splits[-1] + "H"
if splits[-1][-1] == "a" and len(full) > 1 and full[-2:] == "aH":
splits[-1] = splits[-1] + "H"
# UOHD stores sandhied final words!
# This is not a full fix
full = re.sub("o$", "aH", full)
full = re.sub("d$", "t", full)
return splits, full
def load_reference_data_from_file(filename):
sandhi_references = []
basename = os.path.basename(filename)
logger.debug("Processing tests from file %s", basename)
with codecs.open(filename, "rb", 'utf-8') as f:
for linenum, line in enumerate(f):
line = line.strip()
if line.startswith('#') or line == '':
continue
ref = SanskritObject(line).transcoded(SLP1)
if "=>" in line:
joined, splits = map(six.text_type.strip, ref.split("=>"))
elif "=" in line:
splits, joined = map(six.text_type.strip, ref.split("="))
else:
continue
split = list(map(six.text_type.strip, splits.split('+')))
clean = clean_references(split, joined)
if clean:
split, joined = clean
sandhi_references.append((tuple(split), joined, basename, linenum + 1))
return sandhi_references
def to_devanagari(obj):
if isinstance(obj, (six.text_type, six.string_types)):
obj = SanskritObject(obj, encoding=SLP1)
if isinstance(obj, SanskritObject):
return obj.devanagari()
else:
        return list(map(to_devanagari, obj))
if __name__ == "__main__":
base_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
directory = os.path.join(base_dir, "sandhi_test_data")
join_passing = codecs.open(os.path.join(directory, "sandhi_join_passing.txt"), "w", encoding='utf-8')
join_failing = codecs.open(os.path.join(directory, "sandhi_join_failing.txt"), "w", encoding='utf-8')
split_passing = codecs.open(os.path.join(directory, "sandhi_split_passing.txt"), "w", encoding='utf-8')
split_failing = codecs.open(os.path.join(directory, "sandhi_split_failing.txt"), "w", encoding='utf-8')
sandhiobj = Sandhi()
num_join_pass, num_join_fail, num_split_pass, num_split_fail = 0, 0, 0, 0
for split, join, filename, linenum in load_reference_data():
(res, s) = sandhi_join_pass(sandhiobj, split, join)
if res:
join_passing.write(s)
num_join_pass += 1
else:
join_failing.write(s)
num_join_fail += 1
(res, s) = sandhi_split_pass(sandhiobj, split, join)
if res:
split_passing.write(s)
num_split_pass += 1
else:
split_failing.write(s)
num_split_fail += 1
join_passing.close()
join_failing.close()
    split_passing.close()
split_failing.close()
print("Join:")
print("Pass: {0} / {2} Fail: {1} / {2}".format(num_join_pass, num_join_fail, (num_join_fail + num_join_pass)))
print("Split:")
print("Pass: {0} / {2} Fail: {1} / {2}".format(num_split_pass, num_split_fail, (num_split_fail + num_split_pass)))
| kmadathil/sanskrit_parser | tests/generate_sandhi_pass_fail.py | Python | mit | 6,680 |
from django.apps import AppConfig
class LauncherConfig(AppConfig):
name = 'launcher'
| hikelee/launcher | launcher/apps.py | Python | mit | 91 |
from opencog.atomspace import types, TruthValue, get_type_name
import formulas
from pln.rule import Rule
'''
Some Rules evaluate various kinds of logical links based explicitly on
set membership. A set = a ConceptNode. Other Rules calculate them
heuristically, based on set probabilities and logical links.
'''
# Todo: try to separate these rules further into several files by
# category. The rules in this file were under the header 'inheritance
# rules' in rules.py, but may need to be further classified.
__VERBOSE__ = False
BOOLEAN_LINKS = [types.AndLink,
types.OrLink,
types.NotLink]
FIRST_ORDER_LINKS = [types.InheritanceLink,
types.SubsetLink,
types.IntensionalInheritanceLink,
types.SimilarityLink,
types.ExtensionalSimilarityLink,
types.IntensionalSimilarityLink]
HIGHER_ORDER_LINKS = [types.ImplicationLink,
types.EquivalenceLink]
class InversionRule(Rule):
"""
A->B entails B->A
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name = "InversionRule<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [B, A])],
inputs=[chainer.link(link_type, [A, B]), A, B],
formula=formulas.inversionFormula)
class DeductionRule(Rule):
"""
A->B, B->C entails A->C
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
C = chainer.new_variable()
Rule.__init__(self,
name = "DeductionRule<%s>"%(get_type_name(link_type),),
formula=formulas.deductionIndependenceBasedFormula,
outputs=[chainer.link(link_type, [A, C])],
inputs=[chainer.link(link_type, [A, B]),
chainer.link(link_type, [B, C]),
B,
C])
# Todo: It doesn't have the right formula
class DeductionGeometryRule(Rule):
"""
A->B, B->C entails A->C. Uses concept geometry.
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
C = chainer.new_variable()
Rule.__init__(self,
name="DeductionGeometryRule<%s>"%(get_type_name(link_type),),
formula=formulas.deductionGeometryFormula,
outputs=[chainer.link(link_type, [A, C])],
inputs=[chainer.link(link_type, [A, B]),
chainer.link(link_type, [B, C])])
# TODO add macro-rules for Abduction and Induction based on Deduction
# and Inversion
'''
deduction
S is M, M is L, then S is L
induction
M is S, M is L, then S is L
invert same same
abduction
S is M, L is M, then S is L
invert
'''
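# Concretely (illustrative only, not taken from this file): a chainer would
# typically instantiate these patterns with a first-order link type, e.g.
#   DeductionRule(chainer, types.InheritanceLink)   # S->M, M->L  |-  S->L
#   InductionRule(chainer, types.InheritanceLink)   # M->S, M->L  |-  S->L
#   AbductionRule(chainer, types.InheritanceLink)   # S->M, L->M  |-  S->L
# Any type from FIRST_ORDER_LINKS can be substituted for InheritanceLink.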
class InductionRule(Rule):
"""
M->S, M->L, S->L
"""
def __init__(self, chainer, link_type):
S = chainer.new_variable()
M = chainer.new_variable()
L = chainer.new_variable()
Rule.__init__(self,
name="InductionRule<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [S, L])],
inputs=[chainer.link(link_type, [M, S]),
chainer.link(link_type, [M, L]), S, M, L],
formula=formulas.inductionFormula)
class AbductionRule(Rule):
"""
S is M, L is M, S->L
"""
def __init__(self, chainer, link_type):
S = chainer.new_variable()
M = chainer.new_variable()
L = chainer.new_variable()
Rule.__init__(self,
name="AbductionRule<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [S, L])],
inputs=[chainer.link(link_type, [S, M]),
chainer.link(link_type, [L, M]), S, M, L],
formula=formulas.abductionFormula)
class TransitiveSimilarityRule(Rule):
"""
Similarity A B, Similarity B C => Similarity A C
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
C = chainer.new_variable()
Rule.__init__(self,
name="TransitiveSimilarityRule<%s>"%(get_type_name(link_type),),
formula=formulas.transitiveSimilarityFormula,
outputs=[chainer.link(link_type, [A, C])],
inputs=[chainer.link(link_type, [A, B]),
chainer.link(link_type, [B, C]),
A, B, C])
class PreciseModusPonensRule(Rule):
"""
Given P(A->B) and P(NOT(A)->B) and sA, estimate sB
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
notA = chainer.link(types.NotLink, [A])
Rule.__init__(self,
name="PreciseModusPonensRule<%s>"%(get_type_name(link_type),),
outputs=[B],
inputs=[chainer.link(link_type, [A, B]),
chainer.link(link_type, [notA, B]),
A],
formula=formulas.preciseModusPonensFormula)
class ModusPonensRule(Rule):
"""
Given P(A->B) and sA, estimate sB
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name="ModusPonensRule<%s>"%(get_type_name(link_type),),
outputs=[B],
inputs=[chainer.link(link_type, [A, B]),
A],
formula=formulas.modusPonensFormula)
class SymmetricModusPonensRule(Rule):
"""
Given (Similarity A B) and sA, estimate sB
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name="SymmetricModusPonensRule<%s>"%(get_type_name(link_type),),
outputs=[B],
inputs=[chainer.link(link_type, [A, B]),
A],
formula=formulas.symmetricModusPonensFormula)
class TermProbabilityRule(Rule):
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
AB = chainer.link(link_type, [A, B])
BA = chainer.link(link_type, [B, A])
Rule.__init__(self,
name="TermProbabilityRule<%s>"%(get_type_name(link_type),),
outputs=[B],
inputs=[AB, BA, A],
formula=formulas.termProbabilityFormula)
class InheritanceRule(Rule):
"""
Create a (mixed) InheritanceLink based on the SubsetLink and
IntensionalInheritanceLink (based on the definition of mixed
InheritanceLinks)
"""
def __init__(self, chainer):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
outputs=[chainer.link(types.InheritanceLink, [A, B])],
inputs=[chainer.link(types.SubsetLink, [A, B]),
chainer.link(types.IntensionalInheritanceLink,
[A, B])],
formula=formulas.inheritanceFormula)
class SimilarityRule(Rule):
"""
SimilarityLink A B
|A and B| / |A or B|
"""
def __init__(self, chainer):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
outputs=[chainer.link(types.SimilarityLink, [A, B])],
inputs=[chainer.link(types.AndLink, [A, B]),
chainer.link(types.OrLink, [A, B])],
formula=formulas.extensionalSimilarityFormula)
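# Worked example of the ratio above (sketch): with |A and B| = 0.2 and
# |A or B| = 0.5, the extensional similarity strength comes out as
# 0.2 / 0.5 = 0.4.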
class SubsetRule1(Rule):
"""
SubsetLink A B
|A and B| / |A|
= P(B|A)
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name="SubsetRule<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [A, B])],
inputs=[chainer.link(types.AndLink, [A, B]),
A],
formula=formulas.subsetFormula)
class AndToSubsetRule1(Rule):
"""
SubsetLink A B
|A and B| / |A|
= P(B|A)
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
Rule.__init__(self,
name="AndToSubsetRule1<%s>"%(get_type_name(link_type),),
outputs=[chainer.link(link_type, [A, B])],
inputs=[chainer.link(types.AndLink, [A, B]),
A],
formula=formulas.subsetFormula)
class AndToSubsetRuleN(Rule):
"""
SubsetLink And(A B C) D
|And(A B C D)| / |And A B C|
    = P(D|A,B,C)
"""
def __init__(self, chainer, link_type, N):
vars = chainer.make_n_variables(N)
lhs = chainer.link(types.AndLink, vars[:-1])
rhs = vars[-1]
Rule.__init__(self,
name="AndToSubsetRuleN<%s,%s>"%(get_type_name(link_type),N),
outputs=[chainer.link(link_type, [lhs, rhs])],
inputs=[chainer.link(types.AndLink, vars),
lhs],
formula=formulas.subsetFormula)
class AndAs1stArgInsideLinkRule(Rule):
"""
ANDLink
InheritanceLink A C
InheritanceLink B C
|-
InheritanceLink
ANDLink A B
C
Created to create AndLinks inside InheritanceLinks (original use case:
context rules); could be useful for other link types as well
@see: https://github.com/opencog/opencog/pull/904
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
C = chainer.new_variable()
AndAB = chainer.link(types.AndLink, [A, B])
Rule.__init__(self,
name="AndAs1stArgInsideLinkRule<%s>"
%(get_type_name(link_type)),
inputs=[C, chainer.link(link_type, [A, C]),
chainer.link(link_type, [B, C]), A, B],
outputs=[chainer.link(link_type, [AndAB, C]),
AndAB],
formula=formulas.andAs1stArgInsideLinkFormula)
class AndAs2ndArgInsideLinkRule(Rule):
"""
ANDLink
InheritanceLink A B
InheritanceLink A C
|-
InheritanceLink
A
ANDLink B C
"""
def __init__(self, chainer, link_type):
A = chainer.new_variable()
B = chainer.new_variable()
C = chainer.new_variable()
AndBC = chainer.link(types.AndLink, [B, C])
Rule.__init__(self,
name="AndAs2ndArgInsideLinkRule<%s>"
%(get_type_name(link_type)),
inputs=[chainer.link(types.InheritanceLink, [A, B]),
chainer.link(types.InheritanceLink, [A, C]),
A, B, C],
outputs=[chainer.link(types.InheritanceLink, [A, AndBC]),
AndBC],
formula=formulas.andAs2ndArgInsideLinkFormula)
| printedheart/opencog | opencog/python/pln_old/rules/inheritance_rules.py | Python | agpl-3.0 | 11,834 |
import pytest
from pygeoid.constants.solar_system_gm import get_body_gm, gm_moon
def test_get_body_gm():
with pytest.raises(ValueError):
body = get_body_gm('no_name_body')
body_gm = get_body_gm('moon')
assert gm_moon == body_gm
| ioshchepkov/pygeoid | pygeoid/constants/test/test_solar_system_gm.py | Python | mit | 253 |
#!/usr/bin/env python
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body='Hello World!')
print("[x] Sent 'hello World'!")
connection.close()
| peter-wangxu/python_play | rabbitmq_test/hello_world/send.py | Python | apache-2.0 | 358 |
from z3 import BitVecVal, BV2Int, If, LShR, UDiv, ULT, UGT, URem
def ADD(x, y):
return x + y
def MUL(x, y):
return x * y
def SUB(x, y):
return x - y
def DIV(x, y):
return If(y == 0, 0, UDiv(x, y))
def SDIV(x, y):
return If(y == 0, 0, x / y)
def MOD(x, y):
return If(y == 0, 0, URem(x, y))
def SMOD(x, y):
return If(y == 0, 0, x % y)
def LT(x, y):
return If(ULT(x, y), BitVecVal(1, x.size()), BitVecVal(0, x.size()))
def GT(x, y):
return If(UGT(x, y), BitVecVal(1, x.size()), BitVecVal(0, x.size()))
def SLT(x, y):
return If(x < y, BitVecVal(1, x.size()), BitVecVal(0, x.size()))
def SGT(x, y):
return If(x > y, BitVecVal(1, x.size()), BitVecVal(0, x.size()))
def EQ(x, y):
return If(x == y, BitVecVal(1, x.size()), BitVecVal(0, x.size()))
def ISZERO(x):
return If(x == 0, BitVecVal(1, x.size()), BitVecVal(0, x.size()))
def AND(x, y):
return x & y
def OR(x, y):
return x | y
def NOT(x):
return ~(x)
def SHL(x, y):
return y << x
def SHR(x, y):
return LShR(y, x)
def SAR(x, y):
return y >> x
def BYTE(i, x):
bit = (i + 1) * 8
return If(
UGT(i, x.size() / 8 - 1),
BitVecVal(0, x.size()),
(LShR(x, (x.size() - bit))) & 0xff
)
def SIGNEXTEND(i, x):
bitBV = i * 8 + 7
bitInt = BV2Int(i) * 8 + 7
test = BitVecVal(1, x.size()) << bitBV
mask = test - 1
return If(
bitInt >= x.size(),
x,
If(
(x & test) == 0,
x & mask,
x | ~mask
)
)
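# Quick sanity checks (sketch; z3's simplify evaluates the constant expressions):
#   from z3 import simplify
#   simplify(BYTE(BitVecVal(31, 256), BitVecVal(0xff, 256)))  # -> 255, least significant byte
#   simplify(SHR(BitVecVal(4, 256), BitVecVal(0x10, 256)))    # -> 1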
| ethereum/solidity | test/formal/opcodes.py | Python | gpl-3.0 | 1,395 |
"""
Parser for silme-compatible translation formats.
"""
import codecs
import silme
from collections import OrderedDict
from copy import copy
from silme.format.dtd import FormatParser as DTDParser
from silme.format.ini import FormatParser as IniParser
from silme.format.inc import FormatParser as IncParser
from silme.format.properties import FormatParser as PropertiesParser
from pontoon.sync.exceptions import ParseError, SyncError
from pontoon.sync.utils import (
create_parent_directory,
escape_quotes,
unescape_quotes,
)
from pontoon.sync.formats.base import ParsedResource
from pontoon.sync.vcs.models import VCSTranslation
class SilmeEntity(VCSTranslation):
def __init__(self, silme_object, comments=None, order=0, copy_string=True):
"""
:param copy_string:
If True, copy the string from the silme_object. Otherwise,
self.strings will be an empty dict. Used for creating empty
copies of translations from source resources.
"""
self.silme_object = silme_object
self.comments = comments or []
self.order = order
if copy_string:
self.strings = {None: self.silme_object.value}
else:
self.strings = {}
@property
def key(self):
return self.silme_object.id
@property
def context(self):
return self.key
@property
def source_string(self):
return self.silme_object.value
@property
def source_string_plural(self):
return ""
@property
def fuzzy(self):
return False
@fuzzy.setter
def fuzzy(self, fuzzy):
pass # We don't use fuzzy in silme
@property
def source(self):
return []
def __eq__(self, other):
return self.key == other.key and self.strings.get(None) == other.strings.get(
None
)
def __ne__(self, other):
return not self.__eq__(other)
def __bool__(self):
# python 3
return bool(self.strings)
class SilmeResource(ParsedResource):
def __init__(self, parser, path, source_resource=None):
self.parser = parser
self.path = path
self.source_resource = source_resource
self.entities = OrderedDict() # Preserve entity order.
# Bug 1193860: unescape quotes in some files
self.escape_quotes_on = "mobile/android/base" in path and parser is DTDParser
# Copy entities from the source_resource if it's available.
if source_resource:
for key, entity in source_resource.entities.items():
self.entities[key] = copy_source_entity(entity)
try:
# Only uncomment MOZ_LANGPACK_CONTRIBUTORS if this is a .inc
# file and a source resource (i.e. it has no source resource
# itself).
self.structure = parser.get_structure(
read_file(
path,
uncomment_moz_langpack=parser is IncParser and not source_resource,
)
)
# Parse errors are handled gracefully by silme
# No need to catch them here
except OSError as err:
# If the file doesn't exist, but we have a source resource,
# we can keep going, we'll just not have any translations.
if source_resource:
return
else:
raise ParseError(err)
comments = []
current_order = 0
for obj in self.structure:
if isinstance(obj, silme.core.entity.Entity):
if self.escape_quotes_on:
obj.value = unescape_quotes(obj.value)
entity = SilmeEntity(obj, comments, current_order)
self.entities[entity.key] = entity
current_order += 1
comments = []
elif isinstance(obj, silme.core.structure.Comment):
for comment in obj:
# Silme groups comments together, so we strip
# whitespace and split them up.
lines = str(comment).strip().split("\n")
comments += [line.strip() for line in lines]
@property
def translations(self):
return list(self.entities.values())
def save(self, locale):
"""
Load the source resource, modify it with changes made to this
Resource instance, and save it over the locale-specific
resource.
"""
if self.source_resource is None:
raise SyncError(
"Cannot save silme resource {}: No source resource given.".format(
self.path
)
)
# Only uncomment MOZ_LANGPACK_CONTRIBUTORS if we have a
# translation for it
new_structure = self.parser.get_structure(
read_file(
self.source_resource.path,
uncomment_moz_langpack=self.entities.get(
"MOZ_LANGPACK_CONTRIBUTORS", False
),
)
)
# Update translations in the copied resource.
entities = [
SilmeEntity(obj)
for obj in new_structure
if isinstance(obj, silme.core.entity.Entity)
]
for silme_entity in entities:
key = silme_entity.key
translated_entity = self.entities.get(key)
if translated_entity and None in translated_entity.strings:
translation = translated_entity.strings[None]
if self.escape_quotes_on:
translation = escape_quotes(translation)
new_structure.modify_entity(key, translation)
else:
# Remove untranslated entity and following newline
pos = new_structure.entity_pos(key)
new_structure.remove_entity(key)
try:
line = new_structure[pos]
except IndexError:
# No newline at end of file
continue
if isinstance(line, str) and line.startswith("\n"):
line = line[len("\n") :]
new_structure[pos] = line
if len(line) == 0:
new_structure.remove_element(pos)
# Temporary fix for bug 1236281 until bug 721211 lands
if (
self.path.endswith("browser/chrome/browser/browser.properties")
and locale.code == "zh-CN"
):
new_entity = silme.core.entity.Entity(
"browser.startup.homepage", "https://start.firefoxchina.cn"
)
new_structure.add_entity(new_entity)
new_structure.add_string("\n")
create_parent_directory(self.path)
with codecs.open(self.path, "w", "utf-8") as f:
f.write(self.parser.dump_structure(new_structure))
def read_file(path, uncomment_moz_langpack=False):
"""Read the resource at the given path."""
with codecs.open(path, "r", "utf-8") as f:
# .inc files have a special commented-out entity called
# MOZ_LANGPACK_CONTRIBUTORS. We optionally un-comment it before
# parsing so locales can translate it.
if uncomment_moz_langpack:
lines = []
for line in f:
if line.startswith("# #define MOZ_LANGPACK_CONTRIBUTORS"):
line = line[2:]
lines.append(line)
content = "".join(lines)
else:
content = f.read()
return content
def copy_source_entity(entity):
"""
Copy an entity from a source file to a new SilmeEntity instance.
The new copy will have an empty strings attribute so that entities
that are copied but not modified during sync will not be saved in
the translated resource.
"""
return SilmeEntity(
entity.silme_object,
copy(entity.comments), # Members are strings, shallow is fine.
entity.order,
copy_string=False,
)
def parse(parser, path, source_path=None, locale=None):
# TODO: Cache the source resource to avoid re-parsing it a bunch.
if source_path is not None:
source_resource = SilmeResource(parser, source_path)
else:
source_resource = None
return SilmeResource(parser, path, source_resource=source_resource)
def parse_properties(path, source_path=None, locale=None):
return parse(PropertiesParser, path, source_path)
def parse_ini(path, source_path=None, locale=None):
return parse(IniParser, path, source_path)
def parse_inc(path, source_path=None, locale=None):
return parse(IncParser, path, source_path)
def parse_dtd(path, source_path=None, locale=None):
return parse(DTDParser, path, source_path)
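# Typical usage (sketch; the paths and the locale object are illustrative, not
# part of this module):
#   resource = parse_properties("fr/app.properties",
#                               source_path="en-US/app.properties")
#   for translation in resource.translations:
#       ...  # inspect or update translation.strings
#   resource.save(locale)  # rewrites fr/app.properties based on the source file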
| mathjazz/pontoon | pontoon/sync/formats/silme.py | Python | bsd-3-clause | 8,834 |
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
HIPCHAT_BACKEND = getattr(settings, 'HIPCHAT_BACKEND',
'djhipchat.backends.locmem.HipChatBackend')
def get_backend(backend=None, **kwargs):
path = backend or settings.HIPCHAT_BACKEND
try:
mod_name, klass_name = path.rsplit('.', 1)
mod = import_module(mod_name)
except ImportError as e:
raise ImproperlyConfigured(
('Error importing HipChat backend module %s: "%s"' %
(mod_name, e)))
try:
klass = getattr(mod, klass_name)
except AttributeError:
raise ImproperlyConfigured(('Module "%s" does not define a '
'"%s" class' % (mod_name, klass_name)))
return klass(**kwargs)
def send_message(room_id, message, sender=None,
message_format='html', notify=False,
color='yellow'):
"""
Sends a message to HipChat.
:param room_id: The ID of the Room to send to.
:param sender: The text name of the sender.
:param message: The text or HTML of the message.
:param message_format: 'text' or 'html'.
:param notify: Whether to trigger a notification for users in the room.
:param color: The color of the message.
"""
sender = (sender or
getattr(settings, 'HIPCHAT_DEFAULT_SENDER', '') or
'Django')
return get_backend().send_message(room_id, message, sender,
message_format, notify, color)
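# Example call (sketch; the room id and HipChat settings are assumed, not part
# of this module):
#   send_message('123456', '<b>Build finished</b>', sender='CI',
#                message_format='html', notify=True, color='green')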
| paulcwatts/djhipchat2 | djhipchat/__init__.py | Python | bsd-3-clause | 1,669 |
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt
from PyQt5.QtWidgets import QDialog
from urh.signalprocessing.Filter import Filter, FilterType
from urh.ui.ui_filter_dialog import Ui_FilterDialog
class FilterDialog(QDialog):
filter_accepted = pyqtSignal(Filter)
def __init__(self, dsp_filter: Filter, parent=None):
super().__init__(parent)
self.ui = Ui_FilterDialog()
self.ui.setupUi(self)
self.setWindowFlags(Qt.Window)
self.error_message = ""
self.set_dsp_filter_status(dsp_filter.filter_type)
self.create_connects()
def set_dsp_filter_status(self, dsp_filter_type: FilterType):
if dsp_filter_type == FilterType.moving_average:
self.ui.radioButtonMovingAverage.setChecked(True)
self.ui.lineEditCustomTaps.setEnabled(False)
self.ui.spinBoxNumTaps.setEnabled(True)
elif dsp_filter_type == FilterType.dc_correction:
self.ui.radioButtonDCcorrection.setChecked(True)
self.ui.lineEditCustomTaps.setEnabled(False)
self.ui.spinBoxNumTaps.setEnabled(False)
else:
self.ui.radioButtonCustomTaps.setChecked(True)
self.ui.spinBoxNumTaps.setEnabled(True)
self.ui.lineEditCustomTaps.setEnabled(True)
def create_connects(self):
self.ui.radioButtonMovingAverage.clicked.connect(self.on_radio_button_moving_average_clicked)
self.ui.radioButtonCustomTaps.clicked.connect(self.on_radio_button_custom_taps_clicked)
self.ui.radioButtonDCcorrection.clicked.connect(self.on_radio_button_dc_correction_clicked)
self.ui.spinBoxNumTaps.valueChanged.connect(self.set_error_status)
self.ui.lineEditCustomTaps.textEdited.connect(self.set_error_status)
self.ui.buttonBox.accepted.connect(self.on_accept_clicked)
self.ui.buttonBox.rejected.connect(self.reject)
def build_filter(self) -> Filter:
if self.ui.radioButtonMovingAverage.isChecked():
n = self.ui.spinBoxNumTaps.value()
return Filter([1/n for _ in range(n)], filter_type=FilterType.moving_average)
elif self.ui.radioButtonDCcorrection.isChecked():
return Filter([], filter_type=FilterType.dc_correction)
else:
# custom filter
try:
taps = eval(self.ui.lineEditCustomTaps.text())
try:
taps = list(map(float, taps))
self.error_message = ""
return Filter(taps)
except (ValueError, TypeError) as e:
self.error_message = "Error casting taps:\n" + str(e)
return None
except SyntaxError as e:
self.error_message = "Error parsing taps:\n" + str(e)
return None
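    # Example of the custom-taps input accepted above (a sketch): entering
    # "[0.25, 0.25, 0.25, 0.25]" in the taps field is eval'd and cast to float,
    # yielding a 4-tap moving-average Filter.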
def set_error_status(self):
dsp_filter = self.build_filter()
if dsp_filter is None:
self.ui.lineEditCustomTaps.setStyleSheet("background: red")
self.ui.lineEditCustomTaps.setToolTip(self.error_message)
elif len(dsp_filter.taps) != self.ui.spinBoxNumTaps.value():
self.ui.lineEditCustomTaps.setStyleSheet("background: yellow")
self.ui.lineEditCustomTaps.setToolTip("The number of the filter taps does not match the configured number of taps. I will use your configured filter taps.")
else:
self.ui.lineEditCustomTaps.setStyleSheet("")
self.ui.lineEditCustomTaps.setToolTip("")
@pyqtSlot(bool)
def on_radio_button_moving_average_clicked(self, checked: bool):
if checked:
self.set_dsp_filter_status(FilterType.moving_average)
@pyqtSlot(bool)
def on_radio_button_custom_taps_clicked(self, checked: bool):
if checked:
self.set_dsp_filter_status(FilterType.custom)
self.set_error_status()
@pyqtSlot(bool)
def on_radio_button_dc_correction_clicked(self, checked: bool):
if checked:
self.set_dsp_filter_status(FilterType.dc_correction)
@pyqtSlot()
def on_accept_clicked(self):
dsp_filter = self.build_filter()
self.filter_accepted.emit(dsp_filter)
self.accept()
| jopohl/urh | src/urh/controller/dialogs/FilterDialog.py | Python | gpl-3.0 | 4,227 |
# -*- coding: utf-8 -*-
import sys
import time
import random
import operator
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.styles import Font, Alignment
from datetime import datetime
from collections import OrderedDict
date_format = "%d.%m.%Y" # format used for displaying dates
db = "exsec.xlsx" # default database, unless another name is given as a startup parameter
wos = "{}".format(time.strftime("%Y")) # sets the worksheet name (current year)
# 1. Saves gig data to an Excel file
# 2. Reads gig data from the Excel file
# 3. Edits gig data in the Excel file
# 4. Shows outstanding tasks based on the data
def init_db():
"""Yrittää avata excel-tiedoston asetetuilla otsikoilla, mikäli valmista ei löydy"""
try:
wb = load_workbook(filename = db) #yrittää aukaista tietokannan
except: #jos tietokantaa ei löydy, luo uuden tietokannan
print("Luodaan uusi tietokanta")
wb = Workbook()
ws = wb.active
ws.title = wos
kirjaimet = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M"]
tekstit = ["ID", "Nimi", "Työpäivät", "Työpäiviä", "Tapahtumapaikka", "Toimeksiantaja", "Status", "Laskutusperuste", "Työn määrä (h)", "Laskutettu summa", "Saatu summa", "Artistit", "Artistien määrä"]
leveydet = [7, 20, 40, 20, 20, 20, 20, 20, 20, 20, 20, 250, 20]
align = Alignment(horizontal="center", vertical="center")
fontotsikko, fontteksti = Font(name="Calibri", size=12, bold=True), Font(name="Calibri", size=11, bold=False)
        for i in range(0, 13): # go through the lists, creating the column headers
solu = "{}1".format(kirjaimet[i])
ws[solu] = tekstit[i]
ws[solu].alignment = align
ws[solu].font = fontotsikko
ws.column_dimensions[kirjaimet[i]].width = leveydet[i]
wb.save(db)
def get_wb():
"""Yrittää avata keikkadatan Excel-tiedostosta."""
wb = load_workbook(filename = db)
sr = wb[wos]
return wb, sr
def get_value_list(column, tba = True):
"""Luo listan valitun sarakkeen löydetyistä arvoista."""
wb, sr = get_wb()
value_list = [] #alustaa tyhjän listan
for row in range(2, sr.max_column): #Käy läpi arvosarakkeet
value = get_value(column, row, tba) #hakee arvon
if value != None: #Lisää arvon listaan vain jos se ei ole tyhjä
value_list.append(value)
return value_list
def get_value(column, row, tba = True):
"""Hakee annetun solun arvon"""
wb, sr = get_wb()
cell = "{}{}".format(column, row)
value = sr[cell].value
if tba == True: #Oletuksena asettaa tyhjän solun arvoksi TBA
if value == None:
value = "-TBA-"
return value
def set_value(value, cell):
"""Asettaa annetun solun arvon halutuksi"""
wb, sr = get_wb()
align = Alignment(horizontal="center", vertical="center")
fontotsikko = Font(name="Calibri", size=12, bold=True)
fontteksti = Font(name="Calibri", size=11, bold=False)
    if sr[cell].value == None: # check whether the cell is empty
sr[cell] = value
sr[cell].alignment = align
sr[cell].font = fontteksti
    else: # warn if the cell already has content
        print("Virhe! Solu ei {} ole tyhjä.".format(cell))
    while True: # try to save the file, warning if it is already open elsewhere
try:
wb.save(db)
        except PermissionError: # check whether the file is open elsewhere
            vahvistus = input("Virhe! Tiedosto on jo avoinna. Sulje tiedosto muualta jatkaaksesi.") # input waits for the user to act before the function continues
        else:
            break # exit once saving succeeds
def lisaa_keikka():
"""Kyselee kysymyspatteriston ja tallentaa vastaukset."""
wb, sr = get_wb()
empty_row = sr.max_row + 1
set_ID(empty_row)
set_name(empty_row)
set_dates(empty_row)
set_place(empty_row)
set_company(empty_row)
set_status(empty_row)
set_billing(empty_row)
set_hours(empty_row)
set_bsum(empty_row)
set_gsum(empty_row)
set_artist(empty_row)
def set_ID(row):
    uusi_id = False # assume the generated ID is already in the list
    id_list = get_value_list("A", False)
    if id_list == []:
        id = random.randint(1000, 9999) # generate a random four-digit number
    else:
        while uusi_id == False: # loop as long as the ID is already in the list
            id = random.randint(1000, 9999) # generate a random four-digit number
            try:
                id_list.index(id) # look for the generated ID in the list
            except ValueError:
                uusi_id = True # exit once a previously unused ID comes up
set_value(id, "A{}".format(row))
def set_name(row):
    tapahtuman_nimi = input("Tapahtuman nimi? ") # ask for the event name
set_value(tapahtuman_nimi, "B{}".format(row))
def set_dates(row):
paivat_valmis = False
while paivat_valmis == False:
tyopaivat, tyopaivat_count = "", 0
paivat = input("Työpäivät? ")
if paivat == "":
tyopaivat_count = ""
paivat_valmis = True
else:
paivat = paivat.split(", ")
paivat_count = len(paivat)
for paiva in paivat:
try:
datetime.strptime(paiva, date_format)
except ValueError:
print("Päivämäärä virheellisessä muodossa")
else:
tyopaivat_count += 1
tyopaivat += paiva
if tyopaivat_count < paivat_count:
tyopaivat += ","
if tyopaivat_count == paivat_count:
paivat_valmis = True
set_value(tyopaivat, "C{}".format(row))
set_value(paivat_count, "D{}".format(row))
def set_place(row):
    tapahtumapaikka = input("Tapahtumapaikka? ") # ask for the venue
set_value(tapahtumapaikka, "E{}".format(row))
def set_company(row):
    toimeksiantaja = input("Toimeksiantaja? ") # ask for the client
if toimeksiantaja == "lp":
toimeksiantaja = "Livepaletti Oy"
if toimeksiantaja == "ew":
toimeksiantaja = "Eventworks Oy"
set_value(toimeksiantaja, "F{}".format(row))
def set_status(row):
while True:
        status = input("Keikan status? ") # ask for the gig status
if status == "1":
status = "Ennakkotilattu"
break
elif status == "2":
status = "Tilattu"
break
elif status == "3":
status = "Tuotannossa"
break
elif status == "4":
status = "Laskutettu"
break
elif status == "5":
status = "Valmis"
break
elif status == "" or status == "Ennakkotilattu" or status == "Tilattu" or status == "Tuotannossa" or status == "Laskutettu" or status == "Valmis":
break
else:
print("")
print("Virheellinen status! Anna arvo 1-5.")
print("1: Ennakkotilattu. 2: Tilattu. 3: Tuotannossa. 4: Laskutettu. 5: Valmis.")
print("")
set_value(status, "G{}".format(row))
def set_billing(row):
while True:
        laskutusperuste = input("Laskutusperuste? ").lower() # ask for the billing basis
if laskutusperuste == "":
break
elif laskutusperuste == "p" or laskutusperuste == "d":
laskutusperuste = "Päivä"
break
elif laskutusperuste == "t" or laskutusperuste == "h":
laskutusperuste = "Tunti"
break
elif laskutusperuste == "u":
laskutusperuste = "Urakka"
break
elif laskutusperuste == "s":
lisatiedot = input("Laskutusperusteen lisätiedot: ")
laskutusperuste = "Sekalainen: %s" % (lisatiedot)
break
else:
print("Virheellinen toiminto!")
print("P (päivä), T (tunti), U (urakka), S (sekalainen) ")
print(" ")
set_value(laskutusperuste, "H{}".format(row))
def set_hours(row):
while True:
tyon_maara = input("Tehtyjen työtuntien määrä? ")
if tyon_maara == "":
break
else:
try:
float(tyon_maara)
except ValueError:
print("Virhe! Et antanut numeroa!")
else:
break
set_value(tyon_maara, "I{}".format(row))
def set_bsum(row):
while True:
laskutettu_summa = input("Laskutettu summa? ")
if laskutettu_summa == "":
break
else:
try:
float(laskutettu_summa)
except ValueError:
print("Virheellinen summa. Syötä numeerinen arvo.")
else:
laskutettu_summa = format(float(laskutettu_summa), ".2f")
break
set_value(laskutettu_summa, "J{}".format(row))
def set_gsum(row):
while True:
saatu_summa = input("Saatu summa? ")
if saatu_summa == "":
break
else:
try:
float(saatu_summa)
except ValueError:
print("Virheellinen summa. Syötä numeerinen arvo.")
else:
saatu_summa = format(float(saatu_summa), ".2f")
break
set_value(saatu_summa, "K{}".format(row))
def set_artist(row):
artistit = input("Artistit? ")
artistitdata = ""
if artistit == "ei":
artistitdata += "Ei artisteja"
elif artistit != "":
artistit = artistit.replace(", ", ",")
artistit_lista = artistit.split(",")
artistit_count = str(len(artistit_lista))
set_value(artistit, "L{}".format(row))
set_value(artistit_count, "M{}".format(row))
def nayta_keikat():
"""Tulostaa tallennetut keikat tiedostosta."""
first_day_list = [] #alustaa listan
id_list, date_list = get_value_list("A", False), get_value_list("C", False)
for date in date_list: #käy läpi päivät
paivat = date.split(",") #hajottaa päivät omaan listaansa
if len(paivat) >= 1: #jos päiviä on annettu
first_day_list.append(paivat[0]) #lisää ekan päivän uuteen listaan
if len(id_list) > len(first_day_list): #tarkistaa onko kaikkii tapahtumiin annettu päivä
for _ in range(len(id_list)-len(first_day_list)): #jos tapahtumasta puuttuu päivä
first_day_list.append(str(time.strftime(date_format))) #näyttää tapahtuman päällimäisenä (tänä päivänä)
id_list.sort(key=dict(zip(id_list, first_day_list)).get) #järjestää id:t päivien mukaan järjestykseen
if len(id_list) == 0: #varoittaa jos tapahtumia ei ole
print("Virhe. Tietokanta '{}' on tyhjä".format(db))
else:
for id in id_list: #tulostaa kaikki tapahtumat, jos tapahtumia on
tulosta_keikka(id)
def tulosta_keikka(id):
"""Tulostaa formatoidusti keikkadatan."""
id_list = get_value_list("A")
rn = id_list.index(int(id)) + 2 #Tietokannan offset listan indexiin
tyot = get_value("C", rn)
if tyot == "-TBA-": #Jos tietoa ei ole annettu, näyttää TBA
eka_tyopaiva, vika_tyopaiva = "-TBA-", "-TBA-"
else:
        try: # try to build a list of the work days
            tyot = tyot.split(",")
        except AttributeError: # if there is only one day
            tyot = tyot.strftime(date_format)
            eka_tyopaiva, vika_tyopaiva = tyot, tyot
        else: # if there are several days
            eka_tyopaiva, vika_tyopaiva = tyot[0], tyot[-1]
artistit = get_value("L", rn)
    if artistit != "-TBA-" and artistit != "Ei artisteja": # check whether artists were given
        artistit = artistit.split(",") # build a list of the artists
        if len(artistit) >= 4: # show at most four artists
            poiminta = 4
        else:
            poiminta = len(artistit)
        random.shuffle(artistit) # shuffle the artists
        artistit = ", ".join(artistit[:poiminta]) # join the artists into a string
        artistiteksti = "{}kpl, mm. {}.".format(get_value("M", rn), artistit) # format the artist text
    else:
        artistiteksti = artistit # if no artists were given
print("")
print("{}: {}. {} - {} @{} via {}.".format(get_value("A", rn), get_value("B", rn), eka_tyopaiva, vika_tyopaiva, get_value("E", rn), get_value("F", rn)))
print("----- Status: {}. Laskutusperuste: {}.".format(get_value("G", rn), get_value("H", rn)))
print("----- Tehty {} päivässä {}h töitä. Laskutettu: {}e, josta saatu: {}e.".format(get_value("D", rn), get_value("I", rn), get_value("J", rn), get_value("K", rn)))
print("----- Artistit: {}".format(artistiteksti))
def muokkaa_keikkaa():
"""Kysyy mitä keikkaa ja mitä tietoa muokataan."""
onko_id, id_list = False, get_value_list("A", False) #Olettaa asioita
while onko_id == False: #pyörittää kunnes luotu ID löytyy tietokannasta
id = input("Syötä olemassaoleva ID: ")
if id.lower() == "l": #takaovi pois funktiosta
onko_id = True
else:
try:
                id_list.index(int(id)) # look for the given ID in the database
            except ValueError: # if the ID is not found
                print("Virheellinen ID!")
                for id in id_list: # print a list of the existing IDs
                    rn = id_list.index(int(id)) + 2 # offset from list index to database row
                    print("{}: {}".format(get_value("A", rn), get_value("B", rn)))
                print("")
            else: # if the ID is found
                tulosta_keikka(id)
                onko_id = True
    while True: # once the event is found, ask how it should be edited
print("")
toiminto = input("Valitse toiminto: ").lower()
        if toiminto == "l" or toiminto == "v": # and a couple more backdoors back to the main menu
            print("Muokkaus valmis. Poistutaan.")
            break
        elif toiminto == "a": # example of an edit action
            print("Muokataan ID:tä")
        else: # print the allowed commands
print("Virheellinen komento! ")
print("Sallitut kommennot ovat: ")
print("L - Lopeta tai V - Valmis")
def main():
"""Pyörittää päävalikkoa, kunnes käyttäjä haluaa poistua."""
global db #antaa muokata muuttujaa globaalisti
if len(sys.argv) == 2: #tarkistetaan annettiinko parametrinä tiedoston nimeä
db = "{}.xlsx".format(sys.argv[1])
init_db() #Tarkistaa onko tietokantaa olemassa tai luo uudet
print(" ") #Kaikki printit ovat 22 kirjainta pitkiä
print("*** KEIKKASIHTEERI ***")
print("* Päävalikko *")
print(" ")
while True:
print(" ")
        valinta = input("Kuinka voin palvella? ").lower() # main menu action choice
if valinta == ("u"):
print("Lisätään uusi keikka. ")
lisaa_keikka()
elif valinta == ("m"):
print("Näytetään keikkalista.")
nayta_keikat()
elif valinta == ("h"):
print("Muokataan keikkaa. ")
muokkaa_keikkaa()
        elif valinta == ("l"): # backdoor out
print("Kiitos käynnistä. ")
break
        else: # print the allowed actions
print("Virheellinen komento! ")
print(" ")
print("Sallitut komennot ovat")
print("*u - Uusi keikka. ")
print("*m - Menneet keikat. ")
print("*h - Hallitse keikkaa.")
print("*l - Lopeta. ")
main()
| oskarijarvelin/exsec | exsec.py | Python | gpl-3.0 | 16,163 |
import csv
import copy as cp
from sklearn.preprocessing import normalize
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
Data=[]
with open('WineDataSet.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
hold=[]
count=0
count1=0
for row in readCSV:
hold=[]
count = 0
if count1 !=0:
for i in row:
if count != 0:
hold.append(i)
count += 1
Data.append(cp.deepcopy(hold))
count1 += 1
print(Data)
pcaNorm=PCA(n_components=3)
NormData=normalize(Data)
pcaNorm.fit(NormData)
WineDataNorm=pcaNorm.transform(NormData)
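# Optional check (sketch): how much variance the three principal components
# retain, and the reduced shape.
#   print(pcaNorm.explained_variance_ratio_)
#   print(WineDataNorm.shape)  # (n_samples, 3)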
irisData = [
[ 5.1, 3.5, 1.4, 0.2 ],
[ 4.9, 3, 1.4, 0.2 ],
[ 4.7, 3.2, 1.3, 0.2 ],
[ 4.6, 3.1, 1.5, 0.2 ],
[ 5, 3.6, 1.4, 0.2 ],
[ 5.4, 3.9, 1.7, 0.4 ],
[ 4.6, 3.4, 1.4, 0.3 ],
[ 5, 3.4, 1.5, 0.2 ],
[ 4.4, 2.9, 1.4, 0.2 ],
[ 4.9, 3.1, 1.5, 0.1 ],
[ 5.4, 3.7, 1.5, 0.2 ],
[ 4.8, 3.4, 1.6, 0.2 ],
[ 4.8, 3, 1.4, 0.1 ],
[ 4.3, 3, 1.1, 0.1 ],
[ 5.8, 4, 1.2, 0.2 ],
[ 5.7, 4.4, 1.5, 0.4 ],
[ 5.4, 3.9, 1.3, 0.4 ],
[ 5.1, 3.5, 1.4, 0.3 ],
[ 5.7, 3.8, 1.7, 0.3 ],
[ 5.1, 3.8, 1.5, 0.3 ],
[ 5.4, 3.4, 1.7, 0.2 ],
[ 5.1, 3.7, 1.5, 0.4 ],
[ 4.6, 3.6, 1, 0.2 ],
[ 5.1, 3.3, 1.7, 0.5 ],
[ 4.8, 3.4, 1.9, 0.2 ],
[ 5, 3, 1.6, 0.2 ],
[ 5, 3.4, 1.6, 0.4 ],
[ 5.2, 3.5, 1.5, 0.2 ],
[ 5.2, 3.4, 1.4, 0.2 ],
[ 4.7, 3.2, 1.6, 0.2 ],
[ 4.8, 3.1, 1.6, 0.2 ],
[ 5.4, 3.4, 1.5, 0.4 ],
[ 5.2, 4.1, 1.5, 0.1 ],
[ 5.5, 4.2, 1.4, 0.2 ],
[ 4.9, 3.1, 1.5, 0.1 ],
[ 5, 3.2, 1.2, 0.2 ],
[ 5.5, 3.5, 1.3, 0.2 ],
[ 4.9, 3.1, 1.5, 0.1 ],
[ 4.4, 3, 1.3, 0.2 ],
[ 5.1, 3.4, 1.5, 0.2 ],
[ 5, 3.5, 1.3, 0.3 ],
[ 4.5, 2.3, 1.3, 0.3 ],
[ 4.4, 3.2, 1.3, 0.2 ],
[ 5, 3.5, 1.6, 0.6 ],
[ 5.1, 3.8, 1.9, 0.4 ],
[ 4.8, 3, 1.4, 0.3 ],
[ 5.1, 3.8, 1.6, 0.2 ],
[ 4.6, 3.2, 1.4, 0.2 ],
[ 5.3, 3.7, 1.5, 0.2 ],
[ 5, 3.3, 1.4, 0.2 ],
[ 7, 3.2, 4.7, 1.4 ],
[ 6.4, 3.2, 4.5, 1.5 ],
[ 6.9, 3.1, 4.9, 1.5 ],
[ 5.5, 2.3, 4, 1.3 ],
[ 6.5, 2.8, 4.6, 1.5 ],
[ 5.7, 2.8, 4.5, 1.3 ],
[ 6.3, 3.3, 4.7, 1.6 ],
[ 4.9, 2.4, 3.3, 1 ],
[ 6.6, 2.9, 4.6, 1.3 ],
[ 5.2, 2.7, 3.9, 1.4 ],
[ 5, 2, 3.5, 1 ],
[ 5.9, 3, 4.2, 1.5 ],
[ 6, 2.2, 4, 1 ],
[ 6.1, 2.9, 4.7, 1.4 ],
[ 5.6, 2.9, 3.6, 1.3 ],
[ 6.7, 3.1, 4.4, 1.4 ],
[ 5.6, 3, 4.5, 1.5 ],
[ 5.8, 2.7, 4.1, 1 ],
[ 6.2, 2.2, 4.5, 1.5 ],
[ 5.6, 2.5, 3.9, 1.1 ],
[ 5.9, 3.2, 4.8, 1.8 ],
[ 6.1, 2.8, 4, 1.3 ],
[ 6.3, 2.5, 4.9, 1.5 ],
[ 6.1, 2.8, 4.7, 1.2 ],
[ 6.4, 2.9, 4.3, 1.3 ],
[ 6.6, 3, 4.4, 1.4 ],
[ 6.8, 2.8, 4.8, 1.4 ],
[ 6.7, 3, 5, 1.7 ],
[ 6, 2.9, 4.5, 1.5 ],
[ 5.7, 2.6, 3.5, 1 ],
[ 5.5, 2.4, 3.8, 1.1 ],
[ 5.5, 2.4, 3.7, 1 ],
[ 5.8, 2.7, 3.9, 1.2 ],
[ 6, 2.7, 5.1, 1.6 ],
[ 5.4, 3, 4.5, 1.5 ],
[ 6, 3.4, 4.5, 1.6 ],
[ 6.7, 3.1, 4.7, 1.5 ],
[ 6.3, 2.3, 4.4, 1.3 ],
[ 5.6, 3, 4.1, 1.3 ],
[ 5.5, 2.5, 4, 1.3 ],
[ 5.5, 2.6, 4.4, 1.2 ],
[ 6.1, 3, 4.6, 1.4 ],
[ 5.8, 2.6, 4, 1.2 ],
[ 5, 2.3, 3.3, 1 ],
[ 5.6, 2.7, 4.2, 1.3 ],
[ 5.7, 3, 4.2, 1.2 ],
[ 5.7, 2.9, 4.2, 1.3 ],
[ 6.2, 2.9, 4.3, 1.3 ],
[ 5.1, 2.5, 3, 1.1 ],
[ 5.7, 2.8, 4.1, 1.3 ],
[ 6.3, 3.3, 6, 2.5 ],
[ 5.8, 2.7, 5.1, 1.9 ],
[ 7.1, 3, 5.9, 2.1 ],
[ 6.3, 2.9, 5.6, 1.8 ],
[ 6.5, 3, 5.8, 2.2 ],
[ 7.6, 3, 6.6, 2.1 ],
[ 4.9, 2.5, 4.5, 1.7 ],
[ 7.3, 2.9, 6.3, 1.8 ],
[ 6.7, 2.5, 5.8, 1.8 ],
[ 7.2, 3.6, 6.1, 2.5 ],
[ 6.5, 3.2, 5.1, 2 ],
[ 6.4, 2.7, 5.3, 1.9 ],
[ 6.8, 3, 5.5, 2.1 ],
[ 5.7, 2.5, 5, 2 ],
[ 5.8, 2.8, 5.1, 2.4 ],
[ 6.4, 3.2, 5.3, 2.3 ],
[ 6.5, 3, 5.5, 1.8 ],
[ 7.7, 3.8, 6.7, 2.2 ],
[ 7.7, 2.6, 6.9, 2.3 ],
[ 6, 2.2, 5, 1.5 ],
[ 6.9, 3.2, 5.7, 2.3 ],
[ 5.6, 2.8, 4.9, 2 ],
[ 7.7, 2.8, 6.7, 2 ],
[ 6.3, 2.7, 4.9, 1.8 ],
[ 6.7, 3.3, 5.7, 2.1 ],
[ 7.2, 3.2, 6, 1.8 ],
[ 6.2, 2.8, 4.8, 1.8 ],
[ 6.1, 3, 4.9, 1.8 ],
[ 6.4, 2.8, 5.6, 2.1 ],
[ 7.2, 3, 5.8, 1.6 ],
[ 7.4, 2.8, 6.1, 1.9 ],
[ 7.9, 3.8, 6.4, 2 ],
[ 6.4, 2.8, 5.6, 2.2 ],
[ 6.3, 2.8, 5.1, 1.5 ],
[ 6.1, 2.6, 5.6, 1.4 ],
[ 7.7, 3, 6.1, 2.3 ],
[ 6.3, 3.4, 5.6, 2.4 ],
[ 6.4, 3.1, 5.5, 1.8 ],
[ 6, 3, 4.8, 1.8 ],
[ 6.9, 3.1, 5.4, 2.1 ],
[ 6.7, 3.1, 5.6, 2.4 ],
[ 6.9, 3.1, 5.1, 2.3 ],
[ 5.8, 2.7, 5.1, 1.9 ],
[ 6.8, 3.2, 5.9, 2.3 ],
[ 6.7, 3.3, 5.7, 2.5 ],
[ 6.7, 3, 5.2, 2.3 ],
[ 6.3, 2.5, 5, 1.9 ],
[ 6.5, 3, 5.2, 2 ],
[ 6.2, 3.4, 5.4, 2.3 ],
[ 5.9, 3, 5.1, 1.8 ],
]
Examples = {
'WineNormalized And PCA': {
'data': WineDataNorm,
'k': [2,3,4,5]
}
}
| WmHHooper/aima-python | submissions/Colburn/myKMeans.py | Python | mit | 4,864 |
# Author: Bichen Wu (bichen@berkeley.edu) 08/25/2016
"""Model configuration for pascal dataset"""
import numpy as np
from config.config import base_model_config
def kitti_squeezeDet_config():
"""Specify the parameters to tune below."""
mc = base_model_config('PASCAL_VOC')#base_model_config('KITTI')
mc.IMAGE_WIDTH = 1242
mc.IMAGE_HEIGHT = 375
mc.BATCH_SIZE = 20
mc.WEIGHT_DECAY = 0.0001
mc.LEARNING_RATE = 0.01
mc.DECAY_STEPS = 10000
mc.MAX_GRAD_NORM = 1.0
mc.MOMENTUM = 0.9
mc.LR_DECAY_FACTOR = 0.5
mc.LOSS_COEF_BBOX = 5.0
mc.LOSS_COEF_CONF_POS = 75.0
mc.LOSS_COEF_CONF_NEG = 100.0
mc.LOSS_COEF_CLASS = 1.0
mc.PLOT_PROB_THRESH = 0.4
mc.NMS_THRESH = 0.4
mc.PROB_THRESH = 0.005
mc.TOP_N_DETECTION = 64
mc.DATA_AUGMENTATION = True
mc.DRIFT_X = 150
mc.DRIFT_Y = 100
mc.EXCLUDE_HARD_EXAMPLES = False
mc.ANCHOR_BOX = set_anchors(mc)
mc.ANCHORS = len(mc.ANCHOR_BOX)
mc.ANCHOR_PER_GRID = 9
return mc
def set_anchors(mc):
H, W, B = 22, 76, 9
anchor_shapes = np.reshape(
[np.array(
[[ 36., 37.], [ 366., 174.], [ 115., 59.],
[ 162., 87.], [ 38., 90.], [ 258., 173.],
[ 224., 108.], [ 78., 170.], [ 72., 43.]])] * H * W,
(H, W, B, 2)
)
center_x = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B),
(B, H, W)
),
(1, 2, 0)
),
(H, W, B, 1)
)
center_y = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),
(B, W, H)
),
(2, 1, 0)
),
(H, W, B, 1)
)
anchors = np.reshape(
np.concatenate((center_x, center_y, anchor_shapes), axis=3),
(-1, 4)
)
return anchors
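# Usage sketch: kitti_squeezeDet_config() returns mc with mc.ANCHOR_BOX of shape
# (22 * 76 * 9, 4) = (15048, 4), each row (center_x, center_y, width, height),
# and mc.ANCHORS == 15048.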
| Walter1218/self_driving_car_ND | squeezeDet/src/config/kitti_squeezeDet_config.py | Python | mit | 2,066 |
# -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import Ice, threading
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
import math
import RoboCompRGBD
global RoboCompRGBD
replay_plugin_identifier = 'rgbd_hal'
def getReplayClass():
return RGBDI()
def getRecordClass(proxy):
return RGBDRecorder(proxy)
def getGraphicalUserInterface():
return RGBDGUI()
class RGBDGUI(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self,parent)
self.show()
self.measure = None
self.configuration = None
def getSize(self):
return QSize(500, 500)
def setConfiguration(self, configuration):
self.configuration = configuration
def setMeasure(self, measure):
self.measure = measure
def paintEvent(self, event):
pass
class RGBDI(RoboCompRGBD.RGBD):
def __init__(self):
self.measure = None
self.configuration = None
def setConfiguration(self, configuration):
self.configuration = configuration
def setMeasure(self, measure):
self.measure = measure
    def getImage(self, measure):
return self.measure
def getMeasure(self):
return self.measure
def getData(self, current = None):
return self.measure
def getRGBDParams(self, current = None):
return self.configuration
class RGBDRecorder:
def __init__(self, proxy):
global RoboCompRGBD
self.proxy = RoboCompRGBD.RGBDPrx.checkedCast(proxy)
def getConfiguration(self):
return True
def getMeasure(self):
self.measure = self.proxy.getImage()
return self.measure
def measure(self):
return self.measure
| robocomp/robocomp | tools/rcreplay/rgbd.py | Python | gpl-3.0 | 2,317 |
# -*- coding: utf-8 -*-
# coding: UTF-8
#
# Copyright 2010-2015 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Setup file for pygit2."""
# Import from the future
from __future__ import print_function
# Import from the Standard Library
import codecs
from distutils.command.build import build
from distutils.command.sdist import sdist
from distutils import log
import os
from os import getenv, listdir, pathsep
from os.path import abspath, isfile
from setuptools import setup, Extension, Command
import shlex
from subprocess import Popen, PIPE
import sys
import unittest
# Get cffi major version
try:
import cffi
except ImportError:
cffi_major_version = None
else:
cffi_major_version = cffi.__version_info__[0]
# Import stuff from pygit2/_utils.py without loading the whole pygit2 package
sys.path.insert(0, 'pygit2')
from _build import __version__, get_libgit2_paths
if cffi_major_version == 0:
from _run import ffi, preamble, C_KEYWORDS
ffi.verify(preamble, **C_KEYWORDS)
del sys.path[0]
# Python 2 support
# See https://github.com/libgit2/pygit2/pull/180 for a discussion about this.
# Using six isn't an option here yet, we don't necessarily have six installed
if sys.version_info[0] == 2:
u = lambda s: unicode(s, 'utf-8')
else:
u = str
libgit2_bin, libgit2_include, libgit2_lib = get_libgit2_paths()
pygit2_exts = [os.path.join('src', name) for name in listdir('src')
if name.endswith('.c')]
class TestCommand(Command):
"""Command for running unittests without install."""
user_options = [("args=", None, '''The command args string passed to
unittest framework, such as
--args="-v -f"''')]
def initialize_options(self):
self.args = ''
def finalize_options(self):
pass
def run(self):
self.run_command('build')
bld = self.distribution.get_command_obj('build')
        # Add build_lib to sys.path so that unittest can find DLLs and libs
sys.path = [abspath(bld.build_lib)] + sys.path
test_argv0 = [sys.argv[0] + ' test --args=']
        # For transferring args to unittest, we have to split the args
        # ourselves, so that a command like:
        #
        #   python setup.py test --args="-v -f"
        #
        # can be executed and the parameter '-v -f' can be passed to
        # unittest properly.
test_argv = test_argv0 + shlex.split(self.args)
unittest.main(None, defaultTest='test.test_suite', argv=test_argv)
class sdist_files_from_git(sdist):
def get_file_list(self):
popen = Popen(['git', 'ls-files'], stdout=PIPE, stderr=PIPE,
universal_newlines=True)
stdoutdata, stderrdata = popen.communicate()
if popen.returncode != 0:
print(stderrdata)
sys.exit()
for line in stdoutdata.splitlines():
# Skip hidden files at the root
if line[0] == '.':
continue
self.filelist.append(line)
# Ok
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
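# Illustrative note, not part of the original file: with the cmdclass mapping
# defined below, the custom commands above are reached through the usual
# distutils entry points, for example (command lines shown for illustration
# only):
#
#   python setup.py test --args="-v"   # build in place, then run the tests
#   python setup.py sdist              # tarball built from "git ls-files"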
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Software Development :: Version Control"]
with codecs.open('README.rst', 'r', 'utf-8') as readme:
long_description = readme.read()
cmdclass = {
'test': TestCommand,
'sdist': sdist_files_from_git,
}
# On Windows, we install the git2.dll too.
class BuildWithDLLs(build):
def _get_dlls(self):
# return a list of (FQ-in-name, relative-out-name) tuples.
ret = []
bld_ext = self.distribution.get_command_obj('build_ext')
compiler_type = bld_ext.compiler.compiler_type
libgit2_dlls = []
if compiler_type == 'msvc':
libgit2_dlls.append('git2.dll')
elif compiler_type == 'mingw32':
libgit2_dlls.append('libgit2.dll')
look_dirs = [libgit2_bin] + getenv("PATH", "").split(pathsep)
target = abspath(self.build_lib)
for bin in libgit2_dlls:
for look in look_dirs:
f = os.path.join(look, bin)
if isfile(f):
ret.append((f, target))
break
else:
log.warn("Could not find required DLL %r to include", bin)
log.debug("(looked in %s)", look_dirs)
return ret
def run(self):
build.run(self)
for s, d in self._get_dlls():
self.copy_file(s, d)
# On Windows we package up the dlls with the plugin.
if os.name == 'nt':
cmdclass['build'] = BuildWithDLLs
extra_args = {
'ext_modules': [
Extension('_pygit2', pygit2_exts, libraries=['git2'],
include_dirs=[libgit2_include],
library_dirs=[libgit2_lib]),
# FFI is added in the build step
],
}
if cffi_major_version == 0:
extra_args['ext_modules'].append(ffi.verifier.get_extension())
else:
extra_args['cffi_modules'] = ['pygit2/_run.py:ffi']
setup(name='pygit2',
description='Python bindings for libgit2.',
keywords='git',
version=__version__,
url='http://github.com/libgit2/pygit2',
classifiers=classifiers,
license='GPLv2 with linking exception',
maintainer=u('J. David Ibáñez'),
maintainer_email='jdavid.ibp@gmail.com',
long_description=long_description,
packages=['pygit2'],
package_data={'pygit2': ['decl.h']},
setup_requires=['cffi'],
install_requires=['cffi', 'six'],
zip_safe=False,
cmdclass=cmdclass,
**extra_args)
| Sheeo/pygit2 | setup.py | Python | gpl-2.0 | 6,812 |
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Configuration management for Ganeti
This module provides the interface to the Ganeti cluster configuration.
The configuration data is stored on every node but is updated on the master
only. After each update, the master distributes the data to the other nodes.
Currently, the data storage format is JSON. YAML was slow and consuming too
much memory.
"""
# pylint: disable=R0904
# R0904: Too many public methods
import copy
import os
import random
import logging
import time
import threading
import itertools
from ganeti import errors
from ganeti import utils
from ganeti import constants
import ganeti.wconfd as wc
from ganeti import objects
from ganeti import serializer
from ganeti import uidpool
from ganeti import netutils
from ganeti import runtime
from ganeti import pathutils
from ganeti import network
def GetWConfdContext(ec_id, livelock):
"""Prepare a context for communication with WConfd.
WConfd needs to know the identity of each caller to properly manage locks and
detect job death. This helper function prepares the identity object given a
job ID (optional) and a livelock file.
@type ec_id: int, or None
@param ec_id: the job ID or None, if the caller isn't a job
@type livelock: L{ganeti.utils.livelock.LiveLock}
@param livelock: a livelock object holding the lockfile needed for WConfd
@return: the WConfd context
"""
if ec_id is None:
return (threading.current_thread().getName(),
livelock.GetPath(), os.getpid())
else:
return (ec_id,
livelock.GetPath(), os.getpid())
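# Illustrative example, not part of the original module: the identity tuple
# built above takes one of two shapes (all concrete values below are
# hypothetical placeholders):
#
#   GetWConfdContext(1234, livelock) -> (1234, "/path/to/livelock", 4567)
#   GetWConfdContext(None, livelock) -> ("MainThread", "/path/to/livelock", 4567)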
def GetConfig(ec_id, livelock, **kwargs):
"""A utility function for constructing instances of ConfigWriter.
It prepares a WConfd context and uses it to create a ConfigWriter instance.
@type ec_id: int, or None
@param ec_id: the job ID or None, if the caller isn't a job
@type livelock: L{ganeti.utils.livelock.LiveLock}
@param livelock: a livelock object holding the lockfile needed for WConfd
@type kwargs: dict
@param kwargs: Any additional arguments for the ConfigWriter constructor
@rtype: L{ConfigWriter}
@return: the ConfigWriter context
"""
kwargs['wconfdcontext'] = GetWConfdContext(ec_id, livelock)
kwargs['wconfd'] = wc.Client()
return ConfigWriter(**kwargs)
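# Illustrative usage sketch, not part of the original module: how a caller
# might obtain a ConfigWriter through GetConfig(). The job id and the livelock
# object are hypothetical placeholders supplied by the caller.
def _ExampleGetConfigUsage(job_id, livelock):
  """Hedged example only: construct a ConfigWriter and do a shared read.

  """
  cfg = GetConfig(job_id, livelock)
  # Read-only accessors such as GetClusterName take the configuration lock
  # in shared mode internally (see the _ConfigSync decorator below).
  return cfg.GetClusterName()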
def _ConfigSync(shared=0):
"""Configuration synchronization decorator.
"""
def wrap(fn):
def sync_function(*args, **kwargs):
with args[0].GetConfigManager(shared):
return fn(*args, **kwargs)
return sync_function
return wrap
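# Illustrative note, not part of the original module: ConfigWriter methods
# below apply this decorator either as
#
#   @_ConfigSync(shared=1)   # open the configuration read-only (shared)
#   def GetClusterName(self): ...
#
# or as
#
#   @_ConfigSync()           # open the configuration for writing (exclusive)
#   def AddTcpUdpPort(self, port): ...
#
# The wrapper runs the decorated method inside a ConfigManager context, so the
# configuration is opened before the call and closed (and, for error-free
# exclusive sections, saved) afterwards.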
# job id used for resource management at config upgrade time
_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
def _ValidateConfig(data):
"""Verifies that a configuration dict looks valid.
This only verifies the version of the configuration.
@raise errors.ConfigurationError: if the version differs from what
we expect
"""
if data['version'] != constants.CONFIG_VERSION:
raise errors.ConfigVersionMismatch(constants.CONFIG_VERSION,
data['version'])
class TemporaryReservationManager(object):
"""A temporary resource reservation manager.
This is used to reserve resources in a job, before using them, making sure
other jobs cannot get them in the meantime.
"""
def __init__(self):
self._ec_reserved = {}
def Reserved(self, resource):
for holder_reserved in self._ec_reserved.values():
if resource in holder_reserved:
return True
return False
def Reserve(self, ec_id, resource):
if self.Reserved(resource):
raise errors.ReservationError("Duplicate reservation for resource '%s'"
% str(resource))
if ec_id not in self._ec_reserved:
self._ec_reserved[ec_id] = set([resource])
else:
self._ec_reserved[ec_id].add(resource)
def DropECReservations(self, ec_id):
if ec_id in self._ec_reserved:
del self._ec_reserved[ec_id]
def GetReserved(self):
all_reserved = set()
for holder_reserved in self._ec_reserved.values():
all_reserved.update(holder_reserved)
return all_reserved
def GetECReserved(self, ec_id):
""" Used when you want to retrieve all reservations for a specific
    execution context, e.g. when committing reserved IPs for a specific
network.
"""
ec_reserved = set()
if ec_id in self._ec_reserved:
ec_reserved.update(self._ec_reserved[ec_id])
return ec_reserved
def Generate(self, existing, generate_one_fn, ec_id):
"""Generate a new resource of this type
"""
assert callable(generate_one_fn)
all_elems = self.GetReserved()
all_elems.update(existing)
retries = 64
    while retries > 0:
      new_resource = generate_one_fn()
      if new_resource is not None and new_resource not in all_elems:
        break
      retries -= 1
    else:
      raise errors.ConfigurationError("Not able to generate new resource"
                                      " (last tried: %s)" % new_resource)
self.Reserve(ec_id, new_resource)
return new_resource
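# Illustrative usage sketch, not part of the original module: typical lifecycle
# of a TemporaryReservationManager. The execution-context id and the reserved
# value below are hypothetical placeholders.
def _ExampleTemporaryReservationUsage():
  """Hedged example only: reserve a resource, query it, then drop it.

  """
  trm = TemporaryReservationManager()
  trm.Reserve("job-42", "198.51.100.10")  # tie the resource to one context
  assert trm.Reserved("198.51.100.10")    # visible across all contexts
  trm.DropECReservations("job-42")        # dropped when the job finishes
  return trm.GetReserved()                # now an empty set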
def _MatchNameComponentIgnoreCase(short_name, names):
"""Wrapper around L{utils.text.MatchNameComponent}.
"""
return utils.MatchNameComponent(short_name, names, case_sensitive=False)
def _CheckInstanceDiskIvNames(disks):
"""Checks if instance's disks' C{iv_name} attributes are in order.
@type disks: list of L{objects.Disk}
@param disks: List of disks
@rtype: list of tuples; (int, string, string)
@return: List of wrongly named disks, each tuple contains disk index,
expected and actual name
"""
result = []
for (idx, disk) in enumerate(disks):
exp_iv_name = "disk/%s" % idx
if disk.iv_name != exp_iv_name:
result.append((idx, exp_iv_name, disk.iv_name))
return result
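# Illustrative example, not part of the original module: if an instance's
# second disk were still named "disk/0", _CheckInstanceDiskIvNames would
# return [(1, "disk/1", "disk/0")] -- index, expected name, actual name.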
class ConfigManager(object):
"""Locks the configuration and exposes it to be read or modified.
"""
def __init__(self, config_writer, shared=False):
assert isinstance(config_writer, ConfigWriter), \
"invalid argument: Not a ConfigWriter"
self._config_writer = config_writer
self._shared = shared
def __enter__(self):
try:
self._config_writer._OpenConfig(self._shared) # pylint: disable=W0212
except Exception:
logging.debug("Opening configuration failed")
try:
self._config_writer._CloseConfig(False) # pylint: disable=W0212
except Exception: # pylint: disable=W0703
logging.debug("Closing configuration failed as well")
raise
def __exit__(self, exc_type, exc_value, traceback):
    # save the configuration if this was a write operation that succeeded
if exc_type is not None:
logging.debug("Configuration operation failed,"
" the changes will not be saved")
# pylint: disable=W0212
self._config_writer._CloseConfig(not self._shared and exc_type is None)
return False
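# Illustrative note, not part of the original module: ConfigManager is meant to
# be used as a context manager, normally obtained through
# ConfigWriter.GetConfigManager() as the _ConfigSync decorator above does:
#
#   with ConfigManager(cfg_writer, shared=True):
#     ...  # read the in-memory configuration
#
# On exit the configuration is saved only for error-free exclusive (non-shared)
# sections; shared or failed sections are closed without saving.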
def _UpdateIvNames(base_idx, disks):
"""Update the C{iv_name} attribute of disks.
@type disks: list of L{objects.Disk}
"""
for (idx, disk) in enumerate(disks):
disk.iv_name = "disk/%s" % (base_idx + idx)
class ConfigWriter(object):
"""The interface to the cluster configuration.
WARNING: The class is no longer thread-safe!
Each thread must construct a separate instance.
@ivar _all_rms: a list of all temporary reservation managers
"""
def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts,
accept_foreign=False, wconfdcontext=None, wconfd=None):
self.write_count = 0
self._config_data = None
self._SetConfigData(None)
self._offline = offline
if cfg_file is None:
self._cfg_file = pathutils.CLUSTER_CONF_FILE
else:
self._cfg_file = cfg_file
self._getents = _getents
self._temporary_ids = TemporaryReservationManager()
self._all_rms = [self._temporary_ids]
# Note: in order to prevent errors when resolving our name later,
# we compute it here once and reuse it; it's
# better to raise an error before starting to modify the config
# file than after it was modified
self._my_hostname = netutils.Hostname.GetSysName()
self._cfg_id = None
self._wconfdcontext = wconfdcontext
self._wconfd = wconfd
self._accept_foreign = accept_foreign
self._lock_count = 0
self._lock_current_shared = None
def _ConfigData(self):
return self._config_data
def OutDate(self):
self._config_data = None
def _SetConfigData(self, cfg):
self._config_data = cfg
def _GetWConfdContext(self):
return self._wconfdcontext
# this method needs to be static, so that we can call it on the class
@staticmethod
def IsCluster():
"""Check if the cluster is configured.
"""
return os.path.exists(pathutils.CLUSTER_CONF_FILE)
@_ConfigSync(shared=1)
def GetNdParams(self, node):
"""Get the node params populated with cluster defaults.
@type node: L{objects.Node}
@param node: The node we want to know the params for
@return: A dict with the filled in node params
"""
nodegroup = self._UnlockedGetNodeGroup(node.group)
return self._ConfigData().cluster.FillND(node, nodegroup)
@_ConfigSync(shared=1)
def GetNdGroupParams(self, nodegroup):
"""Get the node groups params populated with cluster defaults.
@type nodegroup: L{objects.NodeGroup}
@param nodegroup: The node group we want to know the params for
@return: A dict with the filled in node group params
"""
return self._UnlockedGetNdGroupParams(nodegroup)
def _UnlockedGetNdGroupParams(self, group):
"""Get the ndparams of the group.
@type group: L{objects.NodeGroup}
@param group: The group we want to know the params for
@rtype: dict of str to int
@return: A dict with the filled in node group params
"""
return self._ConfigData().cluster.FillNDGroup(group)
@_ConfigSync(shared=1)
def GetGroupSshPorts(self):
"""Get a map of group UUIDs to SSH ports.
@rtype: dict of str to int
@return: a dict mapping the UUIDs to the SSH ports
"""
port_map = {}
for uuid, group in self._config_data.nodegroups.items():
ndparams = self._UnlockedGetNdGroupParams(group)
port = ndparams.get(constants.ND_SSH_PORT)
port_map[uuid] = port
return port_map
@_ConfigSync(shared=1)
def GetInstanceDiskParams(self, instance):
"""Get the disk params populated with inherit chain.
@type instance: L{objects.Instance}
@param instance: The instance we want to know the params for
@return: A dict with the filled in disk params
"""
node = self._UnlockedGetNodeInfo(instance.primary_node)
nodegroup = self._UnlockedGetNodeGroup(node.group)
return self._UnlockedGetGroupDiskParams(nodegroup)
@_ConfigSync()
def SetInstanceDiskTemplate(self, inst_uuid, disk_template):
"""Set the instance's disk template to the given value.
@type inst_uuid: string
@param inst_uuid: The UUID of the instance object
@type disk_template: string
@param disk_template: The new disk template of the instance
"""
instance = self._UnlockedGetInstanceInfo(inst_uuid)
if instance is None:
raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)
# Update the disk template of the instance
instance.disk_template = disk_template
def _UnlockedGetInstanceDisks(self, inst_uuid):
"""Return the disks' info for the given instance
@type inst_uuid: string
@param inst_uuid: The UUID of the instance we want to know the disks for
@rtype: List of L{objects.Disk}
@return: A list with all the disks' info
"""
instance = self._UnlockedGetInstanceInfo(inst_uuid)
if instance is None:
raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)
return [self._UnlockedGetDiskInfo(disk_uuid)
for disk_uuid in instance.disks]
@_ConfigSync(shared=1)
def GetInstanceDisks(self, inst_uuid):
"""Return the disks' info for the given instance
This is a simple wrapper over L{_UnlockedGetInstanceDisks}.
"""
return self._UnlockedGetInstanceDisks(inst_uuid)
def _UnlockedAddDisk(self, disk):
"""Add a disk to the config.
@type disk: L{objects.Disk}
@param disk: The disk object
"""
if not isinstance(disk, objects.Disk):
raise errors.ProgrammerError("Invalid type passed to _UnlockedAddDisk")
logging.info("Adding disk %s to configuration", disk.uuid)
self._CheckUniqueUUID(disk, include_temporary=False)
disk.serial_no = 1
disk.ctime = disk.mtime = time.time()
disk.UpgradeConfig()
self._ConfigData().disks[disk.uuid] = disk
self._ConfigData().cluster.serial_no += 1
def _UnlockedAttachInstanceDisk(self, inst_uuid, disk_uuid, idx=None):
"""Attach a disk to an instance.
@type inst_uuid: string
@param inst_uuid: The UUID of the instance object
@type disk_uuid: string
@param disk_uuid: The UUID of the disk object
@type idx: int
@param idx: the index of the newly attached disk; if not
passed, the disk will be attached as the last one.
"""
instance = self._UnlockedGetInstanceInfo(inst_uuid)
if instance is None:
raise errors.ConfigurationError("Instance %s doesn't exist"
% inst_uuid)
if disk_uuid not in self._ConfigData().disks:
raise errors.ConfigurationError("Disk %s doesn't exist" % disk_uuid)
if idx is None:
idx = len(instance.disks)
else:
if idx < 0:
raise IndexError("Not accepting negative indices other than -1")
elif idx > len(instance.disks):
raise IndexError("Got disk index %s, but there are only %s" %
(idx, len(instance.disks)))
# Disk must not be attached anywhere else
for inst in self._ConfigData().instances.values():
if disk_uuid in inst.disks:
raise errors.ReservationError("Disk %s already attached to instance %s"
% (disk_uuid, inst.name))
instance.disks.insert(idx, disk_uuid)
instance_disks = self._UnlockedGetInstanceDisks(inst_uuid)
_UpdateIvNames(idx, instance_disks[idx:])
instance.serial_no += 1
instance.mtime = time.time()
@_ConfigSync()
def AddInstanceDisk(self, inst_uuid, disk, idx=None):
"""Add a disk to the config and attach it to instance.
This is a simple wrapper over L{_UnlockedAddDisk} and
L{_UnlockedAttachInstanceDisk}.
"""
self._UnlockedAddDisk(disk)
self._UnlockedAttachInstanceDisk(inst_uuid, disk.uuid, idx)
def _UnlockedDetachInstanceDisk(self, inst_uuid, disk_uuid):
"""Detach a disk from an instance.
@type inst_uuid: string
@param inst_uuid: The UUID of the instance object
@type disk_uuid: string
@param disk_uuid: The UUID of the disk object
"""
instance = self._UnlockedGetInstanceInfo(inst_uuid)
if instance is None:
raise errors.ConfigurationError("Instance %s doesn't exist"
% inst_uuid)
if disk_uuid not in self._ConfigData().disks:
raise errors.ConfigurationError("Disk %s doesn't exist" % disk_uuid)
# Check if disk is attached to the instance
if disk_uuid not in instance.disks:
raise errors.ProgrammerError("Disk %s is not attached to an instance"
% disk_uuid)
idx = instance.disks.index(disk_uuid)
instance.disks.remove(disk_uuid)
instance_disks = self._UnlockedGetInstanceDisks(inst_uuid)
_UpdateIvNames(idx, instance_disks[idx:])
instance.serial_no += 1
instance.mtime = time.time()
def _UnlockedRemoveDisk(self, disk_uuid):
"""Remove the disk from the configuration.
@type disk_uuid: string
@param disk_uuid: The UUID of the disk object
"""
if disk_uuid not in self._ConfigData().disks:
raise errors.ConfigurationError("Disk %s doesn't exist" % disk_uuid)
# Disk must not be attached anywhere
for inst in self._ConfigData().instances.values():
if disk_uuid in inst.disks:
raise errors.ReservationError("Cannot remove disk %s. Disk is"
" attached to instance %s"
% (disk_uuid, inst.name))
# Remove disk from config file
del self._ConfigData().disks[disk_uuid]
self._ConfigData().cluster.serial_no += 1
@_ConfigSync()
def RemoveInstanceDisk(self, inst_uuid, disk_uuid):
"""Detach a disk from an instance and remove it from the config.
This is a simple wrapper over L{_UnlockedDetachInstanceDisk} and
L{_UnlockedRemoveDisk}.
"""
self._UnlockedDetachInstanceDisk(inst_uuid, disk_uuid)
self._UnlockedRemoveDisk(disk_uuid)
def _UnlockedGetDiskInfo(self, disk_uuid):
"""Returns information about a disk.
It takes the information from the configuration file.
@param disk_uuid: UUID of the disk
@rtype: L{objects.Disk}
@return: the disk object
"""
if disk_uuid not in self._ConfigData().disks:
return None
return self._ConfigData().disks[disk_uuid]
@_ConfigSync(shared=1)
def GetDiskInfo(self, disk_uuid):
"""Returns information about a disk.
This is a simple wrapper over L{_UnlockedGetDiskInfo}.
"""
return self._UnlockedGetDiskInfo(disk_uuid)
def _AllInstanceNodes(self, inst_uuid):
"""Compute the set of all disk-related nodes for an instance.
This abstracts away some work from '_UnlockedGetInstanceNodes'
and '_UnlockedGetInstanceSecondaryNodes'.
@type inst_uuid: string
@param inst_uuid: The UUID of the instance we want to get nodes for
@rtype: set of strings
@return: A set of names for all the nodes of the instance
"""
instance = self._UnlockedGetInstanceInfo(inst_uuid)
if instance is None:
raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)
instance_disks = self._UnlockedGetInstanceDisks(inst_uuid)
all_nodes = []
for disk in instance_disks:
all_nodes.extend(disk.all_nodes)
return (set(all_nodes), instance)
def _UnlockedGetInstanceNodes(self, inst_uuid):
"""Get all disk-related nodes for an instance.
    For non-DRBD instances this is just the primary node, while for
    DRBD it contains both the primary and the secondary nodes.
@type inst_uuid: string
@param inst_uuid: The UUID of the instance we want to get nodes for
@rtype: list of strings
@return: A list of names for all the nodes of the instance
"""
(all_nodes, instance) = self._AllInstanceNodes(inst_uuid)
# ensure that primary node is always the first
all_nodes.discard(instance.primary_node)
return (instance.primary_node, ) + tuple(all_nodes)
@_ConfigSync(shared=1)
def GetInstanceNodes(self, inst_uuid):
"""Get all disk-related nodes for an instance.
This is just a wrapper over L{_UnlockedGetInstanceNodes}
"""
return self._UnlockedGetInstanceNodes(inst_uuid)
def _UnlockedGetInstanceSecondaryNodes(self, inst_uuid):
"""Get the list of secondary nodes.
@type inst_uuid: string
@param inst_uuid: The UUID of the instance we want to get nodes for
@rtype: list of strings
@return: A list of names for all the secondary nodes of the instance
"""
(all_nodes, instance) = self._AllInstanceNodes(inst_uuid)
all_nodes.discard(instance.primary_node)
return tuple(all_nodes)
@_ConfigSync(shared=1)
def GetInstanceSecondaryNodes(self, inst_uuid):
"""Get the list of secondary nodes.
This is a simple wrapper over L{_UnlockedGetInstanceSecondaryNodes}.
"""
return self._UnlockedGetInstanceSecondaryNodes(inst_uuid)
def _UnlockedGetInstanceLVsByNode(self, inst_uuid, lvmap=None):
"""Provide a mapping of node to LVs a given instance owns.
@type inst_uuid: string
@param inst_uuid: The UUID of the instance we want to
compute the LVsByNode for
@type lvmap: dict
@param lvmap: Optional dictionary to receive the
'node' : ['lv', ...] data.
@rtype: dict or None
@return: None if lvmap arg is given, otherwise, a dictionary of
the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
volumeN is of the form "vg_name/lv_name", compatible with
GetVolumeList()
"""
def _MapLVsByNode(lvmap, devices, node_uuid):
"""Recursive helper function."""
      if node_uuid not in lvmap:
lvmap[node_uuid] = []
for dev in devices:
if dev.dev_type == constants.DT_PLAIN:
lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
elif dev.dev_type in constants.DTS_DRBD:
if dev.children:
_MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
_MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
elif dev.children:
_MapLVsByNode(lvmap, dev.children, node_uuid)
instance = self._UnlockedGetInstanceInfo(inst_uuid)
if instance is None:
raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)
if lvmap is None:
lvmap = {}
ret = lvmap
else:
ret = None
_MapLVsByNode(lvmap,
self._UnlockedGetInstanceDisks(instance.uuid),
instance.primary_node)
return ret
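  # Illustrative example, not part of the original module: for a plain-disk
  # instance with two volumes, the mapping built above looks roughly like
  #   {"node-uuid-1": ["xenvg/disk0-lv", "xenvg/disk1-lv"]}
  # where the volume group and LV names are hypothetical.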
@_ConfigSync(shared=1)
def GetInstanceLVsByNode(self, inst_uuid, lvmap=None):
"""Provide a mapping of node to LVs a given instance owns.
This is a simple wrapper over L{_UnlockedGetInstanceLVsByNode}
"""
return self._UnlockedGetInstanceLVsByNode(inst_uuid, lvmap=lvmap)
@_ConfigSync(shared=1)
def GetGroupDiskParams(self, group):
"""Get the disk params populated with inherit chain.
@type group: L{objects.NodeGroup}
@param group: The group we want to know the params for
@return: A dict with the filled in disk params
"""
return self._UnlockedGetGroupDiskParams(group)
def _UnlockedGetGroupDiskParams(self, group):
"""Get the disk params populated with inherit chain down to node-group.
@type group: L{objects.NodeGroup}
@param group: The group we want to know the params for
@return: A dict with the filled in disk params
"""
data = self._ConfigData().cluster.SimpleFillDP(group.diskparams)
assert isinstance(data, dict), "Not a dictionary: " + str(data)
return data
@_ConfigSync(shared=1)
def GetPotentialMasterCandidates(self):
"""Gets the list of node names of potential master candidates.
@rtype: list of str
@return: list of node names of potential master candidates
"""
# FIXME: Note that currently potential master candidates are nodes
# but this definition will be extended once RAPI-unmodifiable
# parameters are introduced.
nodes = self._UnlockedGetAllNodesInfo()
return [node_info.name for node_info in nodes.values()]
def GenerateMAC(self, net_uuid, _ec_id):
"""Generate a MAC for an instance.
This should check the current instances for duplicates.
"""
return self._wconfd.GenerateMAC(self._GetWConfdContext(), net_uuid)
def ReserveMAC(self, mac, _ec_id):
"""Reserve a MAC for an instance.
This only checks instances managed by this cluster, it does not
check for potential collisions elsewhere.
"""
self._wconfd.ReserveMAC(self._GetWConfdContext(), mac)
def _UnlockedCommitTemporaryIps(self, _ec_id):
"""Commit all reserved IP address to their respective pools
"""
if self._offline:
raise errors.ProgrammerError("Can't call CommitTemporaryIps"
" in offline mode")
ips = self._wconfd.ListReservedIps(self._GetWConfdContext())
for action, address, net_uuid in ips:
self._UnlockedCommitIp(action, net_uuid, address)
def _UnlockedCommitIp(self, action, net_uuid, address):
"""Commit a reserved IP address to an IP pool.
    Depending on the action, the IP address is either marked as reserved in,
    or released back to, the network's IP pool.
"""
nobj = self._UnlockedGetNetwork(net_uuid)
if nobj is None:
raise errors.ProgrammerError("Network '%s' not found" % (net_uuid, ))
pool = network.AddressPool(nobj)
if action == constants.RESERVE_ACTION:
pool.Reserve(address)
elif action == constants.RELEASE_ACTION:
pool.Release(address)
def ReleaseIp(self, net_uuid, address, _ec_id):
"""Give a specific IP address back to an IP pool.
    The IP address is returned to the IP pool and marked as free again.
"""
if net_uuid:
if self._offline:
raise errors.ProgrammerError("Can't call ReleaseIp in offline mode")
self._wconfd.ReleaseIp(self._GetWConfdContext(), net_uuid, address)
def GenerateIp(self, net_uuid, _ec_id):
"""Find a free IPv4 address for an instance.
"""
if self._offline:
raise errors.ProgrammerError("Can't call GenerateIp in offline mode")
return self._wconfd.GenerateIp(self._GetWConfdContext(), net_uuid)
def ReserveIp(self, net_uuid, address, _ec_id, check=True):
"""Reserve a given IPv4 address for use by an instance.
"""
if self._offline:
raise errors.ProgrammerError("Can't call ReserveIp in offline mode")
return self._wconfd.ReserveIp(self._GetWConfdContext(), net_uuid, address,
check)
def ReserveLV(self, lv_name, _ec_id):
"""Reserve an VG/LV pair for an instance.
@type lv_name: string
@param lv_name: the logical volume name to reserve
"""
return self._wconfd.ReserveLV(self._GetWConfdContext(), lv_name)
def GenerateDRBDSecret(self, _ec_id):
"""Generate a DRBD secret.
This checks the current disks for duplicates.
"""
return self._wconfd.GenerateDRBDSecret(self._GetWConfdContext())
# FIXME: After _AllIDs is removed, move it to config_mock.py
def _AllLVs(self):
"""Compute the list of all LVs.
"""
lvnames = set()
for instance in self._ConfigData().instances.values():
node_data = self._UnlockedGetInstanceLVsByNode(instance.uuid)
for lv_list in node_data.values():
lvnames.update(lv_list)
return lvnames
def _AllNICs(self):
"""Compute the list of all NICs.
"""
nics = []
for instance in self._ConfigData().instances.values():
nics.extend(instance.nics)
return nics
def _AllIDs(self, include_temporary):
"""Compute the list of all UUIDs and names we have.
@type include_temporary: boolean
@param include_temporary: whether to include the _temporary_ids set
@rtype: set
@return: a set of IDs
"""
existing = set()
if include_temporary:
existing.update(self._temporary_ids.GetReserved())
existing.update(self._AllLVs())
existing.update(self._ConfigData().instances.keys())
existing.update(self._ConfigData().nodes.keys())
existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
return existing
def _GenerateUniqueID(self, ec_id):
"""Generate an unique UUID.
This checks the current node, instances and disk names for
duplicates.
@rtype: string
@return: the unique id
"""
existing = self._AllIDs(include_temporary=False)
return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
@_ConfigSync(shared=1)
def GenerateUniqueID(self, ec_id):
"""Generate an unique ID.
This is just a wrapper over the unlocked version.
@type ec_id: string
@param ec_id: unique id for the job to reserve the id to
"""
return self._GenerateUniqueID(ec_id)
def _AllMACs(self):
"""Return all MACs present in the config.
@rtype: list
@return: the list of all MACs
"""
result = []
for instance in self._ConfigData().instances.values():
for nic in instance.nics:
result.append(nic.mac)
return result
def _AllDRBDSecrets(self):
"""Return all DRBD secrets present in the config.
@rtype: list
@return: the list of all DRBD secrets
"""
def helper(disk, result):
"""Recursively gather secrets from this disk."""
if disk.dev_type == constants.DT_DRBD8:
result.append(disk.logical_id[5])
if disk.children:
for child in disk.children:
helper(child, result)
result = []
for disk in self._ConfigData().disks.values():
helper(disk, result)
return result
@staticmethod
def _VerifyDisks(data, result):
"""Per-disk verification checks
Extends L{result} with diagnostic information about the disks.
@type data: see L{_ConfigData}
@param data: configuration data
@type result: list of strings
@param result: list containing diagnostic messages
"""
instance_disk_uuids = [d for insts in data.instances.values()
for d in insts.disks]
for disk_uuid in data.disks:
disk = data.disks[disk_uuid]
result.extend(["disk %s error: %s" % (disk.uuid, msg)
for msg in disk.Verify()])
if disk.uuid != disk_uuid:
result.append("disk '%s' is indexed by wrong UUID '%s'" %
(disk.name, disk_uuid))
if disk.uuid not in instance_disk_uuids:
result.append("disk '%s' is not attached to any instance" %
disk.uuid)
def _UnlockedVerifyConfig(self):
"""Verify function.
@rtype: list
@return: a list of error messages; a non-empty list signifies
configuration errors
"""
# pylint: disable=R0914
result = []
seen_macs = []
ports = {}
data = self._ConfigData()
cluster = data.cluster
# First call WConfd to perform its checks, if we're not offline
if not self._offline:
try:
self._wconfd.VerifyConfig()
except errors.ConfigVerifyError, err:
try:
for msg in err.args[1]:
result.append(msg)
except IndexError:
pass
def _helper(owner, attr, value, template):
try:
utils.ForceDictType(value, template)
except errors.GenericError, err:
result.append("%s has invalid %s: %s" % (owner, attr, err))
def _helper_nic(owner, params):
try:
objects.NIC.CheckParameterSyntax(params)
except errors.ConfigurationError, err:
result.append("%s has invalid nicparams: %s" % (owner, err))
def _helper_ipolicy(owner, ipolicy, iscluster):
try:
objects.InstancePolicy.CheckParameterSyntax(ipolicy, iscluster)
except errors.ConfigurationError, err:
result.append("%s has invalid instance policy: %s" % (owner, err))
for key, value in ipolicy.items():
if key == constants.ISPECS_MINMAX:
for k in range(len(value)):
_helper_ispecs(owner, "ipolicy/%s[%s]" % (key, k), value[k])
elif key == constants.ISPECS_STD:
_helper(owner, "ipolicy/" + key, value,
constants.ISPECS_PARAMETER_TYPES)
else:
# FIXME: assuming list type
if key in constants.IPOLICY_PARAMETERS:
exp_type = float
# if the value is int, it can be converted into float
convertible_types = [int]
else:
exp_type = list
convertible_types = []
# Try to convert from allowed types, if necessary.
if any(isinstance(value, ct) for ct in convertible_types):
try:
value = exp_type(value)
ipolicy[key] = value
except ValueError:
pass
if not isinstance(value, exp_type):
result.append("%s has invalid instance policy: for %s,"
" expecting %s, got %s" %
(owner, key, exp_type.__name__, type(value)))
def _helper_ispecs(owner, parentkey, params):
for (key, value) in params.items():
fullkey = "/".join([parentkey, key])
_helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES)
# check cluster parameters
_helper("cluster", "beparams", cluster.SimpleFillBE({}),
constants.BES_PARAMETER_TYPES)
_helper("cluster", "nicparams", cluster.SimpleFillNIC({}),
constants.NICS_PARAMETER_TYPES)
_helper_nic("cluster", cluster.SimpleFillNIC({}))
_helper("cluster", "ndparams", cluster.SimpleFillND({}),
constants.NDS_PARAMETER_TYPES)
_helper_ipolicy("cluster", cluster.ipolicy, True)
for disk_template in cluster.diskparams:
if disk_template not in constants.DTS_HAVE_ACCESS:
continue
access = cluster.diskparams[disk_template].get(constants.LDP_ACCESS,
constants.DISK_KERNELSPACE)
if access not in constants.DISK_VALID_ACCESS_MODES:
result.append(
"Invalid value of '%s:%s': '%s' (expected one of %s)" % (
disk_template, constants.LDP_ACCESS, access,
utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
)
)
self._VerifyDisks(data, result)
# per-instance checks
for instance_uuid in data.instances:
instance = data.instances[instance_uuid]
if instance.uuid != instance_uuid:
result.append("instance '%s' is indexed by wrong UUID '%s'" %
(instance.name, instance_uuid))
if instance.primary_node not in data.nodes:
result.append("instance '%s' has invalid primary node '%s'" %
(instance.name, instance.primary_node))
for snode in self._UnlockedGetInstanceSecondaryNodes(instance.uuid):
if snode not in data.nodes:
result.append("instance '%s' has invalid secondary node '%s'" %
(instance.name, snode))
for idx, nic in enumerate(instance.nics):
if nic.mac in seen_macs:
result.append("instance '%s' has NIC %d mac %s duplicate" %
(instance.name, idx, nic.mac))
else:
seen_macs.append(nic.mac)
if nic.nicparams:
filled = cluster.SimpleFillNIC(nic.nicparams)
owner = "instance %s nic %d" % (instance.name, idx)
_helper(owner, "nicparams",
filled, constants.NICS_PARAMETER_TYPES)
_helper_nic(owner, filled)
# disk template checks
      if instance.disk_template not in data.cluster.enabled_disk_templates:
result.append("instance '%s' uses the disabled disk template '%s'." %
(instance.name, instance.disk_template))
# parameter checks
if instance.beparams:
_helper("instance %s" % instance.name, "beparams",
cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)
# check that disks exists
for disk_uuid in instance.disks:
if disk_uuid not in data.disks:
result.append("Instance '%s' has invalid disk '%s'" %
(instance.name, disk_uuid))
instance_disks = self._UnlockedGetInstanceDisks(instance.uuid)
# gather the drbd ports for duplicate checks
for (idx, dsk) in enumerate(instance_disks):
if dsk.dev_type in constants.DTS_DRBD:
tcp_port = dsk.logical_id[2]
if tcp_port not in ports:
ports[tcp_port] = []
ports[tcp_port].append((instance.name, "drbd disk %s" % idx))
# gather network port reservation
net_port = getattr(instance, "network_port", None)
if net_port is not None:
if net_port not in ports:
ports[net_port] = []
ports[net_port].append((instance.name, "network port"))
wrong_names = _CheckInstanceDiskIvNames(instance_disks)
if wrong_names:
tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
(idx, exp_name, actual_name))
for (idx, exp_name, actual_name) in wrong_names)
result.append("Instance '%s' has wrongly named disks: %s" %
(instance.name, tmp))
# cluster-wide pool of free ports
for free_port in cluster.tcpudp_port_pool:
if free_port not in ports:
ports[free_port] = []
ports[free_port].append(("cluster", "port marked as free"))
# compute tcp/udp duplicate ports
keys = ports.keys()
keys.sort()
for pnum in keys:
pdata = ports[pnum]
if len(pdata) > 1:
txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
# highest used tcp port check
if keys:
if keys[-1] > cluster.highest_used_port:
result.append("Highest used port mismatch, saved %s, computed %s" %
(cluster.highest_used_port, keys[-1]))
if not data.nodes[cluster.master_node].master_candidate:
result.append("Master node is not a master candidate")
# master candidate checks
mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
if mc_now < mc_max:
result.append("Not enough master candidates: actual %d, target %d" %
(mc_now, mc_max))
# node checks
for node_uuid, node in data.nodes.items():
if node.uuid != node_uuid:
result.append("Node '%s' is indexed by wrong UUID '%s'" %
(node.name, node_uuid))
if [node.master_candidate, node.drained, node.offline].count(True) > 1:
result.append("Node %s state is invalid: master_candidate=%s,"
" drain=%s, offline=%s" %
(node.name, node.master_candidate, node.drained,
node.offline))
if node.group not in data.nodegroups:
result.append("Node '%s' has invalid group '%s'" %
(node.name, node.group))
else:
_helper("node %s" % node.name, "ndparams",
cluster.FillND(node, data.nodegroups[node.group]),
constants.NDS_PARAMETER_TYPES)
used_globals = constants.NDC_GLOBALS.intersection(node.ndparams)
if used_globals:
result.append("Node '%s' has some global parameters set: %s" %
(node.name, utils.CommaJoin(used_globals)))
# nodegroups checks
nodegroups_names = set()
for nodegroup_uuid in data.nodegroups:
nodegroup = data.nodegroups[nodegroup_uuid]
if nodegroup.uuid != nodegroup_uuid:
result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
% (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
if utils.UUID_RE.match(nodegroup.name.lower()):
result.append("node group '%s' (uuid: '%s') has uuid-like name" %
(nodegroup.name, nodegroup.uuid))
if nodegroup.name in nodegroups_names:
result.append("duplicate node group name '%s'" % nodegroup.name)
else:
nodegroups_names.add(nodegroup.name)
group_name = "group %s" % nodegroup.name
_helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy),
False)
if nodegroup.ndparams:
_helper(group_name, "ndparams",
cluster.SimpleFillND(nodegroup.ndparams),
constants.NDS_PARAMETER_TYPES)
# drbd minors check
# FIXME: The check for DRBD map needs to be implemented in WConfd
# IP checks
default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
ips = {}
def _AddIpAddress(ip, name):
ips.setdefault(ip, []).append(name)
_AddIpAddress(cluster.master_ip, "cluster_ip")
for node in data.nodes.values():
_AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
if node.secondary_ip != node.primary_ip:
_AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)
for instance in data.instances.values():
for idx, nic in enumerate(instance.nics):
if nic.ip is None:
continue
nicparams = objects.FillDict(default_nicparams, nic.nicparams)
nic_mode = nicparams[constants.NIC_MODE]
nic_link = nicparams[constants.NIC_LINK]
if nic_mode == constants.NIC_MODE_BRIDGED:
link = "bridge:%s" % nic_link
elif nic_mode == constants.NIC_MODE_ROUTED:
link = "route:%s" % nic_link
elif nic_mode == constants.NIC_MODE_OVS:
link = "ovs:%s" % nic_link
else:
raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)
_AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network),
"instance:%s/nic:%d" % (instance.name, idx))
for ip, owners in ips.items():
if len(owners) > 1:
result.append("IP address %s is used by multiple owners: %s" %
(ip, utils.CommaJoin(owners)))
return result
def _UnlockedVerifyConfigAndLog(self, feedback_fn=None):
"""Verify the configuration and log any errors.
The errors get logged as critical errors and also to the feedback function,
if given.
@param feedback_fn: Callable feedback function
@rtype: list
@return: a list of error messages; a non-empty list signifies
configuration errors
"""
assert feedback_fn is None or callable(feedback_fn)
# Warn on config errors, but don't abort the save - the
# configuration has already been modified, and we can't revert;
# the best we can do is to warn the user and save as is, leaving
# recovery to the user
config_errors = self._UnlockedVerifyConfig()
if config_errors:
errmsg = ("Configuration data is not consistent: %s" %
(utils.CommaJoin(config_errors)))
logging.critical(errmsg)
if feedback_fn:
feedback_fn(errmsg)
return config_errors
@_ConfigSync(shared=1)
def VerifyConfig(self):
"""Verify function.
This is just a wrapper over L{_UnlockedVerifyConfig}.
@rtype: list
@return: a list of error messages; a non-empty list signifies
configuration errors
"""
return self._UnlockedVerifyConfig()
@_ConfigSync()
def AddTcpUdpPort(self, port):
"""Adds a new port to the available port pool.
@warning: this method does not "flush" the configuration (via
L{_WriteConfig}); callers should do that themselves once the
configuration is stable
"""
if not isinstance(port, int):
raise errors.ProgrammerError("Invalid type passed for port")
self._ConfigData().cluster.tcpudp_port_pool.add(port)
@_ConfigSync(shared=1)
def GetPortList(self):
"""Returns a copy of the current port list.
"""
return self._ConfigData().cluster.tcpudp_port_pool.copy()
@_ConfigSync()
def AllocatePort(self):
"""Allocate a port.
The port will be taken from the available port pool or from the
default port range (and in this case we increase
highest_used_port).
"""
# If there are TCP/IP ports configured, we use them first.
if self._ConfigData().cluster.tcpudp_port_pool:
port = self._ConfigData().cluster.tcpudp_port_pool.pop()
else:
port = self._ConfigData().cluster.highest_used_port + 1
if port >= constants.LAST_DRBD_PORT:
raise errors.ConfigurationError("The highest used port is greater"
" than %s. Aborting." %
constants.LAST_DRBD_PORT)
self._ConfigData().cluster.highest_used_port = port
return port
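  # Illustrative note, not part of the original module: AllocatePort first
  # drains ports previously returned through AddTcpUdpPort; only when that
  # pool is empty does it hand out highest_used_port + 1, and it refuses to
  # go beyond LAST_DRBD_PORT.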
@_ConfigSync()
def ComputeDRBDMap(self):
"""Compute the used DRBD minor/nodes.
This is just a wrapper over a call to WConfd.
@return: dictionary of node_uuid: dict of minor: instance_uuid;
the returned dict will have all the nodes in it (even if with
an empty list).
"""
if self._offline:
raise errors.ProgrammerError("Can't call ComputeDRBDMap in offline mode")
else:
return dict(map(lambda (k, v): (k, dict(v)),
self._wconfd.ComputeDRBDMap()))
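  # Illustrative example, not part of the original module: a ComputeDRBDMap
  # result has the shape below (all UUIDs and minors are hypothetical):
  #   {"node-uuid-1": {0: "instance-uuid-a", 1: "instance-uuid-b"},
  #    "node-uuid-2": {0: "instance-uuid-a"}}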
def AllocateDRBDMinor(self, node_uuids, inst_uuid):
"""Allocate a drbd minor.
This is just a wrapper over a call to WConfd.
The free minor will be automatically computed from the existing
devices. A node can be given multiple times in order to allocate
multiple minors. The result is the list of minors, in the same
order as the passed nodes.
@type inst_uuid: string
@param inst_uuid: the instance for which we allocate minors
"""
assert isinstance(inst_uuid, basestring), \
"Invalid argument '%s' passed to AllocateDRBDMinor" % inst_uuid
if self._offline:
raise errors.ProgrammerError("Can't call AllocateDRBDMinor"
" in offline mode")
result = self._wconfd.AllocateDRBDMinor(inst_uuid, node_uuids)
logging.debug("Request to allocate drbd minors, input: %s, returning %s",
node_uuids, result)
return result
def _UnlockedReleaseDRBDMinors(self, inst_uuid):
"""Release temporary drbd minors allocated for a given instance.
This is just a wrapper over a call to WConfd.
@type inst_uuid: string
@param inst_uuid: the instance for which temporary minors should be
released
"""
assert isinstance(inst_uuid, basestring), \
"Invalid argument passed to ReleaseDRBDMinors"
# in offline mode we allow the calls to release DRBD minors,
# because then nothing can be allocated anyway;
# this is useful for testing
if not self._offline:
self._wconfd.ReleaseDRBDMinors(inst_uuid)
@_ConfigSync()
def ReleaseDRBDMinors(self, inst_uuid):
"""Release temporary drbd minors allocated for a given instance.
    This should be called on the error paths; on the success paths
it's automatically called by the ConfigWriter add and update
functions.
This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.
@type inst_uuid: string
@param inst_uuid: the instance for which temporary minors should be
released
"""
self._UnlockedReleaseDRBDMinors(inst_uuid)
@_ConfigSync(shared=1)
def GetConfigVersion(self):
"""Get the configuration version.
@return: Config version
"""
return self._ConfigData().version
@_ConfigSync(shared=1)
def GetClusterName(self):
"""Get cluster name.
@return: Cluster name
"""
return self._ConfigData().cluster.cluster_name
@_ConfigSync(shared=1)
def GetMasterNode(self):
"""Get the UUID of the master node for this cluster.
@return: Master node UUID
"""
return self._ConfigData().cluster.master_node
@_ConfigSync(shared=1)
def GetMasterNodeName(self):
"""Get the hostname of the master node for this cluster.
@return: Master node hostname
"""
return self._UnlockedGetNodeName(self._ConfigData().cluster.master_node)
@_ConfigSync(shared=1)
def GetMasterNodeInfo(self):
"""Get the master node information for this cluster.
@rtype: objects.Node
@return: Master node L{objects.Node} object
"""
return self._UnlockedGetNodeInfo(self._ConfigData().cluster.master_node)
@_ConfigSync(shared=1)
def GetMasterIP(self):
"""Get the IP of the master node for this cluster.
@return: Master IP
"""
return self._ConfigData().cluster.master_ip
@_ConfigSync(shared=1)
def GetMasterNetdev(self):
"""Get the master network device for this cluster.
"""
return self._ConfigData().cluster.master_netdev
@_ConfigSync(shared=1)
def GetMasterNetmask(self):
"""Get the netmask of the master node for this cluster.
"""
return self._ConfigData().cluster.master_netmask
@_ConfigSync(shared=1)
def GetUseExternalMipScript(self):
"""Get flag representing whether to use the external master IP setup script.
"""
return self._ConfigData().cluster.use_external_mip_script
@_ConfigSync(shared=1)
def GetFileStorageDir(self):
"""Get the file storage dir for this cluster.
"""
return self._ConfigData().cluster.file_storage_dir
@_ConfigSync(shared=1)
def GetSharedFileStorageDir(self):
"""Get the shared file storage dir for this cluster.
"""
return self._ConfigData().cluster.shared_file_storage_dir
@_ConfigSync(shared=1)
def GetGlusterStorageDir(self):
"""Get the Gluster storage dir for this cluster.
"""
return self._ConfigData().cluster.gluster_storage_dir
@_ConfigSync(shared=1)
def GetHypervisorType(self):
"""Get the hypervisor type for this cluster.
"""
return self._ConfigData().cluster.enabled_hypervisors[0]
@_ConfigSync(shared=1)
def GetRsaHostKey(self):
"""Return the rsa hostkey from the config.
@rtype: string
@return: the rsa hostkey
"""
return self._ConfigData().cluster.rsahostkeypub
@_ConfigSync(shared=1)
def GetDsaHostKey(self):
"""Return the dsa hostkey from the config.
@rtype: string
@return: the dsa hostkey
"""
return self._ConfigData().cluster.dsahostkeypub
@_ConfigSync(shared=1)
def GetDefaultIAllocator(self):
"""Get the default instance allocator for this cluster.
"""
return self._ConfigData().cluster.default_iallocator
@_ConfigSync(shared=1)
def GetDefaultIAllocatorParameters(self):
"""Get the default instance allocator parameters for this cluster.
@rtype: dict
@return: dict of iallocator parameters
"""
return self._ConfigData().cluster.default_iallocator_params
@_ConfigSync(shared=1)
def GetPrimaryIPFamily(self):
"""Get cluster primary ip family.
@return: primary ip family
"""
return self._ConfigData().cluster.primary_ip_family
@_ConfigSync(shared=1)
def GetMasterNetworkParameters(self):
"""Get network parameters of the master node.
@rtype: L{object.MasterNetworkParameters}
@return: network parameters of the master node
"""
cluster = self._ConfigData().cluster
result = objects.MasterNetworkParameters(
uuid=cluster.master_node, ip=cluster.master_ip,
netmask=cluster.master_netmask, netdev=cluster.master_netdev,
ip_family=cluster.primary_ip_family)
return result
@_ConfigSync(shared=1)
def GetInstallImage(self):
"""Get the install image location
@rtype: string
@return: location of the install image
"""
return self._ConfigData().cluster.install_image
@_ConfigSync()
def SetInstallImage(self, install_image):
"""Set the install image location
@type install_image: string
@param install_image: location of the install image
"""
self._ConfigData().cluster.install_image = install_image
@_ConfigSync(shared=1)
def GetInstanceCommunicationNetwork(self):
"""Get cluster instance communication network
@rtype: string
@return: instance communication network, which is the name of the
network used for instance communication
"""
return self._ConfigData().cluster.instance_communication_network
@_ConfigSync()
def SetInstanceCommunicationNetwork(self, network_name):
"""Set cluster instance communication network
@type network_name: string
@param network_name: instance communication network, which is the name of
the network used for instance communication
"""
self._ConfigData().cluster.instance_communication_network = network_name
@_ConfigSync(shared=1)
def GetZeroingImage(self):
"""Get the zeroing image location
@rtype: string
@return: the location of the zeroing image
"""
return self._config_data.cluster.zeroing_image
@_ConfigSync(shared=1)
def GetCompressionTools(self):
"""Get cluster compression tools
@rtype: list of string
@return: a list of tools that are cleared for use in this cluster for the
purpose of compressing data
"""
return self._ConfigData().cluster.compression_tools
@_ConfigSync()
def SetCompressionTools(self, tools):
"""Set cluster compression tools
@type tools: list of string
@param tools: a list of tools that are cleared for use in this cluster for
the purpose of compressing data
"""
self._ConfigData().cluster.compression_tools = tools
@_ConfigSync()
def AddNodeGroup(self, group, ec_id, check_uuid=True):
"""Add a node group to the configuration.
This method calls group.UpgradeConfig() to fill any missing attributes
according to their default values.
@type group: L{objects.NodeGroup}
@param group: the NodeGroup object to add
@type ec_id: string
@param ec_id: unique id for the job to use when creating a missing UUID
@type check_uuid: bool
    @param check_uuid: add a UUID to the group if it doesn't have one or, if
it does, ensure that it does not exist in the
configuration already
"""
self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid):
"""Add a node group to the configuration.
"""
logging.info("Adding node group %s to configuration", group.name)
# Some code might need to add a node group with a pre-populated UUID
# generated with ConfigWriter.GenerateUniqueID(). We allow them to bypass
# the "does this UUID" exist already check.
if check_uuid:
self._EnsureUUID(group, ec_id)
try:
existing_uuid = self._UnlockedLookupNodeGroup(group.name)
except errors.OpPrereqError:
pass
else:
raise errors.OpPrereqError("Desired group name '%s' already exists as a"
" node group (UUID: %s)" %
(group.name, existing_uuid),
errors.ECODE_EXISTS)
group.serial_no = 1
group.ctime = group.mtime = time.time()
group.UpgradeConfig()
self._ConfigData().nodegroups[group.uuid] = group
self._ConfigData().cluster.serial_no += 1
@_ConfigSync()
def RemoveNodeGroup(self, group_uuid):
"""Remove a node group from the configuration.
@type group_uuid: string
@param group_uuid: the UUID of the node group to remove
"""
logging.info("Removing node group %s from configuration", group_uuid)
if group_uuid not in self._ConfigData().nodegroups:
raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid)
assert len(self._ConfigData().nodegroups) != 1, \
"Group '%s' is the only group, cannot be removed" % group_uuid
del self._ConfigData().nodegroups[group_uuid]
self._ConfigData().cluster.serial_no += 1
def _UnlockedLookupNodeGroup(self, target):
"""Lookup a node group's UUID.
@type target: string or None
@param target: group name or UUID or None to look for the default
@rtype: string
@return: nodegroup UUID
@raises errors.OpPrereqError: when the target group cannot be found
"""
if target is None:
if len(self._ConfigData().nodegroups) != 1:
raise errors.OpPrereqError("More than one node group exists. Target"
" group must be specified explicitly.")
else:
return self._ConfigData().nodegroups.keys()[0]
if target in self._ConfigData().nodegroups:
return target
for nodegroup in self._ConfigData().nodegroups.values():
if nodegroup.name == target:
return nodegroup.uuid
raise errors.OpPrereqError("Node group '%s' not found" % target,
errors.ECODE_NOENT)
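  # Illustrative example, not part of the original module: the lookup above
  # accepts a group name, a group UUID, or None (values are hypothetical):
  #   _UnlockedLookupNodeGroup("default") -> UUID of the group named "default"
  #   _UnlockedLookupNodeGroup(None)      -> UUID of the only group, or an
  #                                          OpPrereqError if several exist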
@_ConfigSync(shared=1)
def LookupNodeGroup(self, target):
"""Lookup a node group's UUID.
This function is just a wrapper over L{_UnlockedLookupNodeGroup}.
@type target: string or None
@param target: group name or UUID or None to look for the default
@rtype: string
@return: nodegroup UUID
"""
return self._UnlockedLookupNodeGroup(target)
def _UnlockedGetNodeGroup(self, uuid):
"""Lookup a node group.
@type uuid: string
@param uuid: group UUID
@rtype: L{objects.NodeGroup} or None
@return: nodegroup object, or None if not found
"""
if uuid not in self._ConfigData().nodegroups:
return None
return self._ConfigData().nodegroups[uuid]
@_ConfigSync(shared=1)
def GetNodeGroup(self, uuid):
"""Lookup a node group.
@type uuid: string
@param uuid: group UUID
@rtype: L{objects.NodeGroup} or None
@return: nodegroup object, or None if not found
"""
return self._UnlockedGetNodeGroup(uuid)
def _UnlockedGetAllNodeGroupsInfo(self):
"""Get the configuration of all node groups.
"""
return dict(self._ConfigData().nodegroups)
@_ConfigSync(shared=1)
def GetAllNodeGroupsInfo(self):
"""Get the configuration of all node groups.
"""
return self._UnlockedGetAllNodeGroupsInfo()
@_ConfigSync(shared=1)
def GetAllNodeGroupsInfoDict(self):
"""Get the configuration of all node groups expressed as a dictionary of
dictionaries.
"""
return dict(map(lambda (uuid, ng): (uuid, ng.ToDict()),
self._UnlockedGetAllNodeGroupsInfo().items()))
@_ConfigSync(shared=1)
def GetNodeGroupList(self):
"""Get a list of node groups.
"""
return self._ConfigData().nodegroups.keys()
@_ConfigSync(shared=1)
def GetNodeGroupMembersByNodes(self, nodes):
"""Get nodes which are member in the same nodegroups as the given nodes.
"""
ngfn = lambda node_uuid: self._UnlockedGetNodeInfo(node_uuid).group
return frozenset(member_uuid
for node_uuid in nodes
for member_uuid in
self._UnlockedGetNodeGroup(ngfn(node_uuid)).members)
@_ConfigSync(shared=1)
def GetMultiNodeGroupInfo(self, group_uuids):
"""Get the configuration of multiple node groups.
@param group_uuids: List of node group UUIDs
@rtype: list
@return: List of tuples of (group_uuid, group_info)
"""
return [(uuid, self._UnlockedGetNodeGroup(uuid)) for uuid in group_uuids]
@_ConfigSync()
def AddInstance(self, instance, ec_id):
"""Add an instance to the config.
This should be used after creating a new instance.
@type instance: L{objects.Instance}
@param instance: the instance object
"""
if not isinstance(instance, objects.Instance):
raise errors.ProgrammerError("Invalid type passed to AddInstance")
all_macs = self._AllMACs()
for nic in instance.nics:
if nic.mac in all_macs:
raise errors.ConfigurationError("Cannot add instance %s:"
" MAC address '%s' already in use." %
(instance.name, nic.mac))
self._CheckUniqueUUID(instance, include_temporary=False)
instance.serial_no = 1
instance.ctime = instance.mtime = time.time()
self._ConfigData().instances[instance.uuid] = instance
self._ConfigData().cluster.serial_no += 1
self._UnlockedReleaseDRBDMinors(instance.uuid)
# FIXME: After RemoveInstance is moved to WConfd, use its internal
# function from TempRes module instead.
self._UnlockedCommitTemporaryIps(ec_id)
def _EnsureUUID(self, item, ec_id):
"""Ensures a given object has a valid UUID.
@param item: the instance or node to be checked
@param ec_id: the execution context id for the uuid reservation
"""
if not item.uuid:
item.uuid = self._GenerateUniqueID(ec_id)
else:
self._CheckUniqueUUID(item, include_temporary=True)
def _CheckUniqueUUID(self, item, include_temporary):
"""Checks that the UUID of the given object is unique.
@param item: the instance or node to be checked
@param include_temporary: whether temporarily generated UUID's should be
included in the check. If the UUID of the item to be checked is
a temporarily generated one, this has to be C{False}.
"""
if not item.uuid:
raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,))
if item.uuid in self._AllIDs(include_temporary=include_temporary):
raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
" in use" % (item.name, item.uuid))
def _SetInstanceStatus(self, inst_uuid, status, disks_active,
admin_state_source):
"""Set the instance's status to a given value.
@rtype: L{objects.Instance}
@return: the updated instance object
"""
if inst_uuid not in self._ConfigData().instances:
raise errors.ConfigurationError("Unknown instance '%s'" %
inst_uuid)
instance = self._ConfigData().instances[inst_uuid]
if status is None:
status = instance.admin_state
if disks_active is None:
disks_active = instance.disks_active
if admin_state_source is None:
admin_state_source = instance.admin_state_source
assert status in constants.ADMINST_ALL, \
"Invalid status '%s' passed to SetInstanceStatus" % (status,)
if instance.admin_state != status or \
instance.disks_active != disks_active or \
instance.admin_state_source != admin_state_source:
instance.admin_state = status
instance.disks_active = disks_active
instance.admin_state_source = admin_state_source
instance.serial_no += 1
instance.mtime = time.time()
return instance
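  # Illustrative sketch (comment only, not part of the original code): the
  # Mark* helpers below are thin wrappers that pass different combinations of
  # (status, disks_active, admin_state_source) to _SetInstanceStatus, e.g.
  #   self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True,
  #                           constants.ADMIN_SOURCE)
  # A None argument means "keep the instance's current value" for that field.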
@_ConfigSync()
def MarkInstanceUp(self, inst_uuid):
"""Mark the instance status to up in the config.
This also sets the instance disks active flag.
@rtype: L{objects.Instance}
@return: the updated instance object
"""
return self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True,
constants.ADMIN_SOURCE)
@_ConfigSync()
def MarkInstanceOffline(self, inst_uuid):
"""Mark the instance status to down in the config.
This also clears the instance disks active flag.
@rtype: L{objects.Instance}
@return: the updated instance object
"""
return self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False,
constants.ADMIN_SOURCE)
@_ConfigSync()
def RemoveInstance(self, inst_uuid):
"""Remove the instance from the configuration.
"""
if inst_uuid not in self._ConfigData().instances:
raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)
# If a network port has been allocated to the instance,
# return it to the pool of free ports.
inst = self._ConfigData().instances[inst_uuid]
network_port = getattr(inst, "network_port", None)
if network_port is not None:
self._ConfigData().cluster.tcpudp_port_pool.add(network_port)
instance = self._UnlockedGetInstanceInfo(inst_uuid)
# FIXME: After RemoveInstance is moved to WConfd, use its internal
# function from TempRes module.
for nic in instance.nics:
if nic.network and nic.ip:
# Return all IP addresses to the respective address pools
self._UnlockedCommitIp(constants.RELEASE_ACTION, nic.network, nic.ip)
del self._ConfigData().instances[inst_uuid]
self._ConfigData().cluster.serial_no += 1
@_ConfigSync()
def RenameInstance(self, inst_uuid, new_name):
"""Rename an instance.
This needs to be done in ConfigWriter and not by RemoveInstance
combined with AddInstance as only we can guarantee an atomic
rename.
"""
if inst_uuid not in self._ConfigData().instances:
raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)
inst = self._ConfigData().instances[inst_uuid]
inst.name = new_name
instance_disks = self._UnlockedGetInstanceDisks(inst_uuid)
    for disk in instance_disks:
if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
# rename the file paths in logical and physical id
file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
disk.logical_id = (disk.logical_id[0],
utils.PathJoin(file_storage_dir, inst.name,
os.path.basename(disk.logical_id[1])))
# Force update of ssconf files
self._ConfigData().cluster.serial_no += 1
@_ConfigSync()
def MarkInstanceDown(self, inst_uuid):
"""Mark the status of an instance to down in the configuration.
This does not touch the instance disks active flag, as shut down instances
can still have active disks.
@rtype: L{objects.Instance}
@return: the updated instance object
"""
return self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None,
constants.ADMIN_SOURCE)
@_ConfigSync()
def MarkInstanceUserDown(self, inst_uuid):
"""Mark the status of an instance to user down in the configuration.
This does not touch the instance disks active flag, as user shut
down instances can still have active disks.
"""
self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None,
constants.USER_SOURCE)
@_ConfigSync()
def MarkInstanceDisksActive(self, inst_uuid):
"""Mark the status of instance disks active.
@rtype: L{objects.Instance}
@return: the updated instance object
"""
return self._SetInstanceStatus(inst_uuid, None, True, None)
@_ConfigSync()
def MarkInstanceDisksInactive(self, inst_uuid):
"""Mark the status of instance disks inactive.
@rtype: L{objects.Instance}
@return: the updated instance object
"""
return self._SetInstanceStatus(inst_uuid, None, False, None)
def _UnlockedGetInstanceList(self):
"""Get the list of instances.
This function is for internal use, when the config lock is already held.
"""
return self._ConfigData().instances.keys()
@_ConfigSync(shared=1)
def GetInstanceList(self):
"""Get the list of instances.
@return: array of instances, ex. ['instance2-uuid', 'instance1-uuid']
"""
return self._UnlockedGetInstanceList()
def ExpandInstanceName(self, short_name):
"""Attempt to expand an incomplete instance name.
"""
# Locking is done in L{ConfigWriter.GetAllInstancesInfo}
all_insts = self.GetAllInstancesInfo().values()
expanded_name = _MatchNameComponentIgnoreCase(
short_name, [inst.name for inst in all_insts])
if expanded_name is not None:
# there has to be exactly one instance with that name
inst = (filter(lambda n: n.name == expanded_name, all_insts)[0])
return (inst.uuid, inst.name)
else:
return (None, None)
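  # Illustrative sketch (comment only; the instance name and UUID below are
  # hypothetical examples, not values from a real cluster):
  #   cfg.ExpandInstanceName("web1")
  #     -> ("1f2e3d4c-...-uuid", "web1.example.com") if exactly one match
  #     -> (None, None) if the short name cannot be expanded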
def _UnlockedGetInstanceInfo(self, inst_uuid):
"""Returns information about an instance.
This function is for internal use, when the config lock is already held.
"""
if inst_uuid not in self._ConfigData().instances:
return None
return self._ConfigData().instances[inst_uuid]
@_ConfigSync(shared=1)
def GetInstanceInfo(self, inst_uuid):
"""Returns information about an instance.
    It takes the information from the configuration file. Other information of
    an instance is taken from the live systems.
@param inst_uuid: UUID of the instance
@rtype: L{objects.Instance}
@return: the instance object
"""
return self._UnlockedGetInstanceInfo(inst_uuid)
@_ConfigSync(shared=1)
def GetInstanceNodeGroups(self, inst_uuid, primary_only=False):
"""Returns set of node group UUIDs for instance's nodes.
@rtype: frozenset
"""
instance = self._UnlockedGetInstanceInfo(inst_uuid)
if not instance:
raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)
if primary_only:
nodes = [instance.primary_node]
else:
nodes = self._UnlockedGetInstanceNodes(instance.uuid)
return frozenset(self._UnlockedGetNodeInfo(node_uuid).group
for node_uuid in nodes)
@_ConfigSync(shared=1)
def GetInstanceNetworks(self, inst_uuid):
"""Returns set of network UUIDs for instance's nics.
@rtype: frozenset
"""
instance = self._UnlockedGetInstanceInfo(inst_uuid)
if not instance:
raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)
networks = set()
for nic in instance.nics:
if nic.network:
networks.add(nic.network)
return frozenset(networks)
@_ConfigSync(shared=1)
def GetMultiInstanceInfo(self, inst_uuids):
"""Get the configuration of multiple instances.
@param inst_uuids: list of instance UUIDs
@rtype: list
    @return: list of tuples (instance UUID, instance_info), where
        instance_info is what would GetInstanceInfo return for the
        instance, while keeping the original order
"""
return [(uuid, self._UnlockedGetInstanceInfo(uuid)) for uuid in inst_uuids]
@_ConfigSync(shared=1)
def GetMultiInstanceInfoByName(self, inst_names):
"""Get the configuration of multiple instances.
@param inst_names: list of instance names
@rtype: list
    @return: list of tuples (instance UUID, instance_info), where
        instance_info is what would GetInstanceInfo return for the
        instance, while keeping the original order
"""
result = []
for name in inst_names:
instance = self._UnlockedGetInstanceInfoByName(name)
result.append((instance.uuid, instance))
return result
@_ConfigSync(shared=1)
def GetAllInstancesInfo(self):
"""Get the configuration of all instances.
@rtype: dict
    @return: dict of (instance UUID, instance_info), where instance_info is
        what would GetInstanceInfo return for the instance
"""
return self._UnlockedGetAllInstancesInfo()
def _UnlockedGetAllInstancesInfo(self):
my_dict = dict([(inst_uuid, self._UnlockedGetInstanceInfo(inst_uuid))
for inst_uuid in self._UnlockedGetInstanceList()])
return my_dict
@_ConfigSync(shared=1)
def GetInstancesInfoByFilter(self, filter_fn):
"""Get instance configuration with a filter.
@type filter_fn: callable
@param filter_fn: Filter function receiving instance object as parameter,
returning boolean. Important: this function is called while the
configuration locks is held. It must not do any complex work or call
functions potentially leading to a deadlock. Ideally it doesn't call any
other functions and just compares instance attributes.
"""
return dict((uuid, inst)
for (uuid, inst) in self._ConfigData().instances.items()
if filter_fn(inst))
@_ConfigSync(shared=1)
def GetInstanceInfoByName(self, inst_name):
"""Get the L{objects.Instance} object for a named instance.
@param inst_name: name of the instance to get information for
@type inst_name: string
@return: the corresponding L{objects.Instance} instance or None if no
information is available
"""
return self._UnlockedGetInstanceInfoByName(inst_name)
def _UnlockedGetInstanceInfoByName(self, inst_name):
for inst in self._UnlockedGetAllInstancesInfo().values():
if inst.name == inst_name:
return inst
return None
def _UnlockedGetInstanceName(self, inst_uuid):
inst_info = self._UnlockedGetInstanceInfo(inst_uuid)
if inst_info is None:
raise errors.OpExecError("Unknown instance: %s" % inst_uuid)
return inst_info.name
@_ConfigSync(shared=1)
def GetInstanceName(self, inst_uuid):
"""Gets the instance name for the passed instance.
@param inst_uuid: instance UUID to get name for
@type inst_uuid: string
@rtype: string
@return: instance name
"""
return self._UnlockedGetInstanceName(inst_uuid)
@_ConfigSync(shared=1)
def GetInstanceNames(self, inst_uuids):
"""Gets the instance names for the passed list of nodes.
@param inst_uuids: list of instance UUIDs to get names for
@type inst_uuids: list of strings
@rtype: list of strings
@return: list of instance names
"""
return self._UnlockedGetInstanceNames(inst_uuids)
@_ConfigSync()
def SetInstancePrimaryNode(self, inst_uuid, target_node_uuid):
"""Sets the primary node of an existing instance
@param inst_uuid: instance UUID
@type inst_uuid: string
@param target_node_uuid: the new primary node UUID
@type target_node_uuid: string
"""
self._UnlockedGetInstanceInfo(inst_uuid).primary_node = target_node_uuid
@_ConfigSync()
def SetDiskNodes(self, disk_uuid, nodes):
"""Sets the nodes of an existing disk
@param disk_uuid: disk UUID
@type disk_uuid: string
@param nodes: the new nodes for the disk
@type nodes: list of node uuids
"""
self._UnlockedGetDiskInfo(disk_uuid).nodes = nodes
def _UnlockedGetInstanceNames(self, inst_uuids):
return [self._UnlockedGetInstanceName(uuid) for uuid in inst_uuids]
def _UnlockedAddNode(self, node, ec_id):
"""Add a node to the configuration.
@type node: L{objects.Node}
@param node: a Node instance
"""
logging.info("Adding node %s to configuration", node.name)
self._EnsureUUID(node, ec_id)
node.serial_no = 1
node.ctime = node.mtime = time.time()
self._UnlockedAddNodeToGroup(node.uuid, node.group)
assert node.uuid in self._ConfigData().nodegroups[node.group].members
self._ConfigData().nodes[node.uuid] = node
self._ConfigData().cluster.serial_no += 1
@_ConfigSync()
def AddNode(self, node, ec_id):
"""Add a node to the configuration.
@type node: L{objects.Node}
@param node: a Node instance
"""
self._UnlockedAddNode(node, ec_id)
@_ConfigSync()
def RemoveNode(self, node_uuid):
"""Remove a node from the configuration.
"""
logging.info("Removing node %s from configuration", node_uuid)
if node_uuid not in self._ConfigData().nodes:
raise errors.ConfigurationError("Unknown node '%s'" % node_uuid)
self._UnlockedRemoveNodeFromGroup(self._ConfigData().nodes[node_uuid])
del self._ConfigData().nodes[node_uuid]
self._ConfigData().cluster.serial_no += 1
def ExpandNodeName(self, short_name):
"""Attempt to expand an incomplete node name into a node UUID.
"""
# Locking is done in L{ConfigWriter.GetAllNodesInfo}
all_nodes = self.GetAllNodesInfo().values()
expanded_name = _MatchNameComponentIgnoreCase(
short_name, [node.name for node in all_nodes])
if expanded_name is not None:
# there has to be exactly one node with that name
node = (filter(lambda n: n.name == expanded_name, all_nodes)[0])
return (node.uuid, node.name)
else:
return (None, None)
def _UnlockedGetNodeInfo(self, node_uuid):
"""Get the configuration of a node, as stored in the config.
This function is for internal use, when the config lock is already
held.
@param node_uuid: the node UUID
@rtype: L{objects.Node}
@return: the node object
"""
if node_uuid not in self._ConfigData().nodes:
return None
return self._ConfigData().nodes[node_uuid]
@_ConfigSync(shared=1)
def GetNodeInfo(self, node_uuid):
"""Get the configuration of a node, as stored in the config.
This is just a locked wrapper over L{_UnlockedGetNodeInfo}.
@param node_uuid: the node UUID
@rtype: L{objects.Node}
@return: the node object
"""
return self._UnlockedGetNodeInfo(node_uuid)
@_ConfigSync(shared=1)
def GetNodeInstances(self, node_uuid):
"""Get the instances of a node, as stored in the config.
@param node_uuid: the node UUID
@rtype: (list, list)
@return: a tuple with two lists: the primary and the secondary instances
"""
pri = []
sec = []
for inst in self._ConfigData().instances.values():
if inst.primary_node == node_uuid:
pri.append(inst.uuid)
if node_uuid in self._UnlockedGetInstanceSecondaryNodes(inst.uuid):
sec.append(inst.uuid)
return (pri, sec)
@_ConfigSync(shared=1)
def GetNodeGroupInstances(self, uuid, primary_only=False):
"""Get the instances of a node group.
@param uuid: Node group UUID
@param primary_only: Whether to only consider primary nodes
@rtype: frozenset
@return: List of instance UUIDs in node group
"""
if primary_only:
nodes_fn = lambda inst: [inst.primary_node]
else:
nodes_fn = lambda inst: self._UnlockedGetInstanceNodes(inst.uuid)
return frozenset(inst.uuid
for inst in self._ConfigData().instances.values()
for node_uuid in nodes_fn(inst)
if self._UnlockedGetNodeInfo(node_uuid).group == uuid)
def _UnlockedGetHvparamsString(self, hvname):
"""Return the string representation of the list of hyervisor parameters of
the given hypervisor.
@see: C{GetHvparams}
"""
result = ""
hvparams = self._ConfigData().cluster.hvparams[hvname]
for key in hvparams:
result += "%s=%s\n" % (key, hvparams[key])
return result
@_ConfigSync(shared=1)
def GetHvparamsString(self, hvname):
"""Return the hypervisor parameters of the given hypervisor.
@type hvname: string
@param hvname: name of a hypervisor
@rtype: string
@return: string containing key-value-pairs, one pair on each line;
format: KEY=VALUE
"""
return self._UnlockedGetHvparamsString(hvname)
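  # Illustrative sketch (comment only; the hypervisor parameter names and
  # values are hypothetical): for hvparams
  #   {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/xvda1"}
  # the returned string consists of newline-terminated KEY=VALUE pairs, e.g.
  #   "kernel_path=/boot/vmlinuz\nroot_path=/dev/xvda1\n"
  # Key order follows dict iteration order and is therefore not guaranteed.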
def _UnlockedGetNodeList(self):
"""Return the list of nodes which are in the configuration.
This function is for internal use, when the config lock is already
held.
@rtype: list
"""
return self._ConfigData().nodes.keys()
@_ConfigSync(shared=1)
def GetNodeList(self):
"""Return the list of nodes which are in the configuration.
"""
return self._UnlockedGetNodeList()
def _UnlockedGetOnlineNodeList(self):
"""Return the list of nodes which are online.
"""
all_nodes = [self._UnlockedGetNodeInfo(node)
for node in self._UnlockedGetNodeList()]
return [node.uuid for node in all_nodes if not node.offline]
@_ConfigSync(shared=1)
def GetOnlineNodeList(self):
"""Return the list of nodes which are online.
"""
return self._UnlockedGetOnlineNodeList()
@_ConfigSync(shared=1)
def GetVmCapableNodeList(self):
"""Return the list of nodes which are not vm capable.
"""
all_nodes = [self._UnlockedGetNodeInfo(node)
for node in self._UnlockedGetNodeList()]
return [node.uuid for node in all_nodes if node.vm_capable]
@_ConfigSync(shared=1)
def GetNonVmCapableNodeList(self):
"""Return the list of nodes' uuids which are not vm capable.
"""
all_nodes = [self._UnlockedGetNodeInfo(node)
for node in self._UnlockedGetNodeList()]
return [node.uuid for node in all_nodes if not node.vm_capable]
@_ConfigSync(shared=1)
def GetNonVmCapableNodeNameList(self):
"""Return the list of nodes' names which are not vm capable.
"""
all_nodes = [self._UnlockedGetNodeInfo(node)
for node in self._UnlockedGetNodeList()]
return [node.name for node in all_nodes if not node.vm_capable]
@_ConfigSync(shared=1)
def GetMultiNodeInfo(self, node_uuids):
"""Get the configuration of multiple nodes.
@param node_uuids: list of node UUIDs
@rtype: list
@return: list of tuples of (node, node_info), where node_info is
what would GetNodeInfo return for the node, in the original
order
"""
return [(uuid, self._UnlockedGetNodeInfo(uuid)) for uuid in node_uuids]
def _UnlockedGetAllNodesInfo(self):
"""Gets configuration of all nodes.
@note: See L{GetAllNodesInfo}
"""
return dict([(node_uuid, self._UnlockedGetNodeInfo(node_uuid))
for node_uuid in self._UnlockedGetNodeList()])
@_ConfigSync(shared=1)
def GetAllNodesInfo(self):
"""Get the configuration of all nodes.
@rtype: dict
@return: dict of (node, node_info), where node_info is what
would GetNodeInfo return for the node
"""
return self._UnlockedGetAllNodesInfo()
def _UnlockedGetNodeInfoByName(self, node_name):
for node in self._UnlockedGetAllNodesInfo().values():
if node.name == node_name:
return node
return None
@_ConfigSync(shared=1)
def GetNodeInfoByName(self, node_name):
"""Get the L{objects.Node} object for a named node.
@param node_name: name of the node to get information for
@type node_name: string
@return: the corresponding L{objects.Node} instance or None if no
information is available
"""
return self._UnlockedGetNodeInfoByName(node_name)
@_ConfigSync(shared=1)
def GetNodeGroupInfoByName(self, nodegroup_name):
"""Get the L{objects.NodeGroup} object for a named node group.
@param nodegroup_name: name of the node group to get information for
@type nodegroup_name: string
@return: the corresponding L{objects.NodeGroup} instance or None if no
information is available
"""
for nodegroup in self._UnlockedGetAllNodeGroupsInfo().values():
if nodegroup.name == nodegroup_name:
return nodegroup
return None
def _UnlockedGetNodeName(self, node_spec):
if isinstance(node_spec, objects.Node):
return node_spec.name
elif isinstance(node_spec, basestring):
node_info = self._UnlockedGetNodeInfo(node_spec)
if node_info is None:
raise errors.OpExecError("Unknown node: %s" % node_spec)
return node_info.name
else:
raise errors.ProgrammerError("Can't handle node spec '%s'" % node_spec)
@_ConfigSync(shared=1)
def GetNodeName(self, node_spec):
"""Gets the node name for the passed node.
@param node_spec: node to get names for
@type node_spec: either node UUID or a L{objects.Node} object
@rtype: string
@return: node name
"""
return self._UnlockedGetNodeName(node_spec)
def _UnlockedGetNodeNames(self, node_specs):
return [self._UnlockedGetNodeName(node_spec) for node_spec in node_specs]
@_ConfigSync(shared=1)
def GetNodeNames(self, node_specs):
"""Gets the node names for the passed list of nodes.
@param node_specs: list of nodes to get names for
@type node_specs: list of either node UUIDs or L{objects.Node} objects
@rtype: list of strings
@return: list of node names
"""
return self._UnlockedGetNodeNames(node_specs)
@_ConfigSync(shared=1)
def GetNodeGroupsFromNodes(self, node_uuids):
"""Returns groups for a list of nodes.
@type node_uuids: list of string
@param node_uuids: List of node UUIDs
@rtype: frozenset
"""
return frozenset(self._UnlockedGetNodeInfo(uuid).group
for uuid in node_uuids)
def _UnlockedGetMasterCandidateUuids(self):
"""Get the list of UUIDs of master candidates.
@rtype: list of strings
@return: list of UUIDs of all master candidates.
"""
return [node.uuid for node in self._ConfigData().nodes.values()
if node.master_candidate]
@_ConfigSync(shared=1)
def GetMasterCandidateUuids(self):
"""Get the list of UUIDs of master candidates.
@rtype: list of strings
@return: list of UUIDs of all master candidates.
"""
return self._UnlockedGetMasterCandidateUuids()
def _UnlockedGetMasterCandidateStats(self, exceptions=None):
"""Get the number of current and maximum desired and possible candidates.
@type exceptions: list
@param exceptions: if passed, list of nodes that should be ignored
@rtype: tuple
    @return: tuple of (current, desired, maximum possible) candidate counts
"""
mc_now = mc_should = mc_max = 0
for node in self._ConfigData().nodes.values():
if exceptions and node.uuid in exceptions:
continue
if not (node.offline or node.drained) and node.master_capable:
mc_max += 1
if node.master_candidate:
mc_now += 1
mc_should = min(mc_max, self._ConfigData().cluster.candidate_pool_size)
return (mc_now, mc_should, mc_max)
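  # Worked example (comment only, hypothetical cluster): with 5 nodes of which
  # 1 is offline, 4 are master_capable and not drained, 2 of those are already
  # master candidates, and candidate_pool_size = 3:
  #   mc_max    = 4                (online, not drained, master capable)
  #   mc_now    = 2                (current candidates among those)
  #   mc_should = min(4, 3) = 3    (bounded by the candidate pool size)
  # so the function returns (2, 3, 4).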
@_ConfigSync(shared=1)
def GetMasterCandidateStats(self, exceptions=None):
"""Get the number of current and maximum possible candidates.
This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.
@type exceptions: list
@param exceptions: if passed, list of nodes that should be ignored
@rtype: tuple
    @return: tuple of (current, desired, maximum possible) candidate counts
"""
return self._UnlockedGetMasterCandidateStats(exceptions)
@_ConfigSync()
def MaintainCandidatePool(self, exception_node_uuids):
"""Try to grow the candidate pool to the desired size.
@type exception_node_uuids: list
@param exception_node_uuids: if passed, list of nodes that should be ignored
@rtype: list
@return: list with the adjusted nodes (L{objects.Node} instances)
"""
mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(
exception_node_uuids)
mod_list = []
if mc_now < mc_max:
node_list = self._ConfigData().nodes.keys()
random.shuffle(node_list)
for uuid in node_list:
if mc_now >= mc_max:
break
node = self._ConfigData().nodes[uuid]
if (node.master_candidate or node.offline or node.drained or
node.uuid in exception_node_uuids or not node.master_capable):
continue
mod_list.append(node)
node.master_candidate = True
node.serial_no += 1
mc_now += 1
if mc_now != mc_max:
# this should not happen
logging.warning("Warning: MaintainCandidatePool didn't manage to"
" fill the candidate pool (%d/%d)", mc_now, mc_max)
if mod_list:
self._ConfigData().cluster.serial_no += 1
return mod_list
def _UnlockedAddNodeToGroup(self, node_uuid, nodegroup_uuid):
"""Add a given node to the specified group.
"""
if nodegroup_uuid not in self._ConfigData().nodegroups:
# This can happen if a node group gets deleted between its lookup and
# when we're adding the first node to it, since we don't keep a lock in
# the meantime. It's ok though, as we'll fail cleanly if the node group
# is not found anymore.
raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid)
if node_uuid not in self._ConfigData().nodegroups[nodegroup_uuid].members:
self._ConfigData().nodegroups[nodegroup_uuid].members.append(node_uuid)
def _UnlockedRemoveNodeFromGroup(self, node):
"""Remove a given node from its group.
"""
nodegroup = node.group
if nodegroup not in self._ConfigData().nodegroups:
logging.warning("Warning: node '%s' has unknown node group '%s'"
" (while being removed from it)", node.uuid, nodegroup)
nodegroup_obj = self._ConfigData().nodegroups[nodegroup]
if node.uuid not in nodegroup_obj.members:
logging.warning("Warning: node '%s' not a member of its node group '%s'"
" (while being removed from it)", node.uuid, nodegroup)
else:
nodegroup_obj.members.remove(node.uuid)
@_ConfigSync()
def AssignGroupNodes(self, mods):
"""Changes the group of a number of nodes.
    @type mods: list of tuples; (node UUID, new group UUID)
@param mods: Node membership modifications
"""
groups = self._ConfigData().nodegroups
nodes = self._ConfigData().nodes
resmod = []
# Try to resolve UUIDs first
for (node_uuid, new_group_uuid) in mods:
try:
node = nodes[node_uuid]
except KeyError:
raise errors.ConfigurationError("Unable to find node '%s'" % node_uuid)
if node.group == new_group_uuid:
# Node is being assigned to its current group
logging.debug("Node '%s' was assigned to its current group (%s)",
node_uuid, node.group)
continue
# Try to find current group of node
try:
old_group = groups[node.group]
except KeyError:
raise errors.ConfigurationError("Unable to find old group '%s'" %
node.group)
# Try to find new group for node
try:
new_group = groups[new_group_uuid]
except KeyError:
raise errors.ConfigurationError("Unable to find new group '%s'" %
new_group_uuid)
assert node.uuid in old_group.members, \
("Inconsistent configuration: node '%s' not listed in members for its"
" old group '%s'" % (node.uuid, old_group.uuid))
assert node.uuid not in new_group.members, \
("Inconsistent configuration: node '%s' already listed in members for"
" its new group '%s'" % (node.uuid, new_group.uuid))
resmod.append((node, old_group, new_group))
# Apply changes
for (node, old_group, new_group) in resmod:
      assert node.group != new_group.uuid and old_group.uuid != new_group.uuid, \
"Assigning to current group is not possible"
node.group = new_group.uuid
# Update members of involved groups
if node.uuid in old_group.members:
old_group.members.remove(node.uuid)
if node.uuid not in new_group.members:
new_group.members.append(node.uuid)
# Update timestamps and serials (only once per node/group object)
now = time.time()
for obj in frozenset(itertools.chain(*resmod)): # pylint: disable=W0142
obj.serial_no += 1
obj.mtime = now
# Force ssconf update
self._ConfigData().cluster.serial_no += 1
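  # Illustrative sketch (comment only; the UUIDs are hypothetical): mods is a
  # list of (node UUID, target group UUID) pairs, e.g.
  #   cfg.AssignGroupNodes([("node-uuid-1", "group-uuid-b"),
  #                         ("node-uuid-2", "group-uuid-b")])
  # Entries whose node is already in the target group are skipped; the rest
  # are resolved and validated first and only then applied.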
def _BumpSerialNo(self):
"""Bump up the serial number of the config.
"""
self._ConfigData().serial_no += 1
self._ConfigData().mtime = time.time()
def _AllUUIDObjects(self):
"""Returns all objects with uuid attributes.
"""
return (self._ConfigData().instances.values() +
self._ConfigData().nodes.values() +
self._ConfigData().nodegroups.values() +
self._ConfigData().networks.values() +
self._ConfigData().disks.values() +
self._AllNICs() +
[self._ConfigData().cluster])
def GetConfigManager(self, shared=False):
"""Returns a ConfigManager, which is suitable to perform a synchronized
block of configuration operations.
WARNING: This blocks all other configuration operations, so anything that
runs inside the block should be very fast, preferably not using any IO.
"""
return ConfigManager(self, shared)
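  # Illustrative sketch (comment only): ConfigManager is defined elsewhere in
  # this module; assuming it implements the context-manager protocol, the
  # intended usage would look roughly like
  #   with cfg.GetConfigManager(shared=True):
  #     ...fast, IO-free work on the configuration...
  # Treat the exact protocol as an assumption based on the docstring above.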
def _AddLockCount(self, count):
self._lock_count += count
return self._lock_count
def _LockCount(self):
return self._lock_count
def _OpenConfig(self, shared):
"""Read the config data from WConfd or disk.
"""
if self._AddLockCount(1) > 1:
if self._lock_current_shared and not shared:
self._AddLockCount(-1)
raise errors.ConfigurationError("Can't request an exclusive"
" configuration lock while holding"
" shared")
else:
return # we already have the lock, do nothing
else:
self._lock_current_shared = shared
# Read the configuration data. If offline, read the file directly.
# If online, call WConfd.
if self._offline:
try:
raw_data = utils.ReadFile(self._cfg_file)
data_dict = serializer.Load(raw_data)
# Make sure the configuration has the right version
_ValidateConfig(data_dict)
data = objects.ConfigData.FromDict(data_dict)
except errors.ConfigVersionMismatch:
raise
except Exception, err:
raise errors.ConfigurationError(err)
self._cfg_id = utils.GetFileID(path=self._cfg_file)
if (not hasattr(data, "cluster") or
not hasattr(data.cluster, "rsahostkeypub")):
raise errors.ConfigurationError("Incomplete configuration"
" (missing cluster.rsahostkeypub)")
      if data.cluster.master_node not in data.nodes:
msg = ("The configuration denotes node %s as master, but does not"
" contain information about this node" %
data.cluster.master_node)
raise errors.ConfigurationError(msg)
master_info = data.nodes[data.cluster.master_node]
if master_info.name != self._my_hostname and not self._accept_foreign:
msg = ("The configuration denotes node %s as master, while my"
" hostname is %s; opening a foreign configuration is only"
" possible in accept_foreign mode" %
(master_info.name, self._my_hostname))
raise errors.ConfigurationError(msg)
self._SetConfigData(data)
# Upgrade configuration if needed
self._UpgradeConfig(saveafter=True)
else:
if shared:
if self._config_data is None:
logging.debug("Requesting config, as I have no up-to-date copy")
dict_data = self._wconfd.ReadConfig()
else:
logging.debug("My config copy is up to date.")
dict_data = None
else:
# poll until we acquire the lock
while True:
dict_data = \
self._wconfd.LockConfig(self._GetWConfdContext(), bool(shared))
logging.debug("Received config from WConfd.LockConfig [shared=%s]",
bool(shared))
if dict_data is not None:
break
time.sleep(random.random())
try:
if dict_data is not None:
self._SetConfigData(objects.ConfigData.FromDict(dict_data))
except Exception, err:
raise errors.ConfigurationError(err)
# Transitional fix until ConfigWriter is completely rewritten into
# Haskell
self._UpgradeConfig()
def _CloseConfig(self, save):
"""Release resources relating the config data.
"""
if self._AddLockCount(-1) > 0:
return # we still have the lock, do nothing
if save:
try:
logging.debug("Writing configuration and unlocking it")
self._WriteConfig(releaselock=True)
except Exception, err:
logging.critical("Can't write the configuration: %s", str(err))
raise
elif not self._offline:
logging.debug("Unlocking configuration without writing")
self._wconfd.UnlockConfig(self._GetWConfdContext())
# TODO: To WConfd
def _UpgradeConfig(self, saveafter=False):
"""Run any upgrade steps.
    This method performs both in-object upgrades and also updates some data
elements that need uniqueness across the whole configuration or interact
with other objects.
@warning: if 'saveafter' is 'True', this function will call
L{_WriteConfig()} so it needs to be called only from a
"safe" place.
"""
# Keep a copy of the persistent part of _config_data to check for changes
# Serialization doesn't guarantee order in dictionaries
oldconf = copy.deepcopy(self._ConfigData().ToDict())
# In-object upgrades
self._ConfigData().UpgradeConfig()
for item in self._AllUUIDObjects():
if item.uuid is None:
item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
if not self._ConfigData().nodegroups:
default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME
default_nodegroup = objects.NodeGroup(name=default_nodegroup_name,
members=[])
self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True)
for node in self._ConfigData().nodes.values():
if not node.group:
node.group = self._UnlockedLookupNodeGroup(None)
# This is technically *not* an upgrade, but needs to be done both when
# nodegroups are being added, and upon normally loading the config,
# because the members list of a node group is discarded upon
# serializing/deserializing the object.
self._UnlockedAddNodeToGroup(node.uuid, node.group)
modified = (oldconf != self._ConfigData().ToDict())
if modified and saveafter:
self._WriteConfig()
self._UnlockedDropECReservations(_UPGRADE_CONFIG_JID)
else:
if self._offline:
self._UnlockedVerifyConfigAndLog()
def _WriteConfig(self, destination=None, releaselock=False):
"""Write the configuration data to persistent storage.
"""
if destination is None:
destination = self._cfg_file
# Save the configuration data. If offline, write the file directly.
# If online, call WConfd.
if self._offline:
self._BumpSerialNo()
txt = serializer.DumpJson(
self._ConfigData().ToDict(_with_private=True),
private_encoder=serializer.EncodeWithPrivateFields
)
getents = self._getents()
try:
fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt,
close=False, gid=getents.confd_gid, mode=0640)
except errors.LockError:
raise errors.ConfigurationError("The configuration file has been"
" modified since the last write, cannot"
" update")
try:
self._cfg_id = utils.GetFileID(fd=fd)
finally:
os.close(fd)
else:
try:
if releaselock:
self._wconfd.WriteConfigAndUnlock(self._GetWConfdContext(),
self._ConfigData().ToDict())
else:
self._wconfd.WriteConfig(self._GetWConfdContext(),
self._ConfigData().ToDict())
except errors.LockError:
raise errors.ConfigurationError("The configuration file has been"
" modified since the last write, cannot"
" update")
self.write_count += 1
def _GetAllHvparamsStrings(self, hypervisors):
"""Get the hvparams of all given hypervisors from the config.
@type hypervisors: list of string
@param hypervisors: list of hypervisor names
@rtype: dict of strings
@returns: dictionary mapping the hypervisor name to a string representation
of the hypervisor's hvparams
"""
hvparams = {}
for hv in hypervisors:
hvparams[hv] = self._UnlockedGetHvparamsString(hv)
return hvparams
@staticmethod
def _ExtendByAllHvparamsStrings(ssconf_values, all_hvparams):
"""Extends the ssconf_values dictionary by hvparams.
@type ssconf_values: dict of strings
@param ssconf_values: dictionary mapping ssconf_keys to strings
representing the content of ssconf files
@type all_hvparams: dict of strings
@param all_hvparams: dictionary mapping hypervisor names to a string
representation of their hvparams
@rtype: same as ssconf_values
@returns: the ssconf_values dictionary extended by hvparams
"""
for hv in all_hvparams:
ssconf_key = constants.SS_HVPARAMS_PREF + hv
ssconf_values[ssconf_key] = all_hvparams[hv]
return ssconf_values
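  # Illustrative sketch (comment only; the hypervisor name is hypothetical):
  # for all_hvparams == {"kvm": "kernel_path=/boot/vmlinuz\n"} this adds
  #   ssconf_values[constants.SS_HVPARAMS_PREF + "kvm"] =
  #       "kernel_path=/boot/vmlinuz\n"
  # i.e. one ssconf key per hypervisor in all_hvparams, prefixed with
  # SS_HVPARAMS_PREF.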
def _UnlockedGetSsconfValues(self):
"""Return the values needed by ssconf.
@rtype: dict
@return: a dictionary with keys the ssconf names and values their
associated value
"""
fn = "\n".join
instance_names = utils.NiceSort(
[inst.name for inst in
self._UnlockedGetAllInstancesInfo().values()])
node_infos = self._UnlockedGetAllNodesInfo().values()
node_names = [node.name for node in node_infos]
node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
for ninfo in node_infos]
node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
for ninfo in node_infos]
node_vm_capable = ["%s=%s" % (ninfo.name, str(ninfo.vm_capable))
for ninfo in node_infos]
instance_data = fn(instance_names)
off_data = fn(node.name for node in node_infos if node.offline)
on_data = fn(node.name for node in node_infos if not node.offline)
mc_data = fn(node.name for node in node_infos if node.master_candidate)
mc_ips_data = fn(node.primary_ip for node in node_infos
if node.master_candidate)
node_data = fn(node_names)
node_pri_ips_data = fn(node_pri_ips)
node_snd_ips_data = fn(node_snd_ips)
node_vm_capable_data = fn(node_vm_capable)
cluster = self._ConfigData().cluster
cluster_tags = fn(cluster.GetTags())
master_candidates_certs = fn("%s=%s" % (mc_uuid, mc_cert)
for mc_uuid, mc_cert
in cluster.candidate_certs.items())
hypervisor_list = fn(cluster.enabled_hypervisors)
all_hvparams = self._GetAllHvparamsStrings(constants.HYPER_TYPES)
uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")
nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
self._ConfigData().nodegroups.values()]
nodegroups_data = fn(utils.NiceSort(nodegroups))
networks = ["%s %s" % (net.uuid, net.name) for net in
self._ConfigData().networks.values()]
networks_data = fn(utils.NiceSort(networks))
ssconf_values = {
constants.SS_CLUSTER_NAME: cluster.cluster_name,
constants.SS_CLUSTER_TAGS: cluster_tags,
constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir,
constants.SS_GLUSTER_STORAGE_DIR: cluster.gluster_storage_dir,
constants.SS_MASTER_CANDIDATES: mc_data,
constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
constants.SS_MASTER_CANDIDATES_CERTS: master_candidates_certs,
constants.SS_MASTER_IP: cluster.master_ip,
constants.SS_MASTER_NETDEV: cluster.master_netdev,
constants.SS_MASTER_NETMASK: str(cluster.master_netmask),
constants.SS_MASTER_NODE: self._UnlockedGetNodeName(cluster.master_node),
constants.SS_NODE_LIST: node_data,
constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
constants.SS_NODE_VM_CAPABLE: node_vm_capable_data,
constants.SS_OFFLINE_NODES: off_data,
constants.SS_ONLINE_NODES: on_data,
constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
constants.SS_INSTANCE_LIST: instance_data,
constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
constants.SS_HYPERVISOR_LIST: hypervisor_list,
constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
constants.SS_UID_POOL: uid_pool,
constants.SS_NODEGROUPS: nodegroups_data,
constants.SS_NETWORKS: networks_data,
constants.SS_ENABLED_USER_SHUTDOWN: str(cluster.enabled_user_shutdown),
}
ssconf_values = self._ExtendByAllHvparamsStrings(ssconf_values,
all_hvparams)
bad_values = [(k, v) for k, v in ssconf_values.items()
if not isinstance(v, (str, basestring))]
if bad_values:
err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values)
raise errors.ConfigurationError("Some ssconf key(s) have non-string"
" values: %s" % err)
return ssconf_values
@_ConfigSync(shared=1)
def GetSsconfValues(self):
"""Wrapper using lock around _UnlockedGetSsconf().
"""
return self._UnlockedGetSsconfValues()
@_ConfigSync(shared=1)
def GetVGName(self):
"""Return the volume group name.
"""
return self._ConfigData().cluster.volume_group_name
@_ConfigSync()
def SetVGName(self, vg_name):
"""Set the volume group name.
"""
self._ConfigData().cluster.volume_group_name = vg_name
self._ConfigData().cluster.serial_no += 1
@_ConfigSync(shared=1)
def GetDRBDHelper(self):
"""Return DRBD usermode helper.
"""
return self._ConfigData().cluster.drbd_usermode_helper
@_ConfigSync()
def SetDRBDHelper(self, drbd_helper):
"""Set DRBD usermode helper.
"""
self._ConfigData().cluster.drbd_usermode_helper = drbd_helper
self._ConfigData().cluster.serial_no += 1
@_ConfigSync(shared=1)
def GetMACPrefix(self):
"""Return the mac prefix.
"""
return self._ConfigData().cluster.mac_prefix
@_ConfigSync(shared=1)
def GetClusterInfo(self):
"""Returns information about the cluster
@rtype: L{objects.Cluster}
@return: the cluster object
"""
return self._ConfigData().cluster
@_ConfigSync(shared=1)
def HasAnyDiskOfType(self, dev_type):
"""Check if in there is at disk of the given type in the configuration.
"""
return self._ConfigData().HasAnyDiskOfType(dev_type)
@_ConfigSync(shared=1)
def GetDetachedConfig(self):
"""Returns a detached version of a ConfigManager, which represents
a read-only snapshot of the configuration at this particular time.
"""
return DetachedConfig(self._ConfigData())
@_ConfigSync()
def Update(self, target, feedback_fn, ec_id=None):
"""Notify function to be called after updates.
This function must be called when an object (as returned by
GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
caller wants the modifications saved to the backing store. Note
that all modified objects will be saved, but the target argument
is the one the caller wants to ensure that it's saved.
@param target: an instance of either L{objects.Cluster},
L{objects.Node} or L{objects.Instance} which is existing in
the cluster
@param feedback_fn: Callable feedback function
"""
if self._ConfigData() is None:
raise errors.ProgrammerError("Configuration file not read,"
" cannot save.")
def check_serial(target, current):
if current is None:
raise errors.ConfigurationError("Configuration object unknown")
elif current.serial_no != target.serial_no:
raise errors.ConfigurationError("Configuration object updated since"
" it has been read: %d != %d",
current.serial_no, target.serial_no)
def replace_in(target, tdict):
check_serial(target, tdict.get(target.uuid))
tdict[target.uuid] = target
update_serial = False
if isinstance(target, objects.Cluster):
check_serial(target, self._ConfigData().cluster)
self._ConfigData().cluster = target
elif isinstance(target, objects.Node):
replace_in(target, self._ConfigData().nodes)
update_serial = True
elif isinstance(target, objects.Instance):
replace_in(target, self._ConfigData().instances)
elif isinstance(target, objects.NodeGroup):
replace_in(target, self._ConfigData().nodegroups)
elif isinstance(target, objects.Network):
replace_in(target, self._ConfigData().networks)
elif isinstance(target, objects.Disk):
replace_in(target, self._ConfigData().disks)
else:
raise errors.ProgrammerError("Invalid object type (%s) passed to"
" ConfigWriter.Update" % type(target))
target.serial_no += 1
target.mtime = now = time.time()
if update_serial:
# for node updates, we need to increase the cluster serial too
self._ConfigData().cluster.serial_no += 1
self._ConfigData().cluster.mtime = now
if isinstance(target, objects.Instance):
self._UnlockedReleaseDRBDMinors(target.uuid)
if ec_id is not None:
# Commit all ips reserved by OpInstanceSetParams and OpGroupSetParams
# FIXME: After RemoveInstance is moved to WConfd, use its internal
# functions from TempRes module.
self._UnlockedCommitTemporaryIps(ec_id)
# Just verify the configuration with our feedback function.
# It will get written automatically by the decorator.
self._UnlockedVerifyConfigAndLog(feedback_fn=feedback_fn)
def _UnlockedDropECReservations(self, _ec_id):
"""Drop per-execution-context reservations
"""
# FIXME: Remove the following two lines after all reservations are moved to
# wconfd.
for rm in self._all_rms:
rm.DropECReservations(_ec_id)
if not self._offline:
self._wconfd.DropAllReservations(self._GetWConfdContext())
def DropECReservations(self, ec_id):
self._UnlockedDropECReservations(ec_id)
@_ConfigSync(shared=1)
def GetAllNetworksInfo(self):
"""Get configuration info of all the networks.
"""
return dict(self._ConfigData().networks)
def _UnlockedGetNetworkList(self):
"""Get the list of networks.
This function is for internal use, when the config lock is already held.
"""
return self._ConfigData().networks.keys()
@_ConfigSync(shared=1)
def GetNetworkList(self):
"""Get the list of networks.
    @return: array of networks, ex. ["main", "vlan100", "200"]
"""
return self._UnlockedGetNetworkList()
@_ConfigSync(shared=1)
def GetNetworkNames(self):
"""Get a list of network names
"""
names = [net.name
for net in self._ConfigData().networks.values()]
return names
def _UnlockedGetNetwork(self, uuid):
"""Returns information about a network.
This function is for internal use, when the config lock is already held.
"""
if uuid not in self._ConfigData().networks:
return None
return self._ConfigData().networks[uuid]
@_ConfigSync(shared=1)
def GetNetwork(self, uuid):
"""Returns information about a network.
It takes the information from the configuration file.
@param uuid: UUID of the network
@rtype: L{objects.Network}
@return: the network object
"""
return self._UnlockedGetNetwork(uuid)
@_ConfigSync()
def AddNetwork(self, net, ec_id, check_uuid=True):
"""Add a network to the configuration.
@type net: L{objects.Network}
@param net: the Network object to add
@type ec_id: string
@param ec_id: unique id for the job to use when creating a missing UUID
"""
self._UnlockedAddNetwork(net, ec_id, check_uuid)
def _UnlockedAddNetwork(self, net, ec_id, check_uuid):
"""Add a network to the configuration.
"""
logging.info("Adding network %s to configuration", net.name)
if check_uuid:
self._EnsureUUID(net, ec_id)
net.serial_no = 1
net.ctime = net.mtime = time.time()
self._ConfigData().networks[net.uuid] = net
self._ConfigData().cluster.serial_no += 1
def _UnlockedLookupNetwork(self, target):
"""Lookup a network's UUID.
@type target: string
@param target: network name or UUID
@rtype: string
@return: network UUID
@raises errors.OpPrereqError: when the target network cannot be found
"""
if target is None:
return None
if target in self._ConfigData().networks:
return target
for net in self._ConfigData().networks.values():
if net.name == target:
return net.uuid
raise errors.OpPrereqError("Network '%s' not found" % target,
errors.ECODE_NOENT)
@_ConfigSync(shared=1)
def LookupNetwork(self, target):
"""Lookup a network's UUID.
This function is just a wrapper over L{_UnlockedLookupNetwork}.
@type target: string
@param target: network name or UUID
@rtype: string
@return: network UUID
"""
return self._UnlockedLookupNetwork(target)
@_ConfigSync()
def RemoveNetwork(self, network_uuid):
"""Remove a network from the configuration.
@type network_uuid: string
@param network_uuid: the UUID of the network to remove
"""
logging.info("Removing network %s from configuration", network_uuid)
if network_uuid not in self._ConfigData().networks:
raise errors.ConfigurationError("Unknown network '%s'" % network_uuid)
del self._ConfigData().networks[network_uuid]
self._ConfigData().cluster.serial_no += 1
def _UnlockedGetGroupNetParams(self, net_uuid, node_uuid):
"""Get the netparams (mode, link) of a network.
Get a network's netparams for a given node.
@type net_uuid: string
@param net_uuid: network uuid
@type node_uuid: string
@param node_uuid: node UUID
@rtype: dict or None
@return: netparams
"""
node_info = self._UnlockedGetNodeInfo(node_uuid)
nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
netparams = nodegroup_info.networks.get(net_uuid, None)
return netparams
@_ConfigSync(shared=1)
def GetGroupNetParams(self, net_uuid, node_uuid):
"""Locking wrapper of _UnlockedGetGroupNetParams()
"""
return self._UnlockedGetGroupNetParams(net_uuid, node_uuid)
@_ConfigSync(shared=1)
def CheckIPInNodeGroup(self, ip, node_uuid):
"""Check IP uniqueness in nodegroup.
    Check the networks that are connected to the node's node group
    to see whether the given IP is contained in any of them. Used when
    creating/adding a NIC to ensure uniqueness among nodegroups.
@type ip: string
@param ip: ip address
@type node_uuid: string
@param node_uuid: node UUID
@rtype: (string, dict) or (None, None)
@return: (network name, netparams)
"""
if ip is None:
return (None, None)
node_info = self._UnlockedGetNodeInfo(node_uuid)
nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
for net_uuid in nodegroup_info.networks.keys():
net_info = self._UnlockedGetNetwork(net_uuid)
pool = network.AddressPool(net_info)
if pool.Contains(ip):
return (net_info.name, nodegroup_info.networks[net_uuid])
return (None, None)
@_ConfigSync(shared=1)
def GetCandidateCerts(self):
"""Returns the candidate certificate map.
"""
return self._ConfigData().cluster.candidate_certs
@_ConfigSync()
def AddNodeToCandidateCerts(self, node_uuid, cert_digest,
info_fn=logging.info, warn_fn=logging.warn):
"""Adds an entry to the candidate certificate map.
@type node_uuid: string
@param node_uuid: the node's UUID
@type cert_digest: string
@param cert_digest: the digest of the node's client SSL certificate
@type info_fn: function
@param info_fn: logging function for information messages
@type warn_fn: function
@param warn_fn: logging function for warning messages
"""
cluster = self._ConfigData().cluster
if node_uuid in cluster.candidate_certs:
old_cert_digest = cluster.candidate_certs[node_uuid]
if old_cert_digest == cert_digest:
if info_fn is not None:
info_fn("Certificate digest for node %s already in config."
"Not doing anything." % node_uuid)
return
else:
if warn_fn is not None:
warn_fn("Overriding differing certificate digest for node %s"
% node_uuid)
cluster.candidate_certs[node_uuid] = cert_digest
@_ConfigSync()
def RemoveNodeFromCandidateCerts(self, node_uuid,
warn_fn=logging.warn):
"""Removes the entry of the given node in the certificate map.
@type node_uuid: string
@param node_uuid: the node's UUID
@type warn_fn: function
@param warn_fn: logging function for warning messages
"""
cluster = self._ConfigData().cluster
if node_uuid not in cluster.candidate_certs:
if warn_fn is not None:
warn_fn("Cannot remove certifcate for node %s, because it's not"
" in the candidate map." % node_uuid)
return
del cluster.candidate_certs[node_uuid]
def FlushConfig(self):
"""Force the distribution of configuration to master candidates.
    It is not necessary to hold a lock for this operation; it is handled
    internally by WConfd.
"""
if not self._offline:
self._wconfd.FlushConfig()
class DetachedConfig(ConfigWriter):
def __init__(self, config_data):
    super(DetachedConfig, self).__init__(offline=True)
self._SetConfigData(config_data)
@staticmethod
def _WriteCallError():
raise errors.ProgrammerError("DetachedConfig supports only read-only"
" operations")
def _OpenConfig(self, shared):
if not shared:
DetachedConfig._WriteCallError()
def _CloseConfig(self, save):
if save:
DetachedConfig._WriteCallError()
| ganeti-github-testing/ganeti-test-1 | lib/config.py | Python | bsd-2-clause | 117,521 |
import torch
from termcolor import cprint, colored as c
def num_flat_features(x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
def forward_tracer(self, input, output):
cprint(c("--> " + self.__class__.__name__, 'red') + " ===forward==> ")
# print('')
# print('input: ', type(input))
# print('input[0]: ', type(input[0]))
# print('output: ', type(output))
# print('')
# print('input size:', input[0].size())
# print('output size:', output.data.size())
# print('output norm:', output.data.norm())
def backward_tracer(self, input, output):
cprint(c("--> " + self.__class__.__name__, 'red') + " ===backward==> ")
CHARS = "\x00 ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890.,;:?\"'\n\r\t~!@#$%^&*()-=_+<>{}[]|\\`~\xa0"
CHAR_DICT = {ch: i for i, ch in enumerate(CHARS)}
class Char2Vec():
def __init__(self, size=None, chars=None):
if chars is None:
self.chars = CHARS
else:
self.chars = chars
        # Build the lookup from the character set actually in use, so a custom
        # `chars` argument is honored instead of always falling back to CHARS.
        self.char_dict = {ch: i for i, ch in enumerate(self.chars)}
        if size:
            self.size = size
        else:
            self.size = len(self.chars)
def one_hot(self, source):
y = torch.LongTensor([[self.char_dict[char]] for char in source])
y_onehot = torch.zeros(len(source), self.size)
y_onehot.scatter_(1, y, 1)
return y_onehot
def char_code(self, source):
return torch.LongTensor([self.char_dict[char] for char in source])
def vec2str(self, vec):
chars = [self.chars[ind] for ind in vec.cpu().data.numpy()]
return ''.join(chars)
if __name__ == "__main__":
# test
print(Char2Vec(65).one_hot("B"))
encoded = list(map(Char2Vec(65).one_hot, "Mary has a little lamb."))
print(encoded)
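    # Round-trip sketch (added illustration): encode a short string to integer
    # char codes and decode it back with vec2str. vec2str expects an object
    # exposing .cpu().data, so the codes are wrapped in an autograd Variable;
    # the string "hello" and the variable names below are arbitrary examples.
    from torch.autograd import Variable
    c2v = Char2Vec()
    codes = Variable(c2v.char_code("hello"))
    print(c2v.vec2str(codes))  # expected output: hello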
| kinshuk4/MoocX | misc/deep_learning_notes/pytorch_playground/utils.py | Python | mit | 1,898 |
# -*- coding: utf-8 -*-
#
from django.urls import path
from rest_framework.routers import DefaultRouter
from .. import api
app_name = 'orgs'
router = DefaultRouter()
# 将会删除
router.register(r'org/(?P<org_id>[0-9a-zA-Z\-]{36})/membership/admins',
api.OrgMembershipAdminsViewSet, 'membership-admins')
router.register(r'org/(?P<org_id>[0-9a-zA-Z\-]{36})/membership/users',
api.OrgMembershipUsersViewSet, 'membership-users'),
# 替换为这个
router.register(r'orgs/(?P<org_id>[0-9a-zA-Z\-]{36})/membership/admins',
api.OrgMembershipAdminsViewSet, 'membership-admins-2')
router.register(r'orgs/(?P<org_id>[0-9a-zA-Z\-]{36})/membership/users',
api.OrgMembershipUsersViewSet, 'membership-users-2'),
router.register(r'orgs', api.OrgViewSet, 'org')
urlpatterns = [
]
urlpatterns += router.urls
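# Illustrative note (sketch, not the router's literal output): with the
# registrations above, DefaultRouter generates list and detail routes such as
#   orgs/                                 -> api.OrgViewSet (list/create)
#   orgs/<pk>/                            -> api.OrgViewSet (retrieve/update)
#   orgs/<org_id>/membership/admins/      -> api.OrgMembershipAdminsViewSet
# Reverse names combine app_name 'orgs' with the basenames given above, e.g.
# 'orgs:org-list'; treat the concrete URL forms here as an approximation.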
| liuzheng712/jumpserver | apps/orgs/urls/api_urls.py | Python | gpl-2.0 | 866 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import datetime
import threading
from applicationinsights import TelemetryClient
from applicationinsights.exceptions import enable
from azclishell import __version__
from azure.cli.core._profile import Profile
from azure.cli.core.telemetry import _user_agrees_to_telemetry
INSTRUMENTATION_KEY = '762871d5-45a2-4d67-bf47-e396caf53d9d'
def my_context(tel_client):
""" context for the application """
tel_client.context.application.id = 'Azure CLI Shell'
tel_client.context.application.ver = __version__
tel_client.context.user.id = Profile().get_installation_id()
tel_client.context.instrumentation_key = INSTRUMENTATION_KEY
class Telemetry(TelemetryClient):
""" base telemetry sessions """
start_time = None
end_time = None
def track_ssg(self, gesture, cmd):
""" track shell specific gestures """
self.track_event('Shell Specific Gesture', {gesture : cmd})
def track_key(self, key):
""" tracks the special key bindings """
self.track_event('Key Press', {"key": key})
@_user_agrees_to_telemetry
def start(self):
""" starts recording stuff """
self.start_time = str(datetime.datetime.now())
@_user_agrees_to_telemetry
def conclude(self):
""" concludings recording stuff """
self.end_time = str(datetime.datetime.now())
self.track_event('Run', {'start time' : self.start_time,
'end time' : self.end_time})
thread1 = TelThread(self.flush)
thread1.start()
class TelThread(threading.Thread):
""" telemetry thread for exiting """
def __init__(self, threadfunc):
threading.Thread.__init__(self)
self.threadfunc = threadfunc
def run(self):
self.threadfunc()
TC = Telemetry(INSTRUMENTATION_KEY)
enable(INSTRUMENTATION_KEY)
my_context(TC)
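# Illustrative usage sketch (comment only, not part of the original module):
# the shell is expected to call TC.start() when the interactive session
# begins, record gestures and key presses while it runs, and call
# TC.conclude() on exit, which sends the 'Run' event and flushes telemetry on
# a background TelThread. The concrete gesture/key values are hypothetical.
#   TC.start()
#   TC.track_key('F1')
#   TC.track_ssg('exit', '')
#   TC.conclude()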
| oakeyc/azure-cli-shell | azclishell/telemetry.py | Python | mit | 2,206 |
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import collections
import hashlib
import os
import platform
import subprocess
import sys
import settings
if sys.version_info.major >= 3:
import runners.util as util # pylint: disable=import-error
else:
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/runners')
import util
OUTPUT_DIR = os.path.join(settings.PROJECT_DIR, 'build', 'tests')
Options = collections.namedtuple('Options', ['name', 'build_args', 'test_args', 'skip'])
Options.__new__.__defaults__ = ([], [], False)
def skip_if(condition, desc):
return desc if condition else False
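# Illustrative sketch (comment only): skip_if() returns the description when
# the condition holds and False otherwise, which matches the default of the
# Options.skip field above. The message below is an arbitrary example.
#   skip_if(sys.platform == 'win32', 'not supported on Windows')
#     -> 'not supported on Windows' on Windows, False elsewhere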
OPTIONS_COMMON = ['--lto=off']
OPTIONS_PROFILE_MIN = ['--profile=minimal']
OPTIONS_PROFILE_ES51 = ['--profile=es5.1']
OPTIONS_PROFILE_ESNEXT = ['--profile=es.next', '--function-to-string=on']
OPTIONS_STACK_LIMIT = ['--stack-limit=96']
OPTIONS_GC_MARK_LIMIT = ['--gc-mark-limit=16']
OPTIONS_MEM_STRESS = ['--mem-stress-test=on']
OPTIONS_DEBUG = ['--debug']
OPTIONS_SNAPSHOT = ['--snapshot-save=on', '--snapshot-exec=on', '--jerry-cmdline-snapshot=on']
OPTIONS_UNITTESTS = ['--unittests=on', '--jerry-cmdline=off', '--error-messages=on',
'--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on',
'--vm-throw=on', '--line-info=on', '--mem-stats=on']
OPTIONS_DOCTESTS = ['--doctests=on', '--jerry-cmdline=off', '--error-messages=on',
'--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on']
# Test options for unittests
JERRY_UNITTESTS_OPTIONS = [
Options('unittests-es.next',
OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ESNEXT
+ ['--promise-callback=on']),
Options('doctests-es.next',
OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ESNEXT
+ ['--promise-callback=on']),
Options('unittests-es5.1',
OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51),
Options('doctests-es5.1',
OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51),
Options('unittests-es5.1-init-fini',
OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51
+ ['--cmake-param=-DFEATURE_INIT_FINI=ON'],
skip=skip_if((sys.platform == 'win32'), 'FEATURE_INIT_FINI build flag isn\'t supported on Windows,' +
' because Microsoft Visual C/C++ Compiler doesn\'t support' +
' library constructors and destructors.')),
Options('unittests-es5.1-math',
OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51
+ ['--jerry-math=on']),
]
# Test options for jerry-tests
JERRY_TESTS_OPTIONS = [
Options('jerry_tests-es.next',
OPTIONS_COMMON + OPTIONS_PROFILE_ESNEXT + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
+ OPTIONS_MEM_STRESS),
Options('jerry_tests-es5.1',
OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT),
Options('jerry_tests-es5.1-snapshot',
OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT,
['--snapshot']),
Options('jerry_tests-es5.1-cpointer_32bit',
OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
+ ['--cpointer-32bit=on', '--mem-heap=1024']),
Options('jerry_tests-es5.1-external_context',
OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
+ ['--external-context=on']),
]
# Test options for test262
TEST262_TEST_SUITE_OPTIONS = [
Options('test262_tests', OPTIONS_PROFILE_ES51),
]
# Test options for test262-es2015
TEST262_ES2015_TEST_SUITE_OPTIONS = [
Options('test262_tests_es2015', OPTIONS_PROFILE_ESNEXT + ['--line-info=on', '--error-messages=on']),
]
# Test options for test262-esnext
TEST262_ESNEXT_TEST_SUITE_OPTIONS = [
Options('test262_tests_esnext', OPTIONS_PROFILE_ESNEXT
+ ['--line-info=on', '--error-messages=on', '--mem-heap=20480']),
]
# Test options for jerry-debugger
DEBUGGER_TEST_OPTIONS = [
Options('jerry_debugger_tests',
['--jerry-debugger=on'])
]
# Test options for buildoption-test
JERRY_BUILDOPTIONS = [
Options('buildoption_test-lto',
['--lto=on']),
Options('buildoption_test-error_messages',
['--error-messages=on']),
Options('buildoption_test-logging',
['--logging=on']),
Options('buildoption_test-amalgam',
['--amalgam=on']),
Options('buildoption_test-valgrind',
['--valgrind=on']),
Options('buildoption_test-mem_stats',
['--mem-stats=on']),
Options('buildoption_test-show_opcodes',
['--show-opcodes=on']),
Options('buildoption_test-show_regexp_opcodes',
['--show-regexp-opcodes=on']),
Options('buildoption_test-cpointer_32bit',
['--compile-flag=-m32', '--cpointer-32bit=on', '--system-allocator=on'],
skip=skip_if(
platform.system() != 'Linux' or (platform.machine() != 'i386' and platform.machine() != 'x86_64'),
'-m32 is only supported on x86[-64]-linux')
),
Options('buildoption_test-jerry_math',
['--jerry-math=on']),
Options('buildoption_test-no_lcache_prophashmap',
['--compile-flag=-DJERRY_LCACHE=0', '--compile-flag=-DJERRY_PROPERTY_HASHMAP=0']),
Options('buildoption_test-external_context',
['--external-context=on']),
Options('buildoption_test-shared_libs',
['--shared-libs=on'],
skip=skip_if((sys.platform == 'win32'), 'Not yet supported, link failure on Windows')),
Options('buildoption_test-cmdline_test',
['--jerry-cmdline-test=on'],
            skip=skip_if((sys.platform == 'win32'), 'rand() can\'t be overridden on Windows (benchmarking.c)')),
Options('buildoption_test-cmdline_snapshot',
['--jerry-cmdline-snapshot=on']),
Options('buildoption_test-recursion_limit',
OPTIONS_STACK_LIMIT),
Options('buildoption_test-gc-mark_limit',
OPTIONS_GC_MARK_LIMIT),
Options('buildoption_test-jerry-debugger',
['--jerry-debugger=on']),
Options('buildoption_test-module-off',
['--compile-flag=-DJERRY_MODULE_SYSTEM=0', '--lto=off']),
Options('buildoption_test-builtin-proxy-off',
['--compile-flag=-DJERRY_BUILTIN_PROXY=0']),
]
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--toolchain', metavar='FILE',
help='Add toolchain file')
parser.add_argument('-q', '--quiet', action='store_true',
help='Only print out failing tests')
parser.add_argument('--buildoptions', metavar='LIST',
help='Add a comma separated list of extra build options to each test')
parser.add_argument('--skip-list', metavar='LIST',
help='Add a comma separated list of patterns of the excluded JS-tests')
parser.add_argument('--outdir', metavar='DIR', default=OUTPUT_DIR,
help='Specify output directory (default: %(default)s)')
parser.add_argument('--check-signed-off', metavar='TYPE', nargs='?',
choices=['strict', 'tolerant', 'gh-actions'], const='strict',
help='Run signed-off check (%(choices)s; default type if not given: %(const)s)')
parser.add_argument('--check-cppcheck', action='store_true',
help='Run cppcheck')
parser.add_argument('--check-doxygen', action='store_true',
help='Run doxygen')
parser.add_argument('--check-pylint', action='store_true',
help='Run pylint')
parser.add_argument('--check-format', action='store_true',
help='Run format check')
parser.add_argument('--check-license', action='store_true',
help='Run license check')
parser.add_argument('--check-magic-strings', action='store_true',
help='Run "magic string source code generator should be executed" check')
parser.add_argument('--build-debug', action='store_true',
help='Build debug version jerryscript')
parser.add_argument('--jerry-debugger', action='store_true',
help='Run jerry-debugger tests')
parser.add_argument('--jerry-tests', action='store_true',
help='Run jerry-tests')
parser.add_argument('--test262', action='store_true',
help='Run test262 - ES5.1')
parser.add_argument('--test262-es2015', default=False, const='default',
nargs='?', choices=['default', 'all', 'update'],
help='Run test262 - ES2015. default: all tests except excludelist, ' +
'all: all tests, update: all tests and update excludelist')
parser.add_argument('--test262-esnext', default=False, const='default',
nargs='?', choices=['default', 'all', 'update'],
help='Run test262 - ESnext. default: all tests except excludelist, ' +
'all: all tests, update: all tests and update excludelist')
parser.add_argument('--test262-test-list', metavar='LIST',
help='Add a comma separated list of tests or directories to run in test262 test suite')
parser.add_argument('--unittests', action='store_true',
help='Run unittests (including doctests)')
parser.add_argument('--buildoption-test', action='store_true',
help='Run buildoption-test')
parser.add_argument('--all', '--precommit', action='store_true',
help='Run all tests')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
script_args = parser.parse_args()
if script_args.test262_test_list and not \
(script_args.test262 or script_args.test262_es2015 or script_args.test262_esnext):
print("--test262-test-list is only allowed with --test262 or --test262-es2015 or --test262-esnext\n")
parser.print_help()
sys.exit(1)
return script_args
BINARY_CACHE = {}
TERM_NORMAL = '\033[0m'
TERM_YELLOW = '\033[1;33m'
TERM_BLUE = '\033[1;34m'
TERM_RED = '\033[1;31m'
def report_command(cmd_type, cmd, env=None):
sys.stderr.write('%s%s%s\n' % (TERM_BLUE, cmd_type, TERM_NORMAL))
if env is not None:
sys.stderr.write(''.join('%s%s=%r \\%s\n' % (TERM_BLUE, var, val, TERM_NORMAL)
for var, val in sorted(env.items())))
sys.stderr.write('%s%s%s\n' % (TERM_BLUE, (' \\%s\n\t%s' % (TERM_NORMAL, TERM_BLUE)).join(cmd), TERM_NORMAL))
def report_skip(job):
sys.stderr.write('%sSkipping: %s' % (TERM_YELLOW, job.name))
if job.skip:
sys.stderr.write(' (%s)' % job.skip)
sys.stderr.write('%s\n' % TERM_NORMAL)
def create_binary(job, options):
build_args = job.build_args[:]
build_dir_path = os.path.join(options.outdir, job.name)
if options.build_debug:
build_args.extend(OPTIONS_DEBUG)
build_dir_path = os.path.join(options.outdir, job.name + '-debug')
if options.buildoptions:
for option in options.buildoptions.split(','):
if option not in build_args:
build_args.append(option)
build_cmd = util.get_python_cmd_prefix()
build_cmd.append(settings.BUILD_SCRIPT)
build_cmd.extend(build_args)
build_cmd.append('--builddir=%s' % build_dir_path)
install_dir_path = os.path.join(build_dir_path, 'local')
build_cmd.append('--install=%s' % install_dir_path)
if options.toolchain:
build_cmd.append('--toolchain=%s' % options.toolchain)
report_command('Build command:', build_cmd)
binary_key = tuple(sorted(build_args))
if binary_key in BINARY_CACHE:
ret, build_dir_path = BINARY_CACHE[binary_key]
sys.stderr.write('(skipping: already built at %s with returncode %d)\n' % (build_dir_path, ret))
return ret, build_dir_path
try:
subprocess.check_output(build_cmd)
ret = 0
except subprocess.CalledProcessError as err:
print(err.output)
ret = err.returncode
BINARY_CACHE[binary_key] = (ret, build_dir_path)
return ret, build_dir_path
def get_binary_path(build_dir_path):
executable_extension = '.exe' if sys.platform == 'win32' else ''
return os.path.join(build_dir_path, 'local', 'bin', 'jerry' + executable_extension)
def hash_binary(bin_path):
blocksize = 65536
hasher = hashlib.sha1()
with open(bin_path, 'rb') as bin_file:
buf = bin_file.read(blocksize)
while buf:
hasher.update(buf)
buf = bin_file.read(blocksize)
return hasher.hexdigest()
def iterate_test_runner_jobs(jobs, options):
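    # Build each job once, then avoid redundant test runs: skip a job if its build directory
    # was already tested, or if the produced binary's SHA-1 hash matches an already-tested one.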
tested_paths = set()
tested_hashes = {}
for job in jobs:
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
yield job, ret_build, None
if build_dir_path in tested_paths:
sys.stderr.write('(skipping: already tested with %s)\n' % build_dir_path)
continue
else:
tested_paths.add(build_dir_path)
bin_path = get_binary_path(build_dir_path)
bin_hash = hash_binary(bin_path)
if bin_hash in tested_hashes:
sys.stderr.write('(skipping: already tested with equivalent %s)\n' % tested_hashes[bin_hash])
continue
else:
tested_hashes[bin_hash] = build_dir_path
test_cmd = util.get_python_cmd_prefix()
test_cmd.extend([settings.TEST_RUNNER_SCRIPT, '--engine', bin_path])
yield job, ret_build, test_cmd
def run_check(runnable, env=None):
report_command('Test command:', runnable, env=env)
if env is not None:
full_env = dict(os.environ)
full_env.update(env)
env = full_env
proc = subprocess.Popen(runnable, env=env)
proc.wait()
return proc.returncode
def run_jerry_debugger_tests(options):
ret_build = ret_test = 0
for job in DEBUGGER_TEST_OPTIONS:
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
break
for channel in ["websocket", "rawpacket"]:
for test_file in os.listdir(settings.DEBUGGER_TESTS_DIR):
if test_file.endswith(".cmd"):
test_case, _ = os.path.splitext(test_file)
test_case_path = os.path.join(settings.DEBUGGER_TESTS_DIR, test_case)
test_cmd = [
settings.DEBUGGER_TEST_RUNNER_SCRIPT,
get_binary_path(build_dir_path),
channel,
settings.DEBUGGER_CLIENT_SCRIPT,
os.path.relpath(test_case_path, settings.PROJECT_DIR)
]
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd)
return ret_build | ret_test
def run_jerry_tests(options):
ret_build = ret_test = 0
for job, ret_build, test_cmd in iterate_test_runner_jobs(JERRY_TESTS_OPTIONS, options):
if ret_build:
break
test_cmd.append('--test-dir')
test_cmd.append(settings.JERRY_TESTS_DIR)
if options.quiet:
test_cmd.append("-q")
skip_list = []
if '--profile=es.next' in job.build_args:
skip_list.append(os.path.join('es5.1', ''))
else:
skip_list.append(os.path.join('es.next', ''))
if options.skip_list:
skip_list.append(options.skip_list)
if skip_list:
test_cmd.append("--skip-list=" + ",".join(skip_list))
if job.test_args:
test_cmd.extend(job.test_args)
ret_test |= run_check(test_cmd, env=dict(TZ='UTC'))
return ret_build | ret_test
def run_test262_test_suite(options):
ret_build = ret_test = 0
jobs = []
if options.test262:
jobs.extend(TEST262_TEST_SUITE_OPTIONS)
if options.test262_es2015:
jobs.extend(TEST262_ES2015_TEST_SUITE_OPTIONS)
if options.test262_esnext:
jobs.extend(TEST262_ESNEXT_TEST_SUITE_OPTIONS)
for job in jobs:
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
break
test_cmd = util.get_python_cmd_prefix() + [
settings.TEST262_RUNNER_SCRIPT,
'--engine', get_binary_path(build_dir_path),
'--test262-object',
'--test-dir', settings.TEST262_TEST_SUITE_DIR
]
if job.name.endswith('es2015'):
test_cmd.append('--es2015')
test_cmd.append(options.test262_es2015)
elif job.name.endswith('esnext'):
test_cmd.append('--esnext')
test_cmd.append(options.test262_esnext)
else:
test_cmd.append('--es51')
if job.test_args:
test_cmd.extend(job.test_args)
if options.test262_test_list:
test_cmd.append('--test262-test-list')
test_cmd.append(options.test262_test_list)
ret_test |= run_check(test_cmd, env=dict(TZ='America/Los_Angeles'))
return ret_build | ret_test
def run_unittests(options):
ret_build = ret_test = 0
for job in JERRY_UNITTESTS_OPTIONS:
if job.skip:
report_skip(job)
continue
ret_build, build_dir_path = create_binary(job, options)
if ret_build:
print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
break
if sys.platform == 'win32':
if options.build_debug:
build_config = "Debug"
else:
build_config = "MinSizeRel"
else:
build_config = ""
ret_test |= run_check(
util.get_python_cmd_prefix() +
[settings.UNITTEST_RUNNER_SCRIPT] +
[os.path.join(build_dir_path, 'tests', build_config)] +
(["-q"] if options.quiet else [])
)
return ret_build | ret_test
def run_buildoption_test(options):
for job in JERRY_BUILDOPTIONS:
if job.skip:
report_skip(job)
continue
ret, _ = create_binary(job, options)
if ret:
print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
break
return ret
Check = collections.namedtuple('Check', ['enabled', 'runner', 'arg'])
def main(options):
checks = [
Check(options.check_signed_off, run_check, [settings.SIGNED_OFF_SCRIPT]
+ {'tolerant': ['--tolerant'], 'gh-actions': ['--gh-actions']}.get(options.check_signed_off, [])),
Check(options.check_cppcheck, run_check, [settings.CPPCHECK_SCRIPT]),
Check(options.check_doxygen, run_check, [settings.DOXYGEN_SCRIPT]),
Check(options.check_pylint, run_check, [settings.PYLINT_SCRIPT]),
Check(options.check_format, run_check, [settings.FORMAT_SCRIPT]),
Check(options.check_license, run_check, [settings.LICENSE_SCRIPT]),
Check(options.check_magic_strings, run_check, [settings.MAGIC_STRINGS_SCRIPT]),
Check(options.jerry_debugger, run_jerry_debugger_tests, options),
Check(options.jerry_tests, run_jerry_tests, options),
Check(options.test262 or options.test262_es2015 or options.test262_esnext, run_test262_test_suite, options),
Check(options.unittests, run_unittests, options),
Check(options.buildoption_test, run_buildoption_test, options),
]
for check in checks:
if check.enabled or options.all:
ret = check.runner(check.arg)
if ret:
sys.exit(ret)
if __name__ == "__main__":
main(get_arguments())
| robertsipka/jerryscript | tools/run-tests.py | Python | apache-2.0 | 20,729 |
#!/usr/bin/env python3
import os
import io
import sys
import re
import xml.etree.ElementTree as ET
# on msys, use crlf output
nl = None
if sys.platform == 'msys':
nl = "\r\n"
# Get the file, relative to this script's location (same directory)
# that way we're not sensitive to CWD
pathname = os.path.abspath(os.path.dirname(sys.argv[0])) + os.path.sep
# open the file for write
f = open(pathname + 'vk_dispatch_defs.h', mode='w', newline = nl)
# open XML registry
registry = ET.parse(pathname + 'vk.xml').getroot()
# f.write the file, starting with a template header
f.write('''
/******************************************************************************
* The MIT License (MIT)
*
* Copyright (c) 2019 Baldur Karlsson
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
/******************************************************************************
* Generated from Khronos's vk.xml:
'''.lstrip())
def prefix_star(line):
if line == '':
return ' *'
else:
return ' * ' + line
# Print the first two comments with the license
for comment in registry.findall('comment')[0:2]:
f.write("\n".join([prefix_star(line.strip()) for line in comment.text.split('\n')]))
f.write('''
******************************************************************************/
// This file is autogenerated with gen_dispatch_table.py - any changes will be overwritten next time
// that script is run.
// $ ./gen_dispatch_table.py
#pragma once
#include "official/vulkan.h"
// this file is autogenerated, so don't worry about clang-format issues
// clang-format off
'''.lstrip())
platform_defines = {}
# Cache the platform defines that protect each platform name
for plat in registry.findall('platforms/platform'):
platform_defines[plat.attrib['name']] = plat.attrib['protect']
# Process all commands and categorise into instance or device
commands = {}
INSTANCE_CMD = 1
DEVICE_CMD = 2
# Some special cases we manually set
commands['vkCreateInstance'] = INSTANCE_CMD
commands['vkEnumerateInstanceVersion'] = INSTANCE_CMD
commands['vkEnumerateInstanceLayerProperties'] = INSTANCE_CMD
commands['vkEnumerateInstanceExtensionProperties'] = INSTANCE_CMD
for cmd in registry.findall('commands/command'):
if 'alias' in cmd.attrib:
name = cmd.attrib['name']
alias = cmd.attrib['alias']
if alias not in commands:
raise ValueError('alias {} of {} defined, but {} is unknown'.format(name, alias, alias))
commands[name] = commands[alias]
continue
name = cmd.find('proto/name').text
if name in commands:
continue
first_param_type = cmd.find('param/type').text
if first_param_type == 'VkInstance' or first_param_type == 'VkPhysicalDevice':
commands[name] = INSTANCE_CMD
elif first_param_type == 'VkDevice' or first_param_type == 'VkQueue' or first_param_type == 'VkCommandBuffer':
commands[name] = DEVICE_CMD
else:
raise ValueError('type {} of first parameter to {} is unexpected'.format(first_param_type, name))
inst_commands = ""
dev_commands = ""
processed_commands = [] # some commands come from multiple extensions. Include them only in the first
def process_feature(root, name):
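    # For every command required by this feature/extension block, append a 'PFN_vkFoo Foo;'
    # member to the instance or device dispatch table string, wrapping platform-specific
    # entries in their protecting #ifdef.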
global inst_commands, dev_commands, processed_commands
inst = ""
dev = ""
for req in root.findall('require'):
for cmd in req.findall('command'):
function = cmd.attrib['name']
if function in processed_commands:
continue
processed_commands.append(function)
if function not in commands:
raise ValueError('command {} referenced by {} is unknown'.format(function, name))
table = commands[function]
if table == INSTANCE_CMD:
inst += '\n PFN_{} {};'.format(function, function[2:])
elif table == DEVICE_CMD:
dev += '\n PFN_{} {};'.format(function, function[2:])
else:
raise ValueError('command {} has unknown table type {}'.format(function, table))
if 'platform' in root.attrib:
if inst != "":
inst = '\n#ifdef {plat}{inst}\n#endif // {plat}'.format(plat = platform_defines[root.attrib['platform']], inst = inst)
if dev != "":
dev = '\n#ifdef {plat}{dev}\n#endif // {plat}'.format(plat = platform_defines[root.attrib['platform']], dev = dev)
if inst != "":
inst_commands += " // {name}{inst}\n\n".format(**locals())
if dev != "":
dev_commands += " // {name}{dev}\n\n".format(**locals())
# Look at all features
for feat in registry.findall('feature'):
# Only process vulkan features
if 'api' in feat.attrib and feat.attrib['api'] == 'vulkan':
process_feature(feat, feat.attrib['comment'])
# And all extensions (with KHR extensions sorted to the front)
def ext_sort(ext):
if 'KHR' in ext.attrib['name']:
return int(ext.attrib['number'])
return 10000000 + int(ext.attrib['number'])
for ext in sorted(registry.findall('extensions/extension'), key=ext_sort):
# Only process vulkan extensions
if 'supported' in ext.attrib and ext.attrib['supported'] == 'vulkan':
process_feature(ext, ext.attrib['name'])
inst_commands = inst_commands.strip()
dev_commands = dev_commands.strip()
f.write('''
struct VkInstDispatchTable
{{
{inst_commands}
}};
struct VkDevDispatchTable
{{
{dev_commands}
// for consistency with macros, we declare the CreateDevice pointer here
// even though it won't actually ever get used and is on the instance dispatch chain
PFN_vkCreateDevice CreateDevice;
}};'''.format(**locals()))
| TurtleRockStudios/renderdoc_public | renderdoc/driver/vulkan/gen_dispatch_table.py | Python | mit | 6,772 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from eduid_webapp.signup.app import signup_init_app
app = signup_init_app(name='signup2')
if __name__ == '__main__':
app.logger.info(f'Starting {app}...')
app.run()
| SUNET/eduid-webapp | src/eduid_webapp/signup/run.py | Python | bsd-3-clause | 1,792 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
from sentry.utils.models import update
for project in orm['sentry.Project'].objects.all():
orm['sentry.ProjectKey'].objects.create(
project=project,
user=None,
)
def backwards(self, orm):
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingprojectmember': {
'Meta': {'unique_together': "(('project', 'email'),)", 'object_name': 'PendingProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_project_set'", 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['auth.User']"})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| simmetria/sentry | src/sentry/migrations/0049_create_default_project_keys.py | Python | bsd-3-clause | 20,194 |
from datetime import date
from django.test.utils import override_settings
from .base import SitemapTestsBase
class HTTPSSitemapTests(SitemapTestsBase):
protocol = 'https'
urls = 'django.contrib.sitemaps.tests.urls.https'
def test_secure_sitemap_index(self):
"A secure sitemap index can be rendered"
response = self.client.get('/secure/index.xml')
self.assertEqual(response.content, ("""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/secure/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url).encode('utf-8'))
def test_secure_sitemap_section(self):
"A secure sitemap section can be rendered"
response = self.client.get('/secure/sitemap-simple.xml')
self.assertEqual(response.content, ("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())).encode('utf-8'))
@override_settings(SECURE_PROXY_SSL_HEADER=False)
class HTTPSDetectionSitemapTests(SitemapTestsBase):
extra = {'wsgi.url_scheme': 'https'}
def test_sitemap_index_with_https_request(self):
"A sitemap index requested in HTTPS is rendered with HTTPS links"
response = self.client.get('/simple/index.xml', **self.extra)
self.assertEqual(response.content, ("""<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url.replace('http://', 'https://')).encode('utf-8'))
def test_sitemap_section_with_https_request(self):
"A sitemap section requested in HTTPS is rendered with HTTPS links"
response = self.client.get('/simple/sitemap-simple.xml', **self.extra)
self.assertEqual(response.content, ("""<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url.replace('http://', 'https://'), date.today())).encode('utf-8'))
| vsajip/django | django/contrib/sitemaps/tests/https.py | Python | bsd-3-clause | 2,330 |
from enum import Enum, EnumFactory
__all__ = ["Enum", "EnumFactory"]
| mkaluza/python-enum | enum/__init__.py | Python | gpl-3.0 | 70 |
#--------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------
import wx
import invesalius.project as prj
from invesalius.pubsub import pub as Publisher
from invesalius.gui import utils
from invesalius import constants as const
ORIENTATION_LABEL = {
const.AXIAL: _("Axial"),
const.CORONAL: _("Coronal"),
const.SAGITAL: _("Sagital"),
}
class ProjectProperties(wx.Dialog):
def __init__(self, parent):
super().__init__(id=-1, name='', parent=parent,
style=wx.DEFAULT_FRAME_STYLE, title=_('Project Properties'))
self.Center(wx.BOTH)
self._init_gui()
def _init_gui(self):
project = prj.Project()
self.name_txt = wx.TextCtrl(self, -1, value=project.name)
self.name_txt.SetMinSize((utils.calc_width_needed(self.name_txt, 30), -1))
modality_txt = wx.TextCtrl(self, -1, value=project.modality, style=wx.TE_READONLY)
try:
orientation = ORIENTATION_LABEL[project.original_orientation]
except KeyError:
orientation = _("Other")
orientation_txt = wx.TextCtrl(self, -1, value=orientation, style=wx.TE_READONLY)
sx, sy, sz = project.spacing
spacing_txt_x = wx.TextCtrl(self, -1, value=f"{sx:.5}", style=wx.TE_READONLY)
spacing_txt_y = wx.TextCtrl(self, -1, value=f"{sy:.5}", style=wx.TE_READONLY)
spacing_txt_z = wx.TextCtrl(self, -1, value=f"{sz:.5}", style=wx.TE_READONLY)
name_sizer = wx.BoxSizer(wx.HORIZONTAL)
name_sizer.Add(wx.StaticText(self, -1, _('Name')), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
name_sizer.Add(self.name_txt, 1, wx.EXPAND | wx.ALL, 5)
modality_sizer = wx.BoxSizer(wx.HORIZONTAL)
modality_sizer.Add(wx.StaticText(self, -1, _('Modality')), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
modality_sizer.Add(modality_txt, 1, wx.EXPAND | wx.ALL, 5)
orientation_sizer = wx.BoxSizer(wx.HORIZONTAL)
orientation_sizer.Add(wx.StaticText(self, -1, _('Orientation')), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
orientation_sizer.Add(orientation_txt, 1, wx.EXPAND | wx.ALL, 5)
spacing_sizer = wx.BoxSizer(wx.HORIZONTAL)
spacing_sizer.Add(wx.StaticText(self, -1, _('Spacing')), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
spacing_sizer.Add(spacing_txt_x, 1, wx.EXPAND | wx.ALL, 5)
spacing_sizer.Add(spacing_txt_y, 1, wx.EXPAND | wx.ALL, 5)
spacing_sizer.Add(spacing_txt_z, 1, wx.EXPAND | wx.ALL, 5)
btn_sizer = wx.StdDialogButtonSizer()
btn_ok = wx.Button(self, wx.ID_OK)
btn_ok.SetDefault()
btn_cancel = wx.Button(self, wx.ID_CANCEL)
btn_sizer.AddButton(btn_ok)
btn_sizer.AddButton(btn_cancel)
btn_sizer.Realize()
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(name_sizer, 1, wx.EXPAND)
main_sizer.Add(modality_sizer, 1, wx.EXPAND)
main_sizer.Add(orientation_sizer, 1, wx.EXPAND)
main_sizer.Add(spacing_sizer, 1, wx.EXPAND)
main_sizer.Add(btn_sizer, 1, wx.EXPAND | wx.ALL, 5)
self.SetSizer(main_sizer)
main_sizer.Fit(self)
self.Layout()
| paulojamorim/invesalius3 | invesalius/gui/project_properties.py | Python | gpl-2.0 | 4,085 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# These tests run only under Linux and Python 2.x+
# This is the Travis CI environment.
#
from pycompat import python as py
from pycompat import system
import sys
import unittest
class TestPyCompat(unittest.TestCase):
def test_python_is_64bits(self):
self.assertEqual(py.is_64bits, not py.is_32bits)
def test_is_cpython(self):
self.assertEqual(py.is_cpython, not py.is_pypy)
def test_immutability(self):
with self.assertRaises(AttributeError):
py.is2xx = 1
def test_python_is1xx(self):
self.assertFalse(py.is1xx)
def test_python_is2xx(self):
self.assertEqual(py.is2xx, sys.version_info[0] == 2)
def test_python_is3xx(self):
self.assertEqual(py.is3xx, sys.version_info[0] == 3)
def test_python_is_eqx(self):
self.assertTrue(py.is_eq(sys.version_info[0]))
def test_python_is_eqxx(self):
self.assertTrue(py.is_eq(sys.version_info[0], sys.version_info[1]))
def test_python_is_eqxxx(self):
self.assertTrue(py.is_eq(sys.version_info[0], sys.version_info[1], sys.version_info[2]))
def test_python_is_gtx(self):
self.assertTrue(py.is_gt(sys.version_info[0] - 1))
def test_python_is_gtxx(self):
self.assertTrue(py.is_gt(sys.version_info[0], sys.version_info[1] - 1))
def test_python_is_gtxxx(self):
self.assertTrue(py.is_gt(sys.version_info[0], sys.version_info[1], sys.version_info[2] - 1))
def test_python_is_ltx(self):
self.assertTrue(py.is_lt(sys.version_info[0] + 1))
def test_python_is_ltxx(self):
self.assertTrue(py.is_lt(sys.version_info[0], sys.version_info[1] + 1))
def test_python_is_ltxxx(self):
self.assertTrue(py.is_lt(sys.version_info[0], sys.version_info[1], sys.version_info[2] + 1))
def test_system_is_windows(self):
self.assertFalse(system.is_windows)
def test_system_is_cygwin(self):
self.assertFalse(system.is_cygwin)
def test_system_is_mac_os(self):
self.assertFalse(system.is_mac_os)
def test_system_is_linux(self):
self.assertTrue(system.is_linux)
if __name__ == '__main__':
unittest.main()
| alexandrevicenzi/pycompat | tests/test.py | Python | mit | 2,225 |
#
# For information about atomic writes, see
# -> http://stupidpythonideas.blogspot.com/2014/07/getting-atomic-writes-right.html
#
# Basically, if you're using Python 3.3+, good to go. Otherwise
# we'll try our best, but no guarantees.
#
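# A minimal usage sketch, assuming the caller wants a whole-file atomic write: the data is
# written to a temporary file in the same directory and then swapped into place with the
# file_replace() shim defined below. The helper name, '.tmp' suffix and text mode are
# illustrative only; it relies on this module's `os` import and `file_replace` definition.
import tempfile
def atomic_write_text(path, data):
    # Same-directory temp file keeps the final replace/rename on a single filesystem.
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(os.path.abspath(path)), suffix='.tmp')
    try:
        with os.fdopen(fd, 'w') as tmp_file:
            tmp_file.write(data)
        file_replace(tmp_path, path)
    except Exception:
        os.unlink(tmp_path)
        raise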
import errno
import os
if hasattr(os, 'replace'): # Python 3.3+
file_replace = os.replace
elif os.name != 'nt': # Not Windows
file_replace = os.rename
else: # Windows
def file_replace(src, dst):
try:
os.unlink(dst)
        except OSError as e:
            # FileNotFoundError only exists on Python 3; ignore a missing dst, re-raise anything else.
            if e.errno != errno.ENOENT:
                raise
os.rename(src, dst) | virtuald/git-source-track | git_source_track/compat.py | Python | apache-2.0 | 594 |
# Copyright (C) 2008-2009 Open Society Institute
# Thomas Moroz: tmoroz.org
# 2010-2011 Large Blue
# Fergus Doyle: fergus.doyle@largeblue.com
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License Version 2 as published
# by the Free Software Foundation. You may not use, modify or distribute
# this program under any other version of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import unittest
from repoze.bfg import testing
from zope.interface import implements
from zope.interface import Interface
from zope.interface import taggedValue
from repoze.bfg.testing import cleanUp
from testfixtures import LogCapture
class JQueryLivesearchViewTests(unittest.TestCase):
def setUp(self):
cleanUp()
def tearDown(self):
cleanUp()
def _callFUT(self, context, request):
from opencore.views.search import jquery_livesearch_view
return jquery_livesearch_view(context, request)
def test_no_parameter(self):
context = testing.DummyModel()
request = testing.DummyRequest()
from zope.interface import Interface
from opencore.models.interfaces import ICatalogSearch
testing.registerAdapter(DummySearch, (Interface),
ICatalogSearch)
response = self._callFUT(context, request)
self.assertEqual(response.status, '400 Bad Request')
def test_with_parameter_noresults(self):
def dummy_factory(context, request, term):
def results():
return 0, [], None
return results
from repoze.lemonade.testing import registerListItem
from opencore.models.interfaces import IGroupSearchFactory
registerListItem(IGroupSearchFactory, dummy_factory, 'dummy1',
title='Dummy1', sort_key=1)
context = testing.DummyModel()
request = testing.DummyRequest()
dummycontent = testing.DummyModel()
request.params = {
'val': 'somesearch',
}
response = self._callFUT(context, request)
self.assertEqual(response.status, '200 OK')
from simplejson import loads
results = loads(response.body)
self.assertEqual(len(results), 2)
self.assertEqual(results[0]['rowclass'], 'showall')
self.assertEqual(results[0]['header'], '')
self.assertEqual(results[0]['title'], 'Show All')
self.assertEqual(results[1]['header'], 'Dummy1')
self.assertEqual(results[1]['title'], 'No Result')
def test_with_parameter_withresults(self):
def dummy_factory1(context, request, term):
pass
def dummy_factory2(context, request, term):
def results():
return 1, [1], lambda x: testing.DummyModel(title='yo')
return results
from repoze.lemonade.testing import registerListItem
from opencore.models.interfaces import IGroupSearchFactory
registerListItem(IGroupSearchFactory, dummy_factory1, 'dummy1',
title='Dummy1', sort_key=1)
registerListItem(IGroupSearchFactory, dummy_factory2, 'dummy2',
title='Dummy2', sort_key=2)
context = testing.DummyModel()
request = testing.DummyRequest()
dummycontent = testing.DummyModel()
request.params = {
'val': 'somesearch',
}
response = self._callFUT(context, request)
self.assertEqual(response.status, '200 OK')
from simplejson import loads
results = loads(response.body)
self.assertEqual(len(results), 3)
self.assertEqual(results[0]['rowclass'], 'showall')
self.assertEqual(results[0]['header'], '')
self.assertEqual(results[0]['title'], 'Show All')
self.assertEqual(results[1]['header'], 'Dummy2')
self.assertEqual(results[1]['title'], 'yo')
self.assertEqual(response.content_type, 'application/x-json')
class SearchResultsViewTests(unittest.TestCase):
def setUp(self):
self.log = LogCapture()
cleanUp()
testing.registerDummyRenderer('opencore.views:templates/generic_layout.pt')
testing.registerDummyRenderer(
'opencore.views:templates/community_layout.pt')
def tearDown(self):
self.log.uninstall()
cleanUp()
def _callFUT(self, context, request):
from opencore.views.search import SearchResultsView
from opencore.views.api import get_template_api
request.api = get_template_api(context, request)
view = SearchResultsView(context, request)
view.type_to_result_dict[DummyContent] = 'test-content'
return view()
def test_no_searchterm(self):
from webob.multidict import MultiDict
context = testing.DummyModel()
request = testing.DummyRequest(params=MultiDict())
from opencore.models.interfaces import ICatalogSearch
testing.registerAdapter(DummyEmptySearch, (Interface),
ICatalogSearch)
result = self._callFUT(context, request)
#self.assertEqual(result.status, '404 Not Found')
def test_bad_kind(self):
from webob.multidict import MultiDict
context = testing.DummyModel()
request = testing.DummyRequest(
params=MultiDict({'kind':'unknown', 'body':'yo'}))
from zope.interface import Interface
from opencore.models.interfaces import ICatalogSearch
from webob.exc import HTTPBadRequest
testing.registerAdapter(DummyEmptySearch, (Interface),
ICatalogSearch)
self.assertRaises(HTTPBadRequest, self._callFUT, context, request)
def test_none_kind(self):
from webob.multidict import MultiDict
context = testing.DummyModel()
request = testing.DummyRequest(params=MultiDict({'body':'yo'}))
from zope.interface import Interface
from opencore.models.interfaces import ICatalogSearch
from repoze.lemonade.testing import registerContentFactory
registerContentFactory(DummyContent, IDummyContent)
testing.registerAdapter(DummySearch, (Interface),
ICatalogSearch)
result = self._callFUT(context, request)
self.assertEqual(result['terms'], ['yo'])
self.assertEqual(len(result['results']), 1)
def test_known_kind(self):
from webob.multidict import MultiDict
from opencore.models.interfaces import IGroupSearchFactory
from repoze.lemonade.testing import registerContentFactory
from zope.interface import Interface
content = DummyContent()
def search_factory(*arg, **kw):
return DummySearchFactory(content)
testing.registerUtility(
search_factory, IGroupSearchFactory, name='People')
context = testing.DummyModel()
request = testing.DummyRequest(
params=MultiDict({'body':'yo', 'kind':'People'}))
from opencore.models.interfaces import ICatalogSearch
registerContentFactory(DummyContent, IDummyContent)
testing.registerAdapter(DummySearch, (Interface),
ICatalogSearch)
result = self._callFUT(context, request)
self.assertEqual(result['terms'], ['yo', 'People'])
self.assertEqual(len(result['results']), 1)
def test_community_search(self):
context = testing.DummyModel()
context.title = 'Citizens'
from webob.multidict import MultiDict
from opencore.models.interfaces import ICommunity
from zope.interface import directlyProvides
directlyProvides(context, ICommunity)
request = testing.DummyRequest(params=MultiDict({'body':'yo'}))
from zope.interface import Interface
from opencore.models.interfaces import ICatalogSearch
from repoze.lemonade.testing import registerContentFactory
registerContentFactory(DummyContent, IDummyContent)
testing.registerAdapter(DummySearch, (Interface),
ICatalogSearch)
result = self._callFUT(context, request)
self.assertEqual(result['community'], 'Citizens')
self.assertEqual(result['terms'], ['yo'])
self.assertEqual(len(result['results']), 1)
def test_parse_error(self):
from webob.multidict import MultiDict
context = testing.DummyModel()
request = testing.DummyRequest(params=MultiDict({'body':'the'}))
from zope.interface import Interface
from opencore.models.interfaces import ICatalogSearch
from repoze.lemonade.testing import registerContentFactory
registerContentFactory(DummyContent, IDummyContent)
testing.registerAdapter(ParseErrorSearch, (Interface),
ICatalogSearch)
result = self._callFUT(context, request)
self.assertEqual(len(result['terms']), 0)
self.assertEqual(len(result['results']), 0)
self.assertEqual(result['error'], "Error: 'the' is nonsense")
class GetBatchTests(unittest.TestCase):
def setUp(self):
cleanUp()
def tearDown(self):
cleanUp()
def _callFUT(self, context, request):
from opencore.views.search import get_batch
return get_batch(context, request)
def test_without_kind_with_terms(self):
from webob.multidict import MultiDict
from opencore.models.interfaces import ICatalogSearch
testing.registerAdapter(DummySearch, (Interface),
ICatalogSearch)
request = testing.DummyRequest(
params=MultiDict({'body':'yo'}))
context = testing.DummyModel()
result = self._callFUT(context, request)
self.assertEqual(result[0]['total'], 1)
def test_without_kind_without_terms(self):
from webob.multidict import MultiDict
from opencore.models.interfaces import ICatalogSearch
testing.registerAdapter(DummySearch, (Interface),
ICatalogSearch)
request = testing.DummyRequest(params=MultiDict({}))
context = testing.DummyModel()
result = self._callFUT(context, request)
self.assertEqual(len(result), 2)
def test_with_kind_with_body(self):
from opencore.models.interfaces import IGroupSearchFactory
from repoze.lemonade.testing import registerListItem
from webob.multidict import MultiDict
content = DummyContent()
def search_factory(*arg, **kw):
return DummySearchFactory(content)
registerListItem(IGroupSearchFactory, search_factory, 'dummy1',
title='Dummy1', sort_key=1)
request = testing.DummyRequest(
params=MultiDict({'body':'yo', 'kind':'dummy1'}))
context = testing.DummyModel()
result = self._callFUT(context, request)
self.assertEqual(result[0]['total'], 1)
def test_bad_kind_with_body(self):
from webob.multidict import MultiDict
from webob.exc import HTTPBadRequest
request = testing.DummyRequest(
params=MultiDict({'body':'yo', 'kind':'doesntexist'}))
context = testing.DummyModel()
self.assertRaises(HTTPBadRequest, self._callFUT, context, request)
def test_with_kind_without_body(self):
from opencore.models.interfaces import IGroupSearchFactory
from repoze.lemonade.testing import registerListItem
from webob.multidict import MultiDict
def dummy_factory(context, request, term):
def results():
return 0, [], None
return results
registerListItem(IGroupSearchFactory, dummy_factory, 'dummy1',
title='Dummy1', sort_key=1)
request = testing.DummyRequest(
params=MultiDict({'kind':'dummy1'}))
context = testing.DummyModel()
result = self._callFUT(context, request)
self.assertEqual(result, (None, ()))
class MakeQueryTests(unittest.TestCase):
def setUp(self):
cleanUp()
def tearDown(self):
cleanUp()
def _callFUT(self, params):
from webob.multidict import MultiDict
from opencore.views.search import make_query
context = testing.DummyModel()
request = testing.DummyRequest(params=MultiDict(params))
return make_query(context, request)
def test_body_field(self):
from repoze.lemonade.interfaces import IContent
query, terms = self._callFUT({'body': 'yo'})
self.assertEqual(query, {
'texts': 'yo',
'interfaces': {'operator': 'or', 'query': []},
'sort_index': 'texts',
})
self.assertEqual(terms, ['yo'])
def test_creator_field(self):
from zope.interface import Interface
from zope.interface import implements
from opencore.models.interfaces import ICatalogSearch
from opencore.models.interfaces import IProfile
searched_for = {}
class Profile:
implements(IProfile)
profile = Profile()
profile.__name__ = 'admin'
class ProfileSearch:
def __init__(self, context):
pass
def __call__(self, **kw):
searched_for.update(kw)
return 1, [1], lambda x: profile
testing.registerAdapter(ProfileSearch, (Interface),
ICatalogSearch)
query, terms = self._callFUT({'creator': 'Ad'})
self.assertEquals(searched_for,
{'texts': 'Ad', 'interfaces': [IProfile]})
from repoze.lemonade.interfaces import IContent
self.assertEqual(query, {
'creator': {'query': ['admin'], 'operator': 'or'},
'interfaces': {'operator': 'or', 'query': []},
})
self.assertEqual(terms, ['Ad'])
def test_types_field(self):
from opencore.models.interfaces import IComment
from repoze.lemonade.testing import registerContentFactory
registerContentFactory(DummyContent, IComment)
query, terms = self._callFUT(
{'types': 'opencore_models_interfaces_IComment'})
self.assertEqual(query, {'interfaces':
{'query': [IComment], 'operator': 'or'}})
self.assertEqual(terms, ['Comment'])
def test_tags_field(self):
from repoze.lemonade.interfaces import IContent
query, terms = self._callFUT({'tags': 'a'})
self.assertEqual(query, {
'interfaces': {'operator': 'or', 'query': []},
'tags': {'query': ['a'], 'operator': 'or'},
})
self.assertEqual(terms, ['a'])
def test_year_field(self):
from repoze.lemonade.interfaces import IContent
query, terms = self._callFUT({'year': '1990'})
self.assertEqual(query,
{'creation_date': (6311520, 6626483), 'interfaces': {'operator': 'or', 'query': []}})
self.assertEqual(terms, [1990])
class AdvancedSearchViewTests(unittest.TestCase):
def setUp(self):
cleanUp()
def tearDown(self):
cleanUp()
def test_advancedsearch_view(self):
from opencore.models.interfaces import IComment
from repoze.lemonade.testing import registerContentFactory
registerContentFactory(DummyContent, IComment)
context = testing.DummyModel()
request = testing.DummyRequest()
from opencore.views.api import get_template_api
request.api = get_template_api(context, request)
from opencore.views.search import advancedsearch_view
result = advancedsearch_view(context, request)
self.assertEqual(
result['post_url'], 'http://example.com/searchresults.html')
self.assertEqual(result['type_choices'], [
('Comment', 'opencore_models_interfaces_IComment'),
])
self.assertFalse('2006' in result['year_choices'])
self.assertTrue('2007' in result['year_choices'])
class DummySearch:
def __init__(self, context):
pass
def __call__(self, **kw):
return 1, [1], lambda x: dummycontent
class DummyEmptySearch:
def __init__(self, context):
pass
def __call__(self, **kw):
return 0, [], lambda x: None
class ParseErrorSearch:
def __init__(self, context):
pass
def __call__(self, texts, **kw):
from zope.index.text.parsetree import ParseError
raise ParseError("'%s' is nonsense" % texts)
class DummySearchFactory:
def __init__(self, content):
self.content = content
def get_batch(self):
return {'entries':[self.content], 'total':1}
class IDummyContent(Interface):
taggedValue('name', 'dummy')
class DummyContent(testing.DummyModel):
implements(IDummyContent)
dummycontent = DummyContent()
| amarandon/opencore | opencore/views/tests/test_search.py | Python | gpl-2.0 | 17,417 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline tests."""
from absl.testing import parameterized
import jax
import tensorflow as tf
import tensorflow_datasets as tfds
from spin_spherical_cnns import input_pipeline
from spin_spherical_cnns.configs import default
class InputPipelineTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters("spherical_mnist/rotated",
"spherical_mnist/canonical")
def test_create_datasets_spherical_mnist(self, dataset):
rng = jax.random.PRNGKey(42)
config = default.get_config()
config.dataset = dataset
config.per_device_batch_size = 8
config.eval_pad_last_batch = False
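    # Interpretation (not stated in the test itself): the element shapes
    # asserted below appear to be
    # (local_devices, per_device_batch, height, width, spins, channels).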
    splits = input_pipeline.create_datasets(config, rng)
self.assertEqual(splits.info.features["label"].num_classes, 10)
self.assertEqual(splits.train.element_spec["input"].shape,
(1, 8, 64, 64, 1, 1))
self.assertEqual(splits.train.element_spec["label"].shape, (1, 8))
self.assertEqual(splits.validation.element_spec["input"].shape,
(1, 8, 64, 64, 1, 1))
self.assertEqual(splits.validation.element_spec["label"].shape, (1, 8))
self.assertEqual(splits.test.element_spec["input"].shape,
(1, 8, 64, 64, 1, 1))
self.assertEqual(splits.test.element_spec["label"].shape, (1, 8))
if __name__ == "__main__":
tf.test.main()
| google-research/google-research | spin_spherical_cnns/input_pipeline_test.py | Python | apache-2.0 | 2,021 |
""" Handles setting up voters so an election can be called """
import logging
import socket
import threading
import time
import SocketServer
from .config import Config
from .fle import FastLeaderElection
from .serialization import read_string, write_string
from .state import State
from .vote import Vote
class Voter(threading.Thread):
"""
    A peer receives connections from peers with a higher id and connects to
    peers with a lower id.
It then sends & receives votes until a leader is elected.
"""
class ServerHandler(SocketServer.BaseRequestHandler):
def handle(self):
"""
loop & exchange votes w/ the remote peer's vote
TODO: check if a connection exists for this peer & reject if so
"""
myid = self.server.voter.config.myid
voter = self.server.voter
self.request.settimeout(10)
while voter.running:
try:
data = read_string(self.request)
except socket.timeout:
# that's ok, just try again
continue
if data is None:
logging.error("client went away")
break
try:
othervote = Vote.parse(data)
logging.info("received vote from client: %s", othervote)
voter.update_vote(othervote)
except ValueError:
logging.error("badly serialized vote: %s", data)
break
self.request.sendall(write_string(voter.vote))
class Server(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
allow_reuse_address = True
voter = None
class Client(threading.Thread):
""" handles connection to a remote peer """
TIMEOUT = 5
def __init__(self, voter, pconfig):
super(Voter.Client, self).__init__()
self.setDaemon(True)
self.running = False
self.voter = voter
self.pconfig = pconfig
self.myid = voter.config.myid
self.start()
def run(self):
""" main loop """
logging.info("Connecting to peer %d (myid=%d)", self.pconfig.peer_id, self.myid)
self.running = True
timeout = Voter.Client.TIMEOUT
endpoint = self.pconfig.election_endpoint
voter = self.voter
while self.running:
# first, lets connect
try:
sock = socket.create_connection(endpoint, timeout)
except socket.error as se:
logging.error("connection error: %s", se)
time.sleep(3)
continue
# next, send out vote every 60 secs
while self.running:
try:
sock.sendall(write_string(voter.vote))
data = read_string(sock)
if data is None:
logging.error("server went away")
our_vote_changed = False
try:
othervote = Vote.parse(data)
logging.info("received vote from server: %s", othervote)
our_vote_changed = voter.update_vote(othervote)
except ValueError:
logging.error("badly serialized vote: %s", data)
sock.close()
break
# if our vote changed, don't sleep! send it out immediately
if not our_vote_changed:
# sleep for 60 seconds, but in small bits to check if we are still running
for _ in xrange(0, 600):
if not self.running:
break
time.sleep(0.1)
except socket.error as se:
logging.error("failed to read/write: %s", se)
sock.close()
break
logging.info("exiting Voter.Client's main loop")
def __init__(self, confs, zxid=0x0):
""" parse conf """
super(Voter, self).__init__()
self.setDaemon(True)
self.running = False
self.config = Config.parse(confs)
self.state = State.LOOKING
self.zxid = zxid
# initially, we vote for ourselves
myid = self.config.myid
self.fle_lock = threading.Lock()
self.fle = FastLeaderElection(self.config.members)
self.fle.update(
Vote(self.config.myid, self.state, self.config.myid, self.zxid)
)
self.start()
@property
def vote(self):
"""
this voter's vote
"""
return self.fle.get(self.config.myid)
def update_vote(self, othervote):
"""
update the vote (and check if our vote needs to change)
"""
assert othervote.myid != self.config.myid
self.fle.update(othervote)
# should our vote change?
with self.fle_lock:
if othervote > self.vote:
newvote = Vote(self.vote.myid, self.state, othervote.proposed_id, othervote.zxid)
self.fle.update(newvote)
return True
return False
@property
def leader_id(self):
"""
the elected leader, if any
"""
return self.fle.leader_id
def run(self):
self.running = True
server = Voter.Server(self.config.election_endpoint, Voter.ServerHandler)
server.voter = self
ip, port = server.server_address
self.name = "Voter({}:{})".format(ip, port)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.name = "VoterServer({}:{})".format(ip, port)
server_thread.daemon = True
server_thread.start()
logging.info("Server loop running in thread: %s", server_thread.name)
clients = []
for pconfig in self.config.peers:
if self.config.myid > pconfig.peer_id:
clients.append(Voter.Client(self, pconfig))
while self.running:
time.sleep(0.5)
# shutdown
for client in clients:
client.running = False
while client.isAlive():
time.sleep(0.1)
server.shutdown()
server = None
| rgs1/pyzab | pyzab/voter.py | Python | apache-2.0 | 6,612 |
"""
Interface for Cobbler's XMLRPC API(s).
there are two:
a read-only API that koan uses
a read-write API that requires logins
Copyright 2007-2008, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import sys
import socket
import time
import os
import base64
import SimpleXMLRPCServer
import xmlrpclib
import random
import stat
import errno
import fcntl
import string
import traceback
import glob
import sub_process as subprocess
import api as cobbler_api
import utils
from cexceptions import *
import item_distro
import item_profile
import item_system
import item_repo
import item_image
from utils import *
from utils import _
# FIXME: make configurable?
TOKEN_TIMEOUT = 60*60 # 60 minutes
CACHE_TIMEOUT = 10*60 # 10 minutes
TOKEN_CACHE = {}
# *********************************************************************
# *********************************************************************
class CobblerXMLRPCInterface:
"""
This is the interface used for all XMLRPC methods, for instance,
as used by koan or CobblerWeb
    note: public methods take an optional parameter token that is just
    here for consistency with the read-write API. Read-write operations do
    require the token.
"""
def __init__(self,api,enable_auth_if_relevant):
self.api = api
self.auth_enabled = enable_auth_if_relevant
self.logger = self.api.logger
self.token_cache = TOKEN_CACHE
self.object_cache = {}
self.timestamp = self.api.last_modified_time()
random.seed(time.time())
def __sorter(self,a,b):
return cmp(a["name"],b["name"])
def last_modified_time(self):
"""
Return the time of the last modification to any object
so that we can tell if we need to check for any other
modified objects via more specific calls.
"""
return self.api.last_modified_time()
def update(self, token=None):
        # no longer necessary
return True
def internal_cache_update(self, collection_type, name):
self._log("DEBUG: adding to %s, %s" % (collection_type, name))
if name is None:
return False
data = self.api.deserialize_item_raw(collection_type, name)
if collection_type == "distro":
obj = item_distro.Distro(self.api._config)
obj.from_datastruct(data)
self.api.add_distro(obj, False, False)
if collection_type == "profile":
subprofile = False
if data.has_key("parent") and data["parent"] != "":
subprofile = True
obj = item_profile.Profile(self.api._config, is_subobject = subprofile)
obj.from_datastruct(data)
self.api.add_profile(obj, False, False)
if collection_type == "system":
obj = item_system.System(self.api._config)
obj.from_datastruct(data)
self.api.add_system(obj, False, False, False)
if collection_type == "repo":
obj = item_repo.Repo(self.api._config)
obj.from_datastruct(data)
self.api.add_repo(obj, False, False)
if collection_type == "image":
obj = item_image.Image(self.api._config)
obj.from_datastruct(data)
self.api.add_image(obj, False, False)
return True
def internal_cache_remove(self, collection_type, name):
self._log("DEBUG: removing from %s, %s" % (collection_type, name))
data = self.api.deserialize_item_raw(collection_type, name)
if data is None:
if collection_type == "distro":
self.api.remove_distro(name, delete=False, recursive=True, with_triggers=False)
if collection_type == "profile":
self.api.remove_profile(name, delete=False, recursive=True, with_triggers=False)
if collection_type == "system":
self.api.remove_system(name, delete=False, recursive=True, with_triggers=False)
if collection_type == "repo":
self.api.remove_repo(name, delete=False, recursive=True, with_triggers=False)
if collection_type == "image":
self.api.remove_image(name, delete=False, recursive=True, with_triggers=False)
return True
def ping(self):
return True
def get_user_from_token(self,token):
if not TOKEN_CACHE.has_key(token):
raise CX(_("invalid token: %s") % token)
else:
return self.token_cache[token][1]
def _log(self,msg,user=None,token=None,name=None,object_id=None,attribute=None,debug=False,error=False):
# add the user editing the object, if supplied
m_user = "?"
if user is not None:
m_user = user
if token is not None:
try:
m_user = self.get_user_from_token(token)
except:
# invalid or expired token?
m_user = "???"
msg = "REMOTE %s; user(%s)" % (msg, m_user)
if name is not None:
msg = "%s; name(%s)" % (msg, name)
if object_id is not None:
msg = "%s; object_id(%s)" % (msg, object_id)
# add any attributes being modified, if any
if attribute:
msg = "%s; attribute(%s)" % (msg, attribute)
# log to the correct logger
if error:
logger = self.logger.error
elif debug:
logger = self.logger.debug
else:
logger = self.logger.info
logger(msg)
def get_size(self,collection_name,**rest):
"""
Returns the number of entries in a collection (but not the actual
collection) for WUI/TUI interfaces that want to paginate the results.
"""
data = self.__get_all(collection_name)
return len(data)
def __get_all(self,collection_name,page=None,results_per_page=None):
"""
Helper method to return all data to the WebUI or another caller
without going through the process of loading all the data into
objects and recalculating.
Supports pagination for WUI or TUI based interfaces.
"""
# FIXME: a global lock or module around data access loading
# would be useful for non-db backed storage
if collection_name == "settings":
data = self.api.deserialize_raw("settings")
return self.xmlrpc_hacks(data)
else:
contents = []
if collection_name.startswith("distro"):
contents = self.api.distros()
elif collection_name.startswith("profile"):
contents = self.api.profiles()
elif collection_name.startswith("system"):
contents = self.api.systems()
elif collection_name.startswith("repo"):
contents = self.api.repos()
elif collection_name.startswith("image"):
contents = self.api.images()
else:
raise CX("internal error, collection name is %s" % collection_name)
# FIXME: speed this up
data = contents.to_datastruct_with_cache()
total_items = len(data)
data.sort(self.__sorter)
if page is not None and results_per_page is not None:
page = int(page)
results_per_page = int(results_per_page)
if page < 0:
return []
if results_per_page <= 0:
return []
start_point = (results_per_page * page)
end_point = (results_per_page * page) + results_per_page
if start_point > total_items:
start_point = total_items - 1 # correct ???
if end_point > total_items:
end_point = total_items
data = self.xmlrpc_hacks(data[start_point:end_point])
return self.xmlrpc_hacks(data)
def get_kickstart_templates(self,token=None,**rest):
"""
Returns all of the kickstarts that are in use by the system.
"""
self._log("get_kickstart_templates",token=token)
#self.check_access(token, "get_kickstart_templates")
return utils.get_kickstart_templates(self.api)
def is_kickstart_in_use(self,ks,token=None,**rest):
self._log("is_kickstart_in_use",token=token)
for x in self.api.profiles():
if x.kickstart is not None and x.kickstart == ks:
return True
for x in self.api.systems():
if x.kickstart is not None and x.kickstart == ks:
return True
return False
def generate_kickstart(self,profile=None,system=None,REMOTE_ADDR=None,REMOTE_MAC=None,**rest):
self._log("generate_kickstart")
return self.api.generate_kickstart(profile,system)
def get_settings(self,token=None,**rest):
"""
Return the contents of /etc/cobbler/settings, which is a hash.
"""
self._log("get_settings",token=token)
results = self.api.settings().to_datastruct()
self._log("my settings are: %s" % results)
return self.xmlrpc_hacks(results)
def get_repo_config_for_profile(self,profile_name,**rest):
"""
Return the yum configuration a given profile should use to obtain
        all of its cobbler-associated repos.
"""
obj = self.api.find_profile(profile_name)
if obj is None:
return "# object not found: %s" % profile_name
return self.api.get_repo_config_for_profile(obj)
def get_repo_config_for_system(self,system_name,**rest):
"""
        Return the yum configuration a given system should use to obtain
        all of its cobbler-associated repos.
"""
obj = self.api.find_system(system_name)
if obj is None:
return "# object not found: %s" % system_name
return self.api.get_repo_config_for_system(obj)
def get_template_file_for_profile(self,profile_name,path,**rest):
"""
Return the templated file requested for this profile
"""
obj = self.api.find_profile(profile_name)
if obj is None:
return "# object not found: %s" % profile_name
return self.api.get_template_file_for_profile(obj,path)
def get_template_file_for_system(self,system_name,path,**rest):
"""
Return the templated file requested for this system
"""
obj = self.api.find_system(system_name)
if obj is None:
return "# object not found: %s" % system_name
return self.api.get_template_file_for_system(obj,path)
def register_new_system(self,info,token=None,**rest):
"""
If register_new_installs is enabled in settings, this allows
/usr/bin/cobbler-register (part of the koan package) to add
new system records remotely if they don't already exist.
There is a cobbler_register snippet that helps with doing
this automatically for new installs but it can also be used
for existing installs. See "AutoRegistration" on the Wiki.
"""
enabled = self.api.settings().register_new_installs
if not str(enabled) in [ "1", "y", "yes", "true" ]:
raise CX("registration is disabled in cobbler settings")
# validate input
name = info.get("name","")
profile = info.get("profile","")
hostname = info.get("hostname","")
interfaces = info.get("interfaces",{})
ilen = len(interfaces.keys())
if name == "":
raise CX("no system name submitted")
if profile == "":
raise CX("profile not submitted")
if ilen == 0:
raise CX("no interfaces submitted")
if ilen >= 64:
raise CX("too many interfaces submitted")
# validate things first
name = info.get("name","")
inames = interfaces.keys()
if self.api.find_system(name=name):
raise CX("system name conflicts")
if hostname != "" and self.api.find_system(hostname=hostname):
raise CX("hostname conflicts")
for iname in inames:
mac = info["interfaces"][iname].get("mac_address","")
ip = info["interfaces"][iname].get("ip_address","")
if ip.find("/") != -1:
raise CX("no CIDR ips are allowed")
if mac == "":
raise CX("missing MAC address for interface %s" % iname)
if mac != "":
system = self.api.find_system(mac_address=mac)
if system is not None:
raise CX("mac conflict: %s" % mac)
if ip != "":
system = self.api.find_system(ip_address=ip)
if system is not None:
raise CX("ip conflict: %s"% ip)
# looks like we can go ahead and create a system now
obj = self.api.new_system()
obj.set_profile(profile)
obj.set_name(name)
if hostname != "":
obj.set_hostname(hostname)
obj.set_netboot_enabled(False)
for iname in inames:
mac = info["interfaces"][iname].get("mac_address","")
ip = info["interfaces"][iname].get("ip_address","")
netmask = info["interfaces"][iname].get("netmask","")
obj.set_mac_address(mac, iname)
if hostname != "":
obj.set_dns_name(hostname, iname)
if ip != "":
obj.set_ip_address(ip, iname)
if netmask != "":
obj.set_subnet(netmask, iname)
self.api.add_system(obj)
return 0
def disable_netboot(self,name,token=None,**rest):
"""
This is a feature used by the pxe_just_once support, see manpage.
Sets system named "name" to no-longer PXE. Disabled by default as
this requires public API access and is technically a read-write operation.
"""
self._log("disable_netboot",token=token,name=name)
# used by nopxe.cgi
if not self.api.settings().pxe_just_once:
# feature disabled!
return False
systems = self.api.systems()
obj = systems.find(name=name)
if obj == None:
# system not found!
return False
obj.set_netboot_enabled(0)
# disabling triggers and sync to make this extremely fast.
systems.add(obj,save=True,with_triggers=False,with_sync=False,quick_pxe_update=True)
return True
def upload_log_data(self, sys_name, file, size, offset, data, token=None,**rest):
"""
This is a logger function used by the "anamon" logging system to
        upload all sorts of auxiliary data from Anaconda.
As it's a bit of a potential log-flooder, it's off by default
and needs to be enabled in /etc/cobbler/settings.
"""
self._log("upload_log_data (file: '%s', size: %s, offset: %s)" % (file, size, offset), token=token, name=sys_name)
# Check if enabled in self.api.settings()
if not self.api.settings().anamon_enabled:
# feature disabled!
return False
# Find matching system record
systems = self.api.systems()
obj = systems.find(name=sys_name)
if obj == None:
# system not found!
self._log("upload_log_data - system '%s' not found" % sys_name, token=token, name=sys_name)
return False
return self.__upload_file(sys_name, file, size, offset, data)
def __upload_file(self, sys_name, file, size, offset, data):
'''
system: the name of the system
name: the name of the file
size: size of contents (bytes)
data: base64 encoded file contents
offset: the offset of the chunk
files can be uploaded in chunks, if so the size describes
the chunk rather than the whole file. the offset indicates where
the chunk belongs
the special offset -1 is used to indicate the final chunk'''
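        # Descriptive note (not in the original): intermediate chunks are
        # written at their given offset; the final chunk is sent with
        # offset == -1, is appended at the end of the file, and the file is
        # then truncated to 'size' (the total length) if size is given.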
contents = base64.decodestring(data)
del data
if offset != -1:
if size is not None:
if size != len(contents):
return False
#XXX - have an incoming dir and move after upload complete
# SECURITY - ensure path remains under uploadpath
tt = string.maketrans("/","+")
fn = string.translate(file, tt)
if fn.startswith('..'):
raise CX(_("invalid filename used: %s") % fn)
# FIXME ... get the base dir from cobbler settings()
udir = "/var/log/cobbler/anamon/%s" % sys_name
if not os.path.isdir(udir):
os.mkdir(udir, 0755)
fn = "%s/%s" % (udir, fn)
try:
st = os.lstat(fn)
except OSError, e:
if e.errno == errno.ENOENT:
pass
else:
raise
else:
if not stat.S_ISREG(st.st_mode):
raise CX(_("destination not a file: %s") % fn)
fd = os.open(fn, os.O_RDWR | os.O_CREAT, 0644)
# log_error("fd=%r" %fd)
try:
if offset == 0 or (offset == -1 and size == len(contents)):
#truncate file
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
try:
os.ftruncate(fd, 0)
# log_error("truncating fd %r to 0" %fd)
finally:
fcntl.lockf(fd, fcntl.LOCK_UN)
if offset == -1:
os.lseek(fd,0,2)
else:
os.lseek(fd,offset,0)
#write contents
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB, len(contents), 0, 2)
try:
os.write(fd, contents)
# log_error("wrote contents")
finally:
fcntl.lockf(fd, fcntl.LOCK_UN, len(contents), 0, 2)
if offset == -1:
if size is not None:
#truncate file
fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
try:
os.ftruncate(fd, size)
# log_error("truncating fd %r to size %r" % (fd,size))
finally:
fcntl.lockf(fd, fcntl.LOCK_UN)
finally:
os.close(fd)
return True
def run_install_triggers(self,mode,objtype,name,ip,token=None,**rest):
"""
This is a feature used to run the pre/post install triggers.
See CobblerTriggers on Wiki for details
"""
self._log("run_install_triggers",token=token)
if mode != "pre" and mode != "post":
return False
if objtype != "system" and objtype !="profile":
return False
# the trigger script is called with name,mac, and ip as arguments 1,2, and 3
# we do not do API lookups here because they are rather expensive at install
# time if reinstalling all of a cluster all at once.
# we can do that at "cobbler check" time.
utils.run_triggers(self.api, None, "/var/lib/cobbler/triggers/install/%s/*" % mode, additional=[objtype,name,ip])
return True
def version(self,token=None,**rest):
"""
Return the cobbler version for compatibility testing with remote applications.
See api.py for documentation.
"""
self._log("version",token=token)
return self.api.version()
def extended_version(self,token=None,**rest):
"""
Returns the full dictionary of version information. See api.py for documentation.
"""
self._log("version",token=token)
return self.api.version(extended=True)
def get_distros(self,page=None,results_per_page=None,token=None,**rest):
"""
Returns all cobbler distros as an array of hashes.
"""
self._log("get_distros",token=token)
return self.__get_all("distro",page,results_per_page)
def __find(self,find_function,criteria={},expand=False,token=None):
name = criteria.get("name",None)
if name is not None:
del criteria["name"]
if not expand:
data = [x.name for x in find_function(name, True, True, **criteria)]
else:
data = [x.to_datastruct_with_cache() for x in find_function(name, True, True, **criteria)]
return self.xmlrpc_hacks(data)
def find_distro(self,criteria={},expand=False,token=None,**rest):
self._log("find_distro", token=token)
# FIXME DEBUG
self._log(criteria)
data = self.__find(self.api.find_distro,criteria,expand=expand,token=token)
# FIXME DEBUG
self._log(data)
return data
def find_profile(self,criteria={},expand=False,token=None,**rest):
self._log("find_profile", token=token)
data = self.__find(self.api.find_profile,criteria,expand=expand,token=token)
return data
def find_system(self,criteria={},expand=False,token=None,**rest):
self._log("find_system", token=token)
data = self.__find(self.api.find_system,criteria,expand=expand,token=token)
return data
def find_repo(self,criteria={},expand=False,token=None,**rest):
self._log("find_repo", token=token)
data = self.__find(self.api.find_repo,criteria,expand=expand,token=token)
return data
def find_image(self,criteria={},expand=False,token=None,**rest):
self._log("find_image", token=token)
data = self.__find(self.api.find_image,criteria,expand=expand,token=token)
return data
def get_distros_since(self,mtime):
"""
Return all of the distro objects that have been modified
after mtime.
"""
data = self.api.get_distros_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_profiles_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_profiles_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_systems_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_systems_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_repos_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_repos_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_images_since(self,mtime):
"""
See documentation for get_distros_since
"""
data = self.api.get_images_since(mtime, collapse=True)
return self.xmlrpc_hacks(data)
def get_profiles(self,page=None,results_per_page=None,token=None,**rest):
"""
Returns all cobbler profiles as an array of hashes.
"""
self._log("get_profiles",token=token)
return self.__get_all("profile",page,results_per_page)
def get_systems(self,page=None,results_per_page=None,token=None,**rest):
"""
Returns all cobbler systems as an array of hashes.
"""
self._log("get_systems",token=token)
return self.__get_all("system",page,results_per_page)
def get_repos(self,page=None,results_per_page=None,token=None,**rest):
"""
Returns all cobbler repos as an array of hashes.
"""
self._log("get_repos",token=token)
return self.__get_all("repo",page,results_per_page)
def get_repos_compatible_with_profile(self,profile=None,token=None,**rest):
"""
Get repos that can be used with a given profile name
"""
self._log("get_repos_compatible_with_profile",token=token)
profile = self.api.find_profile(profile)
if profile is None:
return -1
results = []
distro = profile.get_conceptual_parent()
repos = self.get_repos()
for r in repos:
# there be dragons!
# accept all repos that are src/noarch
# but otherwise filter what repos are compatible
# with the profile based on the arch of the distro.
if r["arch"] is None or r["arch"] in [ "", "noarch", "src" ]:
results.append(r)
else:
# some backwards compatibility fuzz
# repo.arch is mostly a text field
# distro.arch is i386/x86_64/ia64/s390x/etc
if r["arch"] in [ "i386", "x86", "i686" ]:
if distro.arch in [ "i386", "x86" ]:
results.append(r)
elif r["arch"] in [ "x86_64" ]:
if distro.arch in [ "x86_64" ]:
results.append(r)
elif r["arch"].startswith("s390"):
if distro.arch in [ "s390x" ]:
results.append(r)
else:
if distro.arch == r["arch"]:
results.append(r)
return results
def get_images(self,page=None,results_per_page=None,token=None,**rest):
"""
Returns all cobbler images as an array of hashes.
"""
self._log("get_images",token=token)
return self.__get_all("image",page,results_per_page)
def __get_specific(self,collection_type,name,flatten=False):
"""
Internal function to return a hash representation of a given object if it exists,
otherwise an empty hash will be returned.
"""
result = self.api.deserialize_item_raw(collection_type, name)
if result is None:
return {}
if flatten:
result = utils.flatten(result)
return self.xmlrpc_hacks(result)
def get_distro(self,name,flatten=False,token=None,**rest):
"""
Returns the distro named "name" as a hash.
"""
self._log("get_distro",token=token,name=name)
return self.__get_specific("distro",name,flatten=flatten)
def get_profile(self,name,flatten=False,token=None,**rest):
"""
Returns the profile named "name" as a hash.
"""
self._log("get_profile",token=token,name=name)
return self.__get_specific("profile",name,flatten=flatten)
def get_system(self,name,flatten=False,token=None,**rest):
"""
Returns the system named "name" as a hash.
"""
self._log("get_system",name=name,token=token)
return self.__get_specific("system",name,flatten=flatten)
# this is used by the puppet external nodes feature
def find_system_by_dns_name(self,dns_name):
# FIXME: implement using api.py's find API
# and expose generic finds for other methods
# WARNING: this function is /not/ expected to stay in cobbler long term
systems = self.get_systems()
for x in systems:
for y in x["interfaces"]:
if x["interfaces"][y]["dns_name"] == dns_name:
name = x["name"]
return self.get_system_for_koan(name)
return {}
def get_repo(self,name,flatten=False,token=None,**rest):
"""
Returns the repo named "name" as a hash.
"""
self._log("get_repo",name=name,token=token)
return self.__get_specific("repo",name,flatten=flatten)
def get_image(self,name,flatten=False,token=None,**rest):
"""
Returns the repo named "name" as a hash.
"""
self._log("get_image",name=name,token=token)
return self.__get_specific("image",name,flatten=flatten)
def get_distro_as_rendered(self,name,token=None,**rest):
"""
Return the distribution as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_distro_for_koan(name,token)
def get_distro_for_koan(self,name,token=None,**rest):
"""
Same as get_distro_as_rendered.
"""
self._log("get_distro_as_rendered",name=name,token=token)
obj = self.api.find_distro(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_profile_as_rendered(self,name,token=None,**rest):
"""
Return the profile as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
return self.get_profile_for_koan(name,token)
def get_profile_for_koan(self,name,token=None,**rest):
"""
Same as get_profile_as_rendered
"""
self._log("get_profile_as_rendered", name=name, token=token)
obj = self.api.find_profile(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_system_as_rendered(self,name,token=None,**rest):
"""
Return the system as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_system_for_koan(name,token)
def get_system_for_koan(self,name,token=None,**rest):
"""
Same as get_system_as_rendered.
"""
self._log("get_system_as_rendered",name=name,token=token)
obj = self.api.find_system(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_repo_as_rendered(self,name,token=None,**rest):
"""
Return the repo as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_repo_for_koan(name,token)
def get_repo_for_koan(self,name,token=None,**rest):
"""
Same as get_repo_as_rendered.
"""
self._log("get_repo_as_rendered",name=name,token=token)
obj = self.api.find_repo(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_image_as_rendered(self,name,token=None,**rest):
"""
Return the image as passed through cobbler's
inheritance/graph engine. Shows what would be installed, not
the input data.
"""
        return self.get_image_for_koan(name,token)
def get_image_for_koan(self,name,token=None,**rest):
"""
Same as get_image_as_rendered.
"""
self._log("get_image_as_rendered",name=name,token=token)
obj = self.api.find_image(name=name)
if obj is not None:
return self.xmlrpc_hacks(utils.blender(self.api, True, obj))
return self.xmlrpc_hacks({})
def get_random_mac(self,token=None,**rest):
"""
Wrapper for utils.get_random_mac
Used in the webui
"""
self._log("get_random_mac",token=None)
return utils.get_random_mac(self.api)
def xmlrpc_hacks(self,data):
"""
Convert None in XMLRPC to just '~' to make extra sure a client
        that can't allow_none can deal with this. ALSO: a weird hack ensuring
        that dicts with integer keys (or other non-string types) are
        transmitted with string keys.
"""
if data is None:
data = '~'
elif type(data) == list:
data = [ self.xmlrpc_hacks(x) for x in data ]
elif type(data) == dict:
data2 = {}
for key in data.keys():
keydata = data[key]
data2[str(key)] = self.xmlrpc_hacks(data[key])
return data2
return data
def get_status(self,**rest):
"""
Returns the same information as `cobbler status`
"""
return self.api.status()
######
# READ WRITE METHODS BELOW REQUIRE A TOKEN, use login()
# TO OBTAIN ONE
######
def __get_random(self,length):
urandom = open("/dev/urandom")
b64 = base64.encodestring(urandom.read(length))
urandom.close()
b64 = b64.replace("\n","")
return b64
def __make_token(self,user):
"""
Returns a new random token.
"""
b64 = self.__get_random(25)
self.token_cache[b64] = (time.time(), user)
return b64
def __invalidate_expired_tokens(self):
"""
Deletes any login tokens that might have expired.
"""
timenow = time.time()
for token in self.token_cache.keys():
(tokentime, user) = self.token_cache[token]
if (timenow > tokentime + TOKEN_TIMEOUT):
self._log("expiring token",token=token,debug=True)
del self.token_cache[token]
# and also expired objects
for oid in self.object_cache.keys():
(tokentime, entry) = self.object_cache[oid]
if (timenow > tokentime + CACHE_TIMEOUT):
del self.object_cache[oid]
def __validate_user(self,input_user,input_password):
"""
Returns whether this user/pass combo should be given
access to the cobbler read-write API.
For the system user, this answer is always "yes", but
it is only valid for the socket interface.
FIXME: currently looks for users in /etc/cobbler/auth.conf
Would be very nice to allow for PAM and/or just Kerberos.
"""
return self.api.authenticate(input_user,input_password)
def __validate_token(self,token):
"""
Checks to see if an API method can be called when
the given token is passed in. Updates the timestamp
of the token automatically to prevent the need to
repeatedly call login(). Any method that needs
access control should call this before doing anything
else.
"""
self.__invalidate_expired_tokens()
#if not self.auth_enabled:
# user = self.get_user_from_token(token)
# # old stuff, preserving for future usage
# # if user == "<system>":
# # self.token_cache[token] = (time.time(), user) # update to prevent timeout
# # return True
if self.token_cache.has_key(token):
user = self.get_user_from_token(token)
if user == "<system>":
# system token is only valid over Unix socket
return False
self.token_cache[token] = (time.time(), user) # update to prevent timeout
return True
else:
self._log("invalid token",token=token)
raise CX(_("invalid token: %s" % token))
def __name_to_object(self,resource,name):
if resource.find("distro") != -1:
return self.api.find_distro(name)
if resource.find("profile") != -1:
return self.api.find_profile(name)
if resource.find("system") != -1:
return self.api.find_system(name)
if resource.find("repo") != -1:
return self.api.find_repo(name)
return None
def check_access_no_fail(self,token,resource,arg1=None,arg2=None):
"""
This is called by the WUI to decide whether an element
        is editable or not. It differs from check_access in that
it is supposed to /not/ log the access checks (TBA) and does
not raise exceptions.
"""
need_remap = False
for x in [ "distro", "profile", "system", "repo" ]:
if arg1 is not None and resource.find(x) != -1:
need_remap = True
break
if need_remap:
# we're called with an object name, but need an object
arg1 = self.__name_to_object(resource,arg1)
try:
self.check_access(token,resource,arg1,arg2)
return True
except:
utils.log_exc(self.logger)
return False
def check_access(self,token,resource,arg1=None,arg2=None):
validated = self.__validate_token(token)
user = self.get_user_from_token(token)
if not self.auth_enabled:
# for public read-only XMLRPC, permit access
self._log("permitting read-only access")
return True
rc = self.__authorize(token,resource,arg1,arg2)
self._log("authorization result: %s" % rc)
if not rc:
raise CX(_("authorization failure for user %s" % user))
return rc
def login(self,login_user,login_password):
"""
Takes a username and password, validates it, and if successful
returns a random login token which must be used on subsequent
method calls. The token will time out after a set interval if not
        used. Re-logging in is permitted.
"""
self._log("login attempt", user=login_user)
if self.__validate_user(login_user,login_password):
token = self.__make_token(login_user)
self._log("login succeeded",user=login_user)
return token
else:
self._log("login failed",user=login_user)
raise CX(_("login failed: %s") % login_user)
def __authorize(self,token,resource,arg1=None,arg2=None):
user = self.get_user_from_token(token)
args = [ resource, arg1, arg2 ]
self._log("calling authorize for resource %s" % args, user=user)
rc = self.api.authorize(user,resource,arg1,arg2)
if rc:
return True
else:
raise CX(_("user does not have access to resource: %s") % resource)
def logout(self,token):
"""
Retires a token ahead of the timeout.
"""
self._log("logout", token=token)
if self.token_cache.has_key(token):
del self.token_cache[token]
return True
return False
def token_check(self,token):
"""
This is a demo function that does not return anything useful.
"""
self.__validate_token(token)
return True
def sync(self,token):
"""
Run sync code, which should complete before XMLRPC timeout. We can't
do reposync this way. Would be nice to send output over AJAX/other
later.
"""
# FIXME: performance
self._log("sync",token=token)
self.check_access(token,"sync")
return self.api.sync()
def hardlink(self,token):
"""
Hardlink trees and repos to save disk space. Caution: long
running op. Until we have a task engine, this may lock other
folks out of the web app, so use wisely. It may also be timeout
prone.
"""
self._log("hardlink",token=token)
self.check_access(token,"hardlink")
return self.api.hardlink()
def new_distro(self,token):
"""
Creates a new (unconfigured) distro object. It works something like
this:
token = remote.login("user","pass")
distro_id = remote.new_distro(token)
remote.modify_distro(distro_id, 'name', 'example-distro', token)
remote.modify_distro(distro_id, 'kernel', '/foo/vmlinuz', token)
remote.modify_distro(distro_id, 'initrd', '/foo/initrd.img', token)
remote.save_distro(distro_id, token)
"""
self._log("new_distro",token=token)
self.check_access(token,"new_distro")
d = item_distro.Distro(self.api._config)
key = "___NEW___distro::%s" % self.__get_random(25)
self.object_cache[key] = (time.time(), d)
return key
def new_profile(self,token):
"""
Creates a new (unconfigured) profile object. See the documentation
for new_distro as it works exactly the same.
"""
self._log("new_profile",token=token)
self.check_access(token,"new_profile")
p = item_profile.Profile(self.api._config)
key = "___NEW___profile::%s" % self.__get_random(25)
self.object_cache[key] = (time.time(), p)
return key
def new_subprofile(self,token):
"""
A subprofile is a profile that inherits directly from another profile,
not a distro. In addition to the normal profile setup, setting
the parent variable to the name of an existing profile is also
mandatory. Systems can be assigned to subprofiles just like they
were regular profiles. The same XMLRPC API methods work on them as profiles
also.
"""
self._log("new_subprofile",token=token)
self.check_access(token,"new_subprofile")
p = item_profile.Profile(self.api._config,is_subobject=True)
key = "___NEW___profile::%s" % self.__get_random(25)
self.object_cache[key] = (time.time(), p)
return key
def new_system(self,token):
"""
Creates a new (unconfigured) system object. See the documentation
for new_distro as it works exactly the same.
"""
self._log("new_system",token=token)
self.check_access(token,"new_system")
s = item_system.System(self.api._config)
key = "___NEW___system::%s" % self.__get_random(25)
self.object_cache[key] = (time.time(), s)
return key
def new_repo(self,token):
"""
Creates a new (unconfigured) repo object. See the documentation
for new_distro as it works exactly the same.
"""
self._log("new_repo",token=token)
self.check_access(token,"new_repo")
r = item_repo.Repo(self.api._config)
key = "___NEW___repo::%s" % self.__get_random(25)
self.object_cache[key] = (time.time(), r)
return key
def new_image(self,token):
"""
Creates a new (unconfigured) image object. See the documentation
for new_distro as it works exactly the same.
"""
self._log("new_image",token=token)
self.check_access(token,"new_image")
i = item_image.Image(self.api._config)
key = "___NEW___image::%s" % self.__get_random(25)
self.object_cache[key] = (time.time(), i)
return key
def get_distro_handle(self,name,token=None):
"""
        Given the name of a distro (or other search parameters), return an
object id that can be passed in to modify_distro() or save_distro()
commands. Raises an exception if no object can be matched.
"""
self._log("get_distro_handle",token=token,name=name)
found = self.api.find_distro(name)
return "distro::%s" % found.name
def get_profile_handle(self,name,token=None):
"""
Given the name of a profile (or other search parameters), return an
object id that can be passed in to modify_profile() or save_profile()
commands. Raises an exception if no object can be matched.
"""
self._log("get_profile_handle",token=token,name=name)
found = self.api.find_profile(name)
return "profile::%s" % found.name
def get_system_handle(self,name,token=None):
"""
        Given the name of a system (or other search parameters), return an
object id that can be passed in to modify_system() or save_system()
commands. Raises an exception if no object can be matched.
"""
self._log("get_system_handle",name=name,token=token)
found = self.api.find_system(name)
return "system::%s" % found.name
def get_repo_handle(self,name,token=None):
"""
        Given the name of a repo (or other search parameters), return an
object id that can be passed in to modify_repo() or save_repo()
commands. Raises an exception if no object can be matched.
"""
self._log("get_repo_handle",name=name,token=token)
found = self.api.find_repo(name)
return "repo::%s" % found.name
def get_image_handle(self,name,token=None):
"""
Given the name of an image (or other search parameters), return an
object id that can be passed in to modify_image() or save_image()
commands. Raises an exception if no object can be matched.
"""
self._log("get_image_handle",name=name,token=token)
found = self.api.find_image(name)
return "image::%s" % found.name
def save_distro(self,object_id,token,editmode="bypass"):
"""
Saves a newly created or modified distro object to disk.
"""
self._log("save_distro",object_id=object_id,token=token)
obj = self.__get_object(object_id)
self.check_access(token,"save_distro",obj)
if editmode == "new":
return self.api.add_distro(obj,check_for_duplicate_names=True)
else:
return self.api.add_distro(obj)
def save_profile(self,object_id,token,editmode="bypass"):
"""
Saves a newly created or modified profile object to disk.
"""
self._log("save_profile",token=token,object_id=object_id)
obj = self.__get_object(object_id)
self.check_access(token,"save_profile",obj)
if editmode == "new":
return self.api.add_profile(obj,check_for_duplicate_names=True)
else:
return self.api.add_profile(obj)
def save_system(self,object_id,token,editmode="bypass"):
"""
Saves a newly created or modified system object to disk.
"""
self._log("save_system",token=token,object_id=object_id)
obj = self.__get_object(object_id)
self.check_access(token,"save_system",obj)
if editmode == "new":
return self.api.add_system(obj,check_for_duplicate_names=True,check_for_duplicate_netinfo=True)
elif editmode == "edit":
return self.api.add_system(obj,check_for_duplicate_netinfo=True)
else:
return self.api.add_system(obj)
def save_repo(self,object_id,token=None,editmode="bypass"):
"""
Saves a newly created or modified repo object to disk.
"""
self._log("save_repo",object_id=object_id,token=token)
obj = self.__get_object(object_id)
self.check_access(token,"save_repo",obj)
if editmode == "new":
return self.api.add_repo(obj,check_for_duplicate_names=True)
else:
return self.api.add_repo(obj)
def save_image(self,object_id,token=None,editmode="bypass"):
"""
        Saves a newly created or modified image object to disk.
"""
self._log("save_image",object_id=object_id,token=token)
obj = self.__get_object(object_id)
self.check_access(token,"save_image",obj)
if editmode == "new":
return self.api.add_image(obj,check_for_duplicate_names=True)
else:
return self.api.add_image(obj)
## FIXME: refactor out all of the boilerplate stuff like ^^
def copy_distro(self,object_id,newname,token=None):
"""
All copy methods are pretty much the same. Get an object handle, pass in the new
name for it.
"""
self._log("copy_distro",object_id=object_id,token=token)
self.check_access(token,"copy_distro")
obj = self.__get_object(object_id)
return self.api.copy_distro(obj,newname)
def copy_profile(self,object_id,newname,token=None):
self._log("copy_profile",object_id=object_id,token=token)
self.check_access(token,"copy_profile")
obj = self.__get_object(object_id)
return self.api.copy_profile(obj,newname)
def copy_system(self,object_id,newname,token=None):
self._log("copy_system",object_id=object_id,token=token)
self.check_access(token,"copy_system")
obj = self.__get_object(object_id)
return self.api.copy_system(obj,newname)
def copy_repo(self,object_id,newname,token=None):
self._log("copy_repo",object_id=object_id,token=token)
self.check_access(token,"copy_repo")
obj = self.__get_object(object_id)
return self.api.copy_repo(obj,newname)
def copy_image(self,object_id,newname,token=None):
self._log("copy_image",object_id=object_id,token=token)
self.check_access(token,"copy_image")
obj = self.__get_object(object_id)
return self.api.copy_image(obj,newname)
def rename_distro(self,object_id,newname,token=None):
"""
All rename methods are pretty much the same. Get an object handle, pass in a new
name for it. Rename will modify dependencies to point them at the new
object.
"""
self._log("rename_distro",object_id=object_id,token=token)
obj = self.__get_object(object_id)
return self.api.rename_distro(obj,newname)
def rename_profile(self,object_id,newname,token=None):
self._log("rename_profile",object_id=object_id,token=token)
self.check_access(token,"rename_profile")
obj = self.__get_object(object_id)
return self.api.rename_profile(obj,newname)
def rename_system(self,object_id,newname,token=None):
self._log("rename_system",object_id=object_id,token=token)
self.check_access(token,"rename_system")
obj = self.__get_object(object_id)
return self.api.rename_system(obj,newname)
def rename_repo(self,object_id,newname,token=None):
self._log("rename_repo",object_id=object_id,token=token)
self.check_access(token,"rename_repo")
obj = self.__get_object(object_id)
return self.api.rename_repo(obj,newname)
def rename_image(self,object_id,newname,token=None):
self._log("rename_image",object_id=object_id,token=token)
self.check_access(token,"rename_image")
obj = self.__get_object(object_id)
return self.api.rename_image(obj,newname)
def __get_object(self, object_id):
if object_id.startswith("___NEW___"):
return self.object_cache[object_id][1]
(otype, oname) = object_id.split("::",1)
if otype == "distro":
return self.api.find_distro(oname)
elif otype == "profile":
return self.api.find_profile(oname)
elif otype == "system":
return self.api.find_system(oname)
elif otype == "repo":
return self.api.find_repo(oname)
elif otype == "image":
return self.api.find_image(oname)
else:
return "invalid"
def __call_method(self, obj, attribute, arg):
"""
Internal function used by the modify routines.
"""
method = obj.remote_methods().get(attribute, None)
if method == None:
raise CX(_("object has no method: %s") % attribute)
return method(arg)
def modify_distro(self,object_id,attribute,arg,token):
"""
Allows modification of certain attributes on newly created or
existing distro object handle.
"""
obj = self.__get_object(object_id)
self.check_access(token, "modify_distro", obj, attribute)
return self.__call_method(obj, attribute, arg)
def modify_profile(self,object_id,attribute,arg,token):
"""
Allows modification of certain attributes on newly created or
existing profile object handle.
"""
obj = self.__get_object(object_id)
self.check_access(token, "modify_profile", obj, attribute)
return self.__call_method(obj, attribute, arg)
def modify_system(self,object_id,attribute,arg,token):
"""
Allows modification of certain attributes on newly created or
existing system object handle.
"""
obj = self.__get_object(object_id)
self.check_access(token, "modify_system", obj, attribute)
return self.__call_method(obj, attribute, arg)
def modify_repo(self,object_id,attribute,arg,token):
"""
Allows modification of certain attributes on newly created or
existing repo object handle.
"""
obj = self.__get_object(object_id)
self.check_access(token, "modify_repo", obj, attribute)
return self.__call_method(obj, attribute, arg)
def modify_image(self,object_id,attribute,arg,token):
"""
Allows modification of certain attributes on newly created or
existing image object handle.
"""
## FIXME: lots of boilerplate to remove here, move to utils.py
obj = self.__get_object(object_id)
self.check_access(token, "modify_image", obj, attribute)
return self.__call_method(obj, attribute, arg)
def remove_distro(self,name,token,recursive=1):
"""
Deletes a distro from a collection. Note that this just requires the name
of the distro, not a handle.
"""
self._log("remove_distro (%s)" % recursive,name=name,token=token)
self.check_access(token, "remove_distro", name)
rc = self.api.remove_distro(name,recursive=True)
return rc
def remove_profile(self,name,token,recursive=1):
"""
        Deletes a profile from a collection. Note that this just requires the name
        of the profile, not a handle.
"""
self._log("remove_profile (%s)" % recursive,name=name,token=token)
self.check_access(token, "remove_profile", name)
rc = self.api.remove_profile(name,recursive=True)
return rc
def remove_system(self,name,token,recursive=1):
"""
Deletes a system from a collection. Note that this just requires the name
        of the system, not a handle.
"""
self._log("remove_system (%s)" % recursive,name=name,token=token)
self.check_access(token, "remove_system", name)
rc = self.api.remove_system(name)
return rc
def remove_repo(self,name,token,recursive=1):
"""
Deletes a repo from a collection. Note that this just requires the name
of the repo, not a handle.
"""
self._log("remove_repo (%s)" % recursive,name=name,token=token)
self.check_access(token, "remove_repo", name)
rc = self.api.remove_repo(name, recursive=True)
return rc
def remove_image(self,name,token,recursive=1):
"""
        Deletes an image from a collection. Note that this just requires the name
of the image, not a handle.
"""
self._log("remove_image (%s)" % recursive,name=name,token=token)
self.check_access(token, "remove_image", name)
rc = self.api.remove_image(name, recursive=True)
return rc
def read_or_write_kickstart_template(self,kickstart_file,is_read,new_data,token):
"""
Allows the WebUI to be used as a kickstart file editor. For security
reasons we will only allow kickstart files to be edited if they reside in
        /var/lib/cobbler/kickstarts/ or /etc/cobbler. This limits the damage
        that a user with a cobbler password but no system password can do.
        Also, if the file lives in /etc/cobbler, it must be a kickstart file.
"""
if is_read:
what = "read_kickstart_template"
else:
what = "write_kickstart_template"
self._log(what,name=kickstart_file,token=token)
self.check_access(token,what,kickstart_file,is_read)
if kickstart_file.find("..") != -1 or not kickstart_file.startswith("/"):
raise CX(_("tainted file location"))
if not kickstart_file.startswith("/etc/cobbler/") and not kickstart_file.startswith("/var/lib/cobbler/kickstarts"):
raise CX(_("unable to view or edit kickstart in this location"))
if kickstart_file.startswith("/etc/cobbler/"):
if not kickstart_file.endswith(".ks") and not kickstart_file.endswith(".cfg"):
# take care to not allow config files to be altered.
raise CX(_("this does not seem to be a kickstart file"))
if not is_read and not os.path.exists(kickstart_file):
raise CX(_("new files must go in /var/lib/cobbler/kickstarts"))
if is_read:
fileh = open(kickstart_file,"r")
data = fileh.read()
fileh.close()
return data
else:
if new_data == -1:
# delete requested
if not self.is_kickstart_in_use(kickstart_file,token):
os.remove(kickstart_file)
else:
raise CX(_("attempt to delete in-use file"))
else:
fileh = open(kickstart_file,"w+")
fileh.write(new_data)
fileh.close()
return True
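# Illustrative sketch (not part of the original test suite; the URL and
# credentials are placeholders): a WebUI-style client would normally drive
# this method over XMLRPC, e.g.:
#
#   remote = xmlrpclib.Server("http://cobbler.example.org/cobbler_api")
#   token  = remote.login("cobbler", "secret")
#   ks     = "/var/lib/cobbler/kickstarts/sample.ks"
#   data   = remote.read_or_write_kickstart_template(ks, True, -1, token)   # read
#   remote.read_or_write_kickstart_template(ks, False, data, token)         # write back
#   remote.read_or_write_kickstart_template(ks, False, -1, token)           # delete (only if unused)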
def power_system(self,object_id,power=None,token=None):
"""
Allows poweron/poweroff/reboot of a system
"""
obj = self.__get_object(object_id)
self.check_access(token, "power_system", obj)
if power=="on":
rc=self.api.power_on(obj)
elif power=="off":
rc=self.api.power_off(obj)
elif power=="reboot":
rc=self.api.reboot(obj)
else:
raise CX(_("invalid power mode '%s', expected on/off/reboot" % power))
return rc
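# Illustrative sketch (object names are placeholders): callers normally look up
# an object handle first and then request a power action, e.g.:
#
#   sid = remote.get_system_handle("system0", token)
#   remote.power_system(sid, "reboot", token)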
def deploy(self, object_id, virt_host=None, virt_group=None, token=None):
"""
Deploy a system
"""
obj = self.__get_object(object_id)
self.check_access(token, "deploy", obj)
rc = self.api.deploy(obj, virt_host=virt_host, virt_group=virt_group)
return rc
# *********************************************************************************
# *********************************************************************************
class CobblerXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
def __init__(self, args):
self.allow_reuse_address = True
SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self,args)
# *********************************************************************************
# *********************************************************************************
class ProxiedXMLRPCInterface:
def __init__(self,api,proxy_class,enable_auth_if_relevant=True):
self.proxied = proxy_class(api,enable_auth_if_relevant)
self.logger = self.proxied.api.logger
def _dispatch(self, method, params, **rest):
if not hasattr(self.proxied, method):
self.logger.error("remote:unknown method %s" % method)
raise CX(_("Unknown remote method"))
method_handle = getattr(self.proxied, method)
try:
return method_handle(*params)
except Exception, e:
utils.log_exc(self.logger)
raise e
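# Minimal wiring sketch (an assumption based on the classes above, not a
# verbatim excerpt of cobblerd): the proxied interface is what gets registered
# on the XMLRPC server; "CobblerXMLRPCInterface" stands in for the enclosing
# read-write interface class.
#
#   api       = cobbler_api.BootAPI()
#   interface = ProxiedXMLRPCInterface(api, CobblerXMLRPCInterface)
#   server    = CobblerXMLRPCServer(("127.0.0.1", 25151))
#   server.register_instance(interface)
#   server.serve_forever()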
# *********************************************************************
# *********************************************************************
def _test_setup_modules(authn="authn_testing",authz="authz_allowall",pxe_once=1):
# rewrite modules.conf so we know we can use the testing module
# for xmlrpc rw testing (Makefile will put the user value back)
import yaml
import Cheetah.Template as Template
MODULES_TEMPLATE = "installer_templates/modules.conf.template"
DEFAULTS = "installer_templates/defaults"
fh = open(DEFAULTS)
data = yaml.load(fh.read())
fh.close()
data["authn_module"] = authn
data["authz_module"] = authz
data["pxe_once"] = pxe_once
t = Template.Template(file=MODULES_TEMPLATE, searchList=[data])
open("/etc/cobbler/modules.conf","w+").write(t.respond())
def _test_setup_settings(pxe_once=1):
# rewrite the settings file so we know we can use the testing values
# for xmlrpc rw testing (Makefile will put the user value back)
import yaml
import Cheetah.Template as Template
MODULES_TEMPLATE = "installer_templates/settings.template"
DEFAULTS = "installer_templates/defaults"
fh = open(DEFAULTS)
data = yaml.load(fh.read())
fh.close()
data["pxe_once"] = pxe_once
t = Template.Template(file=MODULES_TEMPLATE, searchList=[data])
open("/etc/cobbler/settings","w+").write(t.respond())
def _test_bootstrap_restart():
rc1 = subprocess.call(["/sbin/service","cobblerd","restart"],shell=False,close_fds=True)
assert rc1 == 0
rc2 = subprocess.call(["/sbin/service","httpd","restart"],shell=False,close_fds=True)
assert rc2 == 0
time.sleep(5)
_test_remove_objects()
def _test_remove_objects():
api = cobbler_api.BootAPI() # local handle
# from ro tests
d0 = api.find_distro("distro0")
i0 = api.find_image("image0")
r0 = api.find_repo("repo0")
# from rw tests
d1 = api.find_distro("distro1")
i1 = api.find_image("image1")
r1 = api.find_repo("repo1")
if d0 is not None: api.remove_distro(d0, recursive = True)
if i0 is not None: api.remove_image(i0)
if r0 is not None: api.remove_repo(r0)
if d1 is not None: api.remove_distro(d1, recursive = True)
if i1 is not None: api.remove_image(i1)
if r1 is not None: api.remove_repo(r1)
def test_xmlrpc_ro():
_test_bootstrap_restart()
server = xmlrpclib.Server("http://127.0.0.1/cobbler_api")
time.sleep(2)
# fetch all collections to check the read-only API return types
distros = server.get_distros()
profiles = server.get_profiles()
systems = server.get_systems()
repos = server.get_repos()
images = server.get_images()
settings = server.get_settings()
assert type(distros) == type([])
assert type(profiles) == type([])
assert type(systems) == type([])
assert type(repos) == type([])
assert type(images) == type([])
assert type(settings) == type({})
# now populate with something more useful
# using the non-remote API
api = cobbler_api.BootAPI() # local handle
before_distros = len(api.distros())
before_profiles = len(api.profiles())
before_systems = len(api.systems())
before_repos = len(api.repos())
before_images = len(api.images())
fake = open("/tmp/cobbler.fake","w+")
fake.write("")
fake.close()
distro = api.new_distro()
distro.set_name("distro0")
distro.set_kernel("/tmp/cobbler.fake")
distro.set_initrd("/tmp/cobbler.fake")
api.add_distro(distro)
repo = api.new_repo()
repo.set_name("repo0")
if not os.path.exists("/tmp/empty"):
os.mkdir("/tmp/empty",0770)
repo.set_mirror("/tmp/empty")
files = glob.glob("rpm-build/*.rpm")
if len(files) == 0:
raise Exception("Tests must be run from the cobbler checkout directory.")
subprocess.call("cp rpm-build/*.rpm /tmp/empty",shell=True,close_fds=True)
api.add_repo(repo)
profile = api.new_profile()
profile.set_name("profile0")
profile.set_distro("distro0")
profile.set_kickstart("/var/lib/cobbler/kickstarts/sample.ks")
profile.set_repos(["repo0"])
api.add_profile(profile)
system = api.new_system()
system.set_name("system0")
system.set_hostname("hostname0")
system.set_gateway("192.168.1.1")
system.set_profile("profile0")
system.set_dns_name("hostname0","eth0")
api.add_system(system)
image = api.new_image()
image.set_name("image0")
image.set_file("/tmp/cobbler.fake")
api.add_image(image)
# reposync is required in order to create the repo config files
api.reposync(name="repo0")
# FIXME: the following tests do not yet look to see that all elements
# retrieved match what they were created with, but we presume this
# all works. It is not a high priority item to test but do not assume
# this is a complete test of access functions.
def comb(haystack, needle):
for x in haystack:
if x["name"] == needle:
return True
return False
distros = server.get_distros()
assert len(distros) == before_distros + 1
assert comb(distros, "distro0")
profiles = server.get_profiles()
print "BEFORE: %s" % before_profiles
print "CURRENT: %s" % len(profiles)
for p in profiles:
print " PROFILES: %s" % p["name"]
for p in api.profiles():
print " API : %s" % p.name
assert len(profiles) == before_profiles + 1
assert comb(profiles, "profile0")
systems = server.get_systems()
# assert len(systems) == before_systems + 1
assert comb(systems, "system0")
repos = server.get_repos()
# FIXME: disable temporarily
# assert len(repos) == before_repos + 1
assert comb(repos, "repo0")
images = server.get_images()
# assert len(images) == before_images + 1
assert comb(images, "image0")
# now test specific gets
distro = server.get_distro("distro0")
assert distro["name"] == "distro0"
assert type(distro["kernel_options"]) == type({})
profile = server.get_profile("profile0")
assert profile["name"] == "profile0"
assert type(profile["kernel_options"]) == type({})
system = server.get_system("system0")
assert system["name"] == "system0"
assert type(system["kernel_options"]) == type({})
repo = server.get_repo("repo0")
assert repo["name"] == "repo0"
image = server.get_image("image0")
assert image["name"] == "image0"
# now test the calls koan uses
# the difference is that koan's object types are flattened somewhat
# and also that they are passed through utils.blender() so they represent
# not the object but the evaluation of the object tree at that object.
server.update() # should be unneeded
distro = server.get_distro_for_koan("distro0")
assert distro["name"] == "distro0"
assert type(distro["kernel_options"]) == type("")
profile = server.get_profile_for_koan("profile0")
assert profile["name"] == "profile0"
assert type(profile["kernel_options"]) == type("")
system = server.get_system_for_koan("system0")
assert system["name"] == "system0"
assert type(system["kernel_options"]) == type("")
repo = server.get_repo_for_koan("repo0")
assert repo["name"] == "repo0"
image = server.get_image_for_koan("image0")
assert image["name"] == "image0"
# now test some of the additional webui calls
# compatible profiles, etc
assert server.ping() == True
assert server.get_size("distros") == 1
assert server.get_size("profiles") == 1
assert server.get_size("systems") == 1
assert server.get_size("repos") == 1
assert server.get_size("images") == 1
templates = server.get_kickstart_templates("???")
assert "/var/lib/cobbler/kickstarts/sample.ks" in templates
assert server.is_kickstart_in_use("/var/lib/cobbler/kickstarts/sample.ks","???") == True
assert server.is_kickstart_in_use("/var/lib/cobbler/kickstarts/legacy.ks","???") == False
generated = server.generate_kickstart("profile0")
assert type(generated) == type("")
assert generated.find("ERROR") == -1
assert generated.find("url") != -1
assert generated.find("network") != -1
yumcfg = server.get_repo_config_for_profile("profile0")
assert type(yumcfg) == type("")
assert yumcfg.find("ERROR") == -1
assert yumcfg.find("http://") != -1
yumcfg = server.get_repo_config_for_system("system0")
assert type(yumcfg) == type("")
assert yumcfg.find("ERROR") == -1
assert yumcfg.find("http://") != -1
server.register_mac("CC:EE:FF:GG:AA:AA","profile0")
systems = server.get_systems()
found = False
for s in systems:
if s["name"] == "CC:EE:FF:GG:AA:AA":
for iname in s["interfaces"]:
if s["interfaces"]["iname"].get("mac_address") == "CC:EE:FF:GG:AA:AA":
found = True
break
if found:
break
# FIXME: mac registration test code needs a correct settings file in order to
# be enabled.
# assert found == True
# FIXME: the following tests don't work if pxe_just_once is disabled in settings so we need
# to account for this by turning it on...
# basically we need to rewrite the settings file
# system = server.get_system("system0")
# assert system["netboot_enabled"] == "True"
# rc = server.disable_netboot("system0")
# assert rc == True
# ne = server.get_system("system0")["netboot_enabled"]
# assert ne == False
# FIXME: tests for new built-in configuration management feature
# require that --template-files attributes be set. These do not
# retrieve the kickstarts but rather config files (see Wiki topics).
# This is probably better tested at the URL level with urlgrabber, one layer
# up, in a different set of tests..
# FIXME: tests for rendered kickstart retrieval, same as above
assert server.run_install_triggers("pre","profile","profile0","127.0.0.1")
assert server.run_install_triggers("post","profile","profile0","127.0.0.1")
assert server.run_install_triggers("pre","system","system0","127.0.0.1")
assert server.run_install_triggers("post","system","system0","127.0.0.1")
ver = server.version()
assert (str(ver)[0] == "?" or str(ver).find(".") != -1)
# do removals via the API since the read-only API can't do them
# and the read-write tests are separate
_test_remove_objects()
# this last bit mainly tests the tests themselves (not XMLRPC), to ensure
# we've left nothing behind. Tests polluting the user config are not
# desirable even though we do save/restore it.
# assert (len(api.distros()) == before_distros)
# assert (len(api.profiles()) == before_profiles)
# assert (len(api.systems()) == before_systems)
# assert (len(api.images()) == before_images)
# assert (len(api.repos()) == before_repos)
def test_xmlrpc_rw():
# ideally we need tests for the various auth modes, not just one
# and the ownership module, though this will provide decent coverage.
_test_setup_modules(authn="authn_testing",authz="authz_allowall")
_test_bootstrap_restart()
server = xmlrpclib.Server("http://127.0.0.1/cobbler_api") # remote
api = cobbler_api.BootAPI() # local instance, /DO/ ping cobblerd
# note if authn_testing is not engaged this will not work
# test getting token, will raise remote exception on fail
token = server.login("testing","testing")
# create distro
did = server.new_distro(token)
server.modify_distro(did, "name", "distro1", token)
server.modify_distro(did, "kernel", "/tmp/cobbler.fake", token)
server.modify_distro(did, "initrd", "/tmp/cobbler.fake", token)
server.modify_distro(did, "kopts", { "dog" : "fido", "cat" : "fluffy" }, token) # hash or string
server.modify_distro(did, "ksmeta", "good=sg1 evil=gould", token) # hash or string
server.modify_distro(did, "breed", "redhat", token)
server.modify_distro(did, "os-version", "rhel5", token)
server.modify_distro(did, "owners", "sam dave", token) # array or string
server.modify_distro(did, "mgmt-classes", "blip", token) # list or string
server.modify_distro(did, "template-files", "/tmp/cobbler.fake=/tmp/a /etc/fstab=/tmp/b",token) # hash or string
server.modify_distro(did, "comment", "...", token)
server.modify_distro(did, "redhat_management_key", "ALPHA", token)
server.modify_distro(did, "redhat_management_server", "rhn.example.com", token)
server.save_distro(did, token)
# use the non-XMLRPC API to check that it's added, since we exercised the XMLRPC RW APIs above
# this makes extra sure it's been committed to disk.
api.deserialize()
assert api.find_distro("distro1") != None
pid = server.new_profile(token)
server.modify_profile(pid, "name", "profile1", token)
server.modify_profile(pid, "distro", "distro1", token)
server.modify_profile(pid, "enable-menu", True, token)
server.modify_profile(pid, "kickstart", "/var/lib/cobbler/kickstarts/sample.ks", token)
server.modify_profile(pid, "kopts", { "level" : "11" }, token)
server.modify_profile(pid, "kopts-post", "noapic", token)
server.modify_profile(pid, "virt-auto-boot", 0, token)
server.modify_profile(pid, "virt-file-size", 20, token)
server.modify_profile(pid, "virt-ram", 2048, token)
server.modify_profile(pid, "repos", [], token)
server.modify_profile(pid, "template-files", {}, token)
server.modify_profile(pid, "virt-path", "VolGroup00", token)
server.modify_profile(pid, "virt-bridge", "virbr1", token)
server.modify_profile(pid, "virt-cpus", 2, token)
server.modify_profile(pid, "owners", [ "sam", "dave" ], token)
server.modify_profile(pid, "mgmt-classes", "one two three", token)
server.modify_profile(pid, "comment", "...", token)
server.modify_profile(pid, "name_servers", ["one","two"], token)
server.modify_profile(pid, "name_servers_search", ["one","two"], token)
server.modify_profile(pid, "redhat_management_key", "BETA", token)
server.modify_distro(did, "redhat_management_server", "sat.example.com", token)
server.save_profile(pid, token)
api.deserialize()
assert api.find_profile("profile1") != None
sid = server.new_system(token)
server.modify_system(sid, 'name', 'system1', token)
server.modify_system(sid, 'hostname', 'system1', token)
server.modify_system(sid, 'gateway', '127.0.0.1', token)
server.modify_system(sid, 'profile', 'profile1', token)
server.modify_system(sid, 'kopts', { "dog" : "fido" }, token)
server.modify_system(sid, 'kopts-post', { "cat" : "fluffy" }, token)
server.modify_system(sid, 'kickstart', '/var/lib/cobbler/kickstarts/sample.ks', token)
server.modify_system(sid, 'netboot-enabled', True, token)
server.modify_system(sid, 'virt-path', "/opt/images", token)
server.modify_system(sid, 'virt-type', 'qemu', token)
server.modify_system(sid, 'name_servers', 'one two three four', token)
server.modify_system(sid, 'name_servers_search', 'one two three four', token)
server.modify_system(sid, 'modify-interface', {
"macaddress-eth0" : "AA:BB:CC:EE:EE:EE",
"ipaddress-eth0" : "192.168.10.50",
"gateway-eth0" : "192.168.10.1",
"virtbridge-eth0" : "virbr0",
"dnsname-eth0" : "foo.example.com",
"static-eth0" : False,
"dhcptag-eth0" : "section2",
"staticroutes-eth0" : "a:b:c d:e:f"
}, token)
server.modify_system(sid, 'modify-interface', {
"static-eth1" : False,
"staticroutes-eth1" : [ "g:h:i", "j:k:l" ]
}, token)
server.modify_system(sid, "mgmt-classes", [ "one", "two", "three"], token)
server.modify_system(sid, "template-files", {}, token)
server.modify_system(sid, "comment", "...", token)
server.modify_system(sid, "power_address", "power.example.org", token)
server.modify_system(sid, "power_type", "ipmitool", token)
server.modify_system(sid, "power_user", "Admin", token)
server.modify_system(sid, "power_pass", "magic", token)
server.modify_system(sid, "power_id", "7", token)
server.modify_system(sid, "redhat_management_key", "GAMMA", token)
server.modify_distro(did, "redhat_management_server", "spacewalk.example.com", token)
server.save_system(sid,token)
api.deserialize()
assert api.find_system("system1") != None
# FIXME: add some checks on object contents
iid = server.new_image(token)
server.modify_image(iid, "name", "image1", token)
server.modify_image(iid, "image-type", "iso", token)
server.modify_image(iid, "breed", "redhat", token)
server.modify_image(iid, "os-version", "rhel5", token)
server.modify_image(iid, "arch", "x86_64", token)
server.modify_image(iid, "file", "nfs://server/path/to/x.iso", token)
server.modify_image(iid, "owners", [ "alex", "michael" ], token)
server.modify_image(iid, "virt-auto-boot", 0, token)
server.modify_image(iid, "virt-cpus", 1, token)
server.modify_image(iid, "virt-file-size", 5, token)
server.modify_image(iid, "virt-bridge", "virbr0", token)
server.modify_image(iid, "virt-path", "VolGroup01", token)
server.modify_image(iid, "virt-ram", 1024, token)
server.modify_image(iid, "virt-type", "xenpv", token)
server.modify_image(iid, "comment", "...", token)
server.save_image(iid, token)
api.deserialize()
assert api.find_image("image1") != None
# FIXME: add some checks on object contents
# FIXME: repo adds
rid = server.new_repo(token)
server.modify_repo(rid, "name", "repo1", token)
server.modify_repo(rid, "arch", "x86_64", token)
server.modify_repo(rid, "mirror", "http://example.org/foo/x86_64", token)
server.modify_repo(rid, "keep-updated", True, token)
server.modify_repo(rid, "priority", "50", token)
server.modify_repo(rid, "rpm-list", [], token)
server.modify_repo(rid, "createrepo-flags", "--verbose", token)
server.modify_repo(rid, "yumopts", {}, token)
server.modify_repo(rid, "owners", [ "slash", "axl" ], token)
server.modify_repo(rid, "mirror-locally", True, token)
server.modify_repo(rid, "environment", {}, token)
server.modify_repo(rid, "comment", "...", token)
server.save_repo(rid, token)
api.deserialize()
assert api.find_repo("repo1") != None
# FIXME: add some checks on object contents
# test handle lookup
did = server.get_distro_handle("distro1", token)
assert did != None
rid = server.get_repo_handle("repo1", token)
assert rid != None
iid = server.get_image_handle("image1", token)
assert iid != None
# test renames
rc = server.rename_distro(did, "distro2", token)
assert rc == True
# object has changed due to parent rename, get a new handle
pid = server.get_profile_handle("profile1", token)
assert pid != None
rc = server.rename_profile(pid, "profile2", token)
assert rc == True
# object has changed due to parent rename, get a new handle
sid = server.get_system_handle("system1", token)
assert sid != None
rc = server.rename_system(sid, "system2", token)
assert rc == True
rc = server.rename_repo(rid, "repo2", token)
assert rc == True
rc = server.rename_image(iid, "image2", token)
assert rc == True
# FIXME: make the following code unnecessary
api.clear()
api.deserialize()
assert api.find_distro("distro2") != None
assert api.find_profile("profile2") != None
assert api.find_repo("repo2") != None
assert api.find_image("image2") != None
assert api.find_system("system2") != None
# BOOKMARK: test development is currently up to this point.
for d in api.distros():
print "FOUND DISTRO: %s" % d.name
assert api.find_distro("distro1") == None
assert api.find_profile("profile1") == None
assert api.find_repo("repo1") == None
assert api.find_image("image1") == None
assert api.find_system("system1") == None
did = server.get_distro_handle("distro2", token)
assert did != None
pid = server.get_profile_handle("profile2", token)
assert pid != None
rid = server.get_repo_handle("repo2", token)
assert rid != None
sid = server.get_system_handle("system2", token)
assert sid != None
iid = server.get_image_handle("image2", token)
assert iid != None
# test copies
server.copy_distro(did, "distro1", token)
server.copy_profile(pid, "profile1", token)
server.copy_repo(rid, "repo1", token)
server.copy_image(iid, "image1", token)
server.copy_system(sid, "system1", token)
api.deserialize()
assert api.find_distro("distro2") != None
assert api.find_profile("profile2") != None
assert api.find_repo("repo2") != None
assert api.find_image("image2") != None
assert api.find_system("system2") != None
assert api.find_distro("distro1") != None
assert api.find_profile("profile1") != None
assert api.find_repo("repo1") != None
assert api.find_image("image1") != None
assert api.find_system("system1") != None
assert server.last_modified_time() > 0
print server.get_distros_since(2)
assert len(server.get_distros_since(2)) > 0
assert len(server.get_profiles_since(2)) > 0
assert len(server.get_systems_since(2)) > 0
assert len(server.get_images_since(2)) > 0
assert len(server.get_repos_since(2)) > 0
assert len(server.get_distros_since(2)) > 0
now = time.time()
the_future = time.time() + 99999
assert len(server.get_distros_since(the_future)) == 0
# it would be cleaner to do this from the distro down
# and the server.update calls would then be unneeded.
server.remove_system("system1", token)
server.update()
server.remove_profile("profile1", token)
server.update()
server.remove_distro("distro1", token)
server.remove_repo("repo1", token)
server.remove_image("image1", token)
server.remove_system("system2", token)
# again, calls are needed because we're deleting in the wrong
# order. A fix is probably warranted for this.
server.update()
server.remove_profile("profile2", token)
server.update()
server.remove_distro("distro2", token)
server.remove_repo("repo2", token)
server.remove_image("image2", token)
# have to update the API as it has changed
api.update()
d1 = api.find_distro("distro1")
assert d1 is None
assert api.find_profile("profile1") is None
assert api.find_repo("repo1") is None
assert api.find_image("image1") is None
assert api.find_system("system1") is None
for x in api.distros():
print "DISTRO REMAINING: %s" % x.name
assert api.find_distro("distro2") is None
assert api.find_profile("profile2") is None
assert api.find_repo("repo2") is None
assert api.find_image("image2") is None
assert api.find_system("system2") is None
# FIXME: should not need cleanup as we've done it above
_test_remove_objects()
| javiplx/cobbler-debian | cobbler/remote.py | Python | gpl-2.0 | 82,406 |
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
"""
This module handles reading values from the ADC.
==================
Last modified: 2017-3-23 19:09:31
Author: YaHei (zk)
Contact: 929391459@qq.com
"""
from .pinmap import PinMap
pins = PinMap('/proc', 'adc', 6)
def analog_read(channel):
"""
Return the ADC reading of the given analog channel.
A0 and A1 are 6-bit ADCs, so the returned value ranges from 0 to 63;
A2, A3, A4 and A5 are 12-bit ADCs, so the returned value ranges from 0 to 4095.
"""
with open(pins.get_path(channel), 'r') as f:
return int(f.read(32).split(':')[1].strip())
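# Usage sketch (illustrative; the import path assumes this module lives in a
# ``pcduino`` package, which may differ on a given installation):
#
#   from pcduino.adc import analog_read
#   for channel in range(6):
#       print("A{} = {}".format(channel, analog_read(channel)))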
| wangxuxin/SmartHome | SmartHomeServer/SmartHome/pcduino/adc.py | Python | gpl-3.0 | 540 |
"""
Pipeline Preprocessing algorithms for Quicklook
"""
import numpy as np
import os,sys
import astropy
import astropy.io.fits as fits
from desispec import io
from desispec.io import read_raw,read_image
from desispec.io.meta import findfile
from desispec.io.fluxcalibration import read_average_flux_calibration
from desispec.calibfinder import findcalibfile
from desispec.quicklook import pas
from desispec.quicklook import qlexceptions,qllogger
from desispec.image import Image as im
from desispec.frame import Frame as fr
from desispec.io.xytraceset import read_xytraceset
from desispec.maskbits import ccdmask
qlog=qllogger.QLLogger("QuickLook",20)
log=qlog.getlog()
class Initialize(pas.PipelineAlg):
"""
This PA takes information from the fibermap and raw header
and adds it to the general info section of the merged dictionary
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="Ready"
rawtype=astropy.io.fits.hdu.hdulist.HDUList
pas.PipelineAlg.__init__(self,name,rawtype,rawtype,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
log.critical("Incompatible input!")
sys.exit("Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
raw=args[0]
flavor=kwargs['Flavor']
peaks=None
fibermap=None
if flavor != 'bias' and flavor != 'dark':
fibermap=kwargs['FiberMap']
peaks=kwargs['Peaks']
camera=kwargs['Camera']
return self.run_pa(raw,fibermap,camera,peaks,flavor)
def run_pa(self,raw,fibermap,camera,peaks,flavor):
import pytz
import datetime
from desitarget.targetmask import desi_mask
from desispec.fluxcalibration import isStdStar
#- Create general info dictionary to be sent to merged json
general_info={}
#- Get information from raw header
general_info['PROGRAM']=program=raw[0].header['PROGRAM'].upper()
calibs=['arcs','flat','bias','dark']
if not flavor in calibs:
general_info['AIRMASS']=raw[0].header['AIRMASS']
general_info['SEEING']=raw[0].header['SEEING']
#- Get information from fibermap
#- Limit flux info to fibers in camera
minfiber=int(camera[1])*500
maxfiber=minfiber+499
fibermags=[]
for flux in ['FLUX_G','FLUX_R','FLUX_Z']:
fibermags.append(22.5-2.5*np.log10(fibermap[flux][minfiber:maxfiber+1]))
#- Set sky/no flux fibers to 30 mag
for i in range(3):
skyfibs=np.where(fibermags[i]==0.)[0]
noflux=np.where(fibermags[i]==np.inf)[0]
badmags=np.array(list(set(skyfibs) | set(noflux)))
fibermags[i][badmags]=30.
general_info['FIBER_MAGS']=fibermags
#- Limit RA and DEC to 5 decimal places
targetra=fibermap['TARGET_RA'][minfiber:maxfiber+1]
general_info['RA']=[float("%.5f"%ra) for ra in targetra]
targetdec=fibermap['TARGET_DEC'][minfiber:maxfiber+1]
general_info['DEC']=[float("%.5f"%dec) for dec in targetdec]
#- Find fibers in camera per target type
elgfibers=np.where((fibermap['DESI_TARGET']&desi_mask.ELG)!=0)[0]
general_info['ELG_FIBERID']=[elgfib for elgfib in elgfibers if minfiber <= elgfib <= maxfiber]
lrgfibers=np.where((fibermap['DESI_TARGET']&desi_mask.LRG)!=0)[0]
general_info['LRG_FIBERID']=[lrgfib for lrgfib in lrgfibers if minfiber <= lrgfib <= maxfiber]
qsofibers=np.where((fibermap['DESI_TARGET']&desi_mask.QSO)!=0)[0]
general_info['QSO_FIBERID']=[qsofib for qsofib in qsofibers if minfiber <= qsofib <= maxfiber]
skyfibers=np.where((fibermap['DESI_TARGET']&desi_mask.SKY)!=0)[0]
general_info['SKY_FIBERID']=[skyfib for skyfib in skyfibers if minfiber <= skyfib <= maxfiber]
general_info['NSKY_FIB']=len(general_info['SKY_FIBERID'])
stdfibers=np.where(isStdStar(fibermap))[0]
general_info['STAR_FIBERID']=[stdfib for stdfib in stdfibers if minfiber <= stdfib <= maxfiber]
general_info['EXPTIME']=raw[0].header['EXPTIME']
# general_info['FITS_DESISPEC_VERION']=raw[0].header['FITS_DESISPEC_VERSION']
# general_info['PROC_DESISPEC_VERION']=raw[0].header['PROC_DESISPEC_VERSION']
# general_info['PROC_QuickLook_VERION']=raw[0].header['PROC_QuickLook_VERSION']
#- Get peaks from configuration file
if flavor == 'arcs':
general_info['B_PEAKS']=peaks['B_PEAKS']
general_info['R_PEAKS']=peaks['R_PEAKS']
general_info['Z_PEAKS']=peaks['Z_PEAKS']
#- Get current time information
general_info['QLrun_datime_UTC']=datetime.datetime.now(tz=pytz.utc).isoformat()
return (raw,general_info)
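# Illustrative sketch (not part of the module; the raw file name, empty config
# dict, fibermap and peaks objects are placeholders): a single PA can be
# exercised by hand outside the QL pipeline roughly like this.
#
#   raw = fits.open('desi-00000001.fits.fz')   # astropy HDUList, as Initialize expects
#   pa = Initialize("Ready", config={})
#   raw, general_info = pa.run(raw, Flavor='science', FiberMap=fibermap,
#                              Peaks=peaks, Camera='b0')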
class Preproc(pas.PipelineAlg):
#- TODO: currently io itself seems to have the preproc inside it. And preproc
#  does bias, pixelflat, etc. in one step.
from desispec.maskbits import ccdmask
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="Preproc"
rawtype=astropy.io.fits.hdu.hdulist.HDUList
pas.PipelineAlg.__init__(self,name,rawtype,im,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
input_raw=args[0][0]
dumpfile=None
if "dumpfile" in kwargs:
dumpfile=kwargs["dumpfile"]
if 'camera' not in kwargs:
#raise qlexceptions.ParameterException("Need Camera to run preprocess on raw files")
log.critical("Need Camera to run preprocess on raw files")
sys.exit()
else:
camera=kwargs["camera"]
if camera.upper() not in input_raw:
raise IOError('Camera {} not in raw input'.format(camera))
if "Bias" in kwargs:
bias=kwargs["Bias"]
else: bias=False
if "Pixflat" in kwargs:
pixflat=kwargs["Pixflat"]
else: pixflat=False
if "Mask" in kwargs:
mask=kwargs["Mask"]
else: mask=False
return self.run_pa(input_raw,camera,bias=bias,pixflat=pixflat,mask=mask,dumpfile=dumpfile)
def run_pa(self,input_raw,camera,bias=False,pixflat=False,mask=True,dumpfile='ttt1.fits'):
import desispec.preproc
rawimage=input_raw[camera.upper()].data
header=input_raw[camera.upper()].header
primary_header=input_raw[0].header
if 'INHERIT' in header and header['INHERIT']:
h0 = input_raw[0].header
for key in h0:
if key not in header:
header[key] = h0[key]
#- WARNING!!!This is a hack for QL to run on old raw images for QLF to be working on old set of data
#if "PROGRAM" not in header:
# log.warning("Temporary hack for QL to add header key PROGRAM. Only to facilitate QLF to work on their dataset. Remove this after some time and run with new data set")
# header["PROGRAM"]= 'dark'
#if header["FLAVOR"] not in [None,'bias','arc','flat','science']:
# header["FLAVOR"] = 'science'
img = desispec.preproc.preproc(rawimage,header,primary_header,bias=bias,pixflat=pixflat,mask=mask)
if img.mask is not None :
img.pix *= (img.mask==0)
if dumpfile is not None:
night = img.meta['NIGHT']
expid = img.meta['EXPID']
io.write_image(dumpfile, img)
log.debug("Wrote intermediate file %s after %s"%(dumpfile,self.name))
return img
class Flexure(pas.PipelineAlg):
""" Use desi_compute_trace_shifts to output modified psf file
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="Flexure"
pas.PipelineAlg.__init__(self,name,im,fr,config,logger)
def run(self,*args,**kwargs):
if 'preprocFile' not in kwargs:
#raise qlexceptions.ParameterException("Must provide preproc file for desi_compute_trace_shifts")
log.critical("Must provide preproc file for desi_compute_trace_shifts")
sys.exit()
if 'inputPSFFile' not in kwargs:
#raise qlexceptions.ParameterException("Must provide input psf file desi_compute_trace_shifts")
log.critical("Must provide input psf file desi_compute_trace_shifts")
sys.exit()
if 'outputPSFFile' not in kwargs:
#raise qlexceptions.ParameterException("Must provide output psf file")
log.critical("Must provide output psf file")
sys.exit()
preproc_file=kwargs["preprocFile"]
input_file=kwargs["inputPSFFile"]
output_file=kwargs["outputPSFFile"]
return self.run_pa(preproc_file,input_file,output_file,args)
def run_pa(self,preproc_file,input_file,output_file,args):
from desispec.util import runcmd
#- Generate modified psf file
cmd="desi_compute_trace_shifts --image {} --psf {} --outpsf {}".format(preproc_file,input_file,output_file)
if runcmd(cmd) !=0:
raise RuntimeError('desi_compute_trace_shifts failed, psftrace not written')
#- return image object to pass to boxcar for extraction
img=args[0]
return img
class BoxcarExtract(pas.PipelineAlg):
from desispec.quicklook.qlboxcar import do_boxcar
from desispec.maskbits import ccdmask
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="BoxcarExtract"
pas.PipelineAlg.__init__(self,name,im,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
if "PSFFile" not in kwargs:
#raise qlexceptions.ParameterException("Need PSF File")
log.critical("Need PSF File")
sys.exit()
input_image=args[0]
dumpfile=None
if "dumpfile" in kwargs:
dumpfile=kwargs["dumpfile"]
flavor=kwargs["Flavor"]
psf_filename=kwargs["PSFFile"]
#psf = PSF(psf_filename)
tset = read_xytraceset(psf_filename)
boxwidth=kwargs["BoxWidth"]
nspec=kwargs["Nspec"]
quickRes=kwargs["QuickResolution"] if "QuickResolution" in kwargs else False
if "usesigma" in kwargs:
usesigma=kwargs["usesigma"]
else: usesigma = False
if "Wavelength" not in kwargs:
wstart = np.ceil(tset.wavemin)
wstop = np.floor(tset.wavemax)
dw = 0.5
else:
wavelength=kwargs["Wavelength"]
if kwargs["Wavelength"] is not None: #- should be in wstart,wstop,dw format
wstart, wstop, dw = [float(w) for w in wavelength]
else:
wstart = np.ceil(tset.wavemin)
wstop = np.floor(tset.wavemax)
dw = 0.5
wave = np.arange(wstart, wstop+dw/2.0, dw)
if "Specmin" not in kwargs:
specmin=0
else:
specmin=kwargs["Specmin"]
if kwargs["Specmin"] is None:
specmin=0
if "Nspec" not in kwargs:
nspec = tset.nspec
else:
nspec=kwargs["Nspec"]
if nspec is None:
nspec=tset.nspec
specmax = specmin + nspec
camera = input_image.meta['CAMERA'].lower() #- b0, r1, .. z9
spectrograph = int(camera[1])
fibermin = spectrograph*500 + specmin
if "FiberMap" not in kwargs:
fibermap = None
fibers = np.arange(fibermin, fibermin+nspec, dtype='i4')
else:
fibermap=kwargs["FiberMap"]
fibermap = fibermap[fibermin:fibermin+nspec]
fibers = fibermap['FIBER']
if "Outfile" in kwargs:
outfile=kwargs["Outfile"]
else:
outfile=None
maskFile=None
if "MaskFile" in kwargs:
maskFile=kwargs['MaskFile']
#- Add some header keys relevant for this extraction
input_image.meta['NSPEC'] = (nspec, 'Number of spectra')
input_image.meta['WAVEMIN'] = (wstart, 'First wavelength [Angstroms]')
input_image.meta['WAVEMAX'] = (wstop, 'Last wavelength [Angstroms]')
input_image.meta['WAVESTEP']= (dw, 'Wavelength step size [Angstroms]')
return self.run_pa(input_image,flavor,tset,wave,boxwidth,nspec,
fibers=fibers,fibermap=fibermap,dumpfile=dumpfile,
maskFile=maskFile,usesigma=usesigma,quick_resolution=quickRes)
def run_pa(self,input_image,flavor,tset,outwave,boxwidth,nspec,
fibers=None,fibermap=None,dumpfile=None,
maskFile=None,usesigma=False,quick_resolution=False):
from desispec.quicklook.qlboxcar import do_boxcar
#import desispec.tset
flux,ivar,Rdata=do_boxcar(input_image, tset, outwave, boxwidth=boxwidth,
nspec=nspec,maskFile=maskFile,usesigma=usesigma,
quick_resolution=quick_resolution)
#- write to a frame object
qndiag=21
wsigma=None
if quick_resolution:
log.warning("deprecated, please use QFrame format to store sigma values")
wsigma=np.zeros(flux.shape)
if tset.ysig_vs_wave_traceset is not None :
dw = np.gradient(outwave)
for i in range(nspec):
ysig = tset.ysig_vs_wave(i,outwave)
y = tset.y_vs_wave(i,outwave)
dydw = np.gradient(y)/dw
wsigma[i] = ysig/dydw # in A
frame = fr(outwave, flux, ivar, resolution_data=Rdata,fibers=fibers,
meta=input_image.meta, fibermap=fibermap,
wsigma=wsigma,ndiag=qndiag)
if dumpfile is not None:
night = frame.meta['NIGHT']
expid = frame.meta['EXPID']
io.write_frame(dumpfile, frame)
log.debug("Wrote intermediate file %s after %s"%(dumpfile,self.name))
return frame
def get_default_config(self):
return {("BoxWidth",2.5,"Boxcar halfwidth"),
("PSFFile","%%PSFFile","PSFFile to use"),
("DeltaW",0.5,"Binwidth of extrapolated wavelength array"),
("Nspec",500,"number of spectra to extract")
}
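# Illustrative sketch (not from the pipeline configs; the preprocessed image,
# fibermap and PSF file name are placeholders): a standalone boxcar extraction
# call might look like this.
#
#   boxcar = BoxcarExtract("BoxcarExtract", config={})
#   frame = boxcar.run(preproc_image,
#                      Flavor='science',
#                      PSFFile='psf-b0.fits',
#                      BoxWidth=2.5,
#                      Nspec=500,
#                      Wavelength=None,
#                      FiberMap=fibermap)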
# TODO 2d extraction runs fine as well. Will need more testing of the setup.
class Extraction_2d(pas.PipelineAlg):
"""
Offline 2D extraction for offline QuickLook
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="2D Extraction" # using specter.extract.ex2d
pas.PipelineAlg.__init__(self,name,im,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
if "PSFFile_sp" not in kwargs:
#raise qlexceptions.ParameterException("Need PSF File")
log.critical("Need PSF File")
sys.exit()
from specter.psf import load_psf
input_image=args[0]
psffile=kwargs["PSFFile_sp"]
psf=load_psf(psffile)
if "Wavelength" not in kwargs:
wstart = np.ceil(psf.wmin_all)
wstop = np.floor(psf.wmax_all)
dw = 0.5
else:
wavelength=kwargs["Wavelength"]
if kwargs["Wavelength"] is not None: #- should be in wstart,wstop,dw format
wstart, wstop, dw = [float(w) for w in wavelength]
else:
wstart = np.ceil(psf.wmin_all)
wstop = np.floor(psf.wmax_all)
dw = 0.5
wave = np.arange(wstart, wstop+dw/2.0, dw)
if "Specmin" not in kwargs:
specmin=0
else:
specmin=kwargs["Specmin"]
if kwargs["Specmin"] is None:
specmin=0
if "Nspec" not in kwargs:
nspec = psf.nspec
else:
nspec=kwargs["Nspec"]
if nspec is None:
nspec=psf.nspec
specmax = specmin + nspec
camera = input_image.meta['CAMERA'].lower() #- b0, r1, .. z9
spectrograph = int(camera[1])
fibermin = spectrograph*500 + specmin
if "FiberMap" not in kwargs:
fibermap = None
fibers = np.arange(fibermin, fibermin+nspec, dtype='i4')
else:
fibermap=kwargs["FiberMap"]
fibermap = fibermap[fibermin:fibermin+nspec]
fibers = fibermap['FIBER']
if "Regularize" in kwargs:
regularize=kwargs["Regularize"]
else:
regularize=False
if "ndecorr" in kwargs:
ndecorr=kwargs["ndecorr"]
else:
ndecorr=True
bundlesize=25 #- hard coded
if "Outfile" in kwargs:
outfile=kwargs["Outfile"]
else:
outfile=None
if "Nwavestep" in kwargs:
wavesize=kwargs["Nwavestep"]
else:
wavesize=50
return self.run_pa(input_image,psf,specmin,nspec,wave,regularize=regularize,ndecorr=ndecorr, bundlesize=bundlesize, wavesize=wavesize,outfile=outfile,fibers=fibers,fibermap=fibermap)
def run_pa(self,input_image,psf,specmin,nspec,wave,regularize=None,ndecorr=True,bundlesize=25,wavesize=50, outfile=None,fibers=None,fibermap=None):
import specter
from specter.extract import ex2d
flux,ivar,Rdata=ex2d(input_image.pix,input_image.ivar*(input_image.mask==0),psf,specmin,nspec,wave,regularize=regularize,ndecorr=ndecorr,bundlesize=bundlesize,wavesize=wavesize)
#- Augment input image header for output
input_image.meta['NSPEC'] = (nspec, 'Number of spectra')
input_image.meta['WAVEMIN'] = (wave[0], 'First wavelength [Angstroms]')
input_image.meta['WAVEMAX'] = (wave[-1], 'Last wavelength [Angstroms]')
input_image.meta['WAVESTEP']= (wave[1]-wave[0], 'Wavelength step size [Angstroms]')
input_image.meta['SPECTER'] = (specter.__version__, 'https://github.com/desihub/specter')
#input_image.meta['IN_PSF'] = (_trim(psf_file), 'Input spectral PSF')
#input_image.meta['IN_IMG'] = (_trim(input_file), 'Input image')
frame = fr(wave, flux, ivar, resolution_data=Rdata,fibers=fibers, meta=input_image.meta, fibermap=fibermap)
if outfile is not None: #- writing to a frame file if needed.
io.write_frame(outfile,frame)
log.debug("wrote frame output file %s"%outfile)
return frame
class ComputeFiberflat(pas.PipelineAlg):
""" PA to compute fiberflat field correction from a DESI continuum lamp frame
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="ComputeFiberflat"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
input_frame=args[0] #- frame object to calculate fiberflat from
if "outputFile" not in kwargs:
#raise qlexceptions.ParameterException("Need output file name to write fiberflat File")
log.critical("Need output file name to write fiberflat File")
sys.exit()
outputfile=kwargs["outputFile"]
return self.run_pa(input_frame,outputfile)
def run_pa(self,input_frame,outputfile):
from desispec.fiberflat import compute_fiberflat
import desispec.io.fiberflat as ffIO
fiberflat=compute_fiberflat(input_frame)
ffIO.write_fiberflat(outputfile,fiberflat,header=input_frame.meta)
log.debug("Fiberflat file wrtten. Exiting Quicklook for this configuration") #- File written no need to go further
# !!!!! SAMI to whoever wrote this
# PA's or any other components *CANNOT* call sys.exit()!! this needs to be fixed!!!!!
sys.exit(0)
class ComputeFiberflat_QL(pas.PipelineAlg):
""" PA to compute fiberflat field correction from a DESI continuum lamp frame
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="ComputeFiberflat"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
input_frame=args[0] #- frame object to calculate fiberflat from
if "outputFile" not in kwargs:
#raise qlexceptions.ParameterException("Need output file name to write fiberflat File")
log.critical("Need output file name to write fiberflat File")
sys.exit()
outputfile=kwargs["outputFile"]
return self.run_pa(input_frame,outputfile)
def run_pa(self,frame,outputfile):
from desispec.fiberflat import FiberFlat
import desispec.io.fiberflat as ffIO
from desispec.linalg import cholesky_solve
nwave=frame.nwave
nfibers=frame.nspec
wave = frame.wave #- this will become part of output too
flux = frame.flux
sumFlux=np.zeros((nwave))
realFlux=np.zeros(flux.shape)
ivar = frame.ivar*(frame.mask==0)
#deconv
for fib in range(nfibers):
Rf=frame.R[fib].todense()
B=flux[fib]
try:
realFlux[fib]=cholesky_solve(Rf,B)
except:
log.warning("cholesky_solve failed for fiber {}, using numpy.linalg.solve instead.".format(fib))
realFlux[fib]=np.linalg.solve(Rf,B)
sumFlux+=realFlux[fib]
#iflux=nfibers/sumFlux
flat = np.zeros(flux.shape)
flat_ivar=np.zeros(ivar.shape)
avg=sumFlux/nfibers
for fib in range(nfibers):
Rf=frame.R[fib]
# apply and reconvolute
M=Rf.dot(avg)
M0=(M==0)
flat[fib]=(~M0)*flux[fib]/(M+M0) +M0
flat_ivar[fib]=ivar[fib]*M**2
fibflat=FiberFlat(frame.wave.copy(),flat,flat_ivar,frame.mask.copy(),avg)
#fiberflat=compute_fiberflat(input_frame)
ffIO.write_fiberflat(outputfile,fibflat,header=frame.meta)
log.info("Wrote fiberflat file {}".format(outputfile))
fflatfile = ffIO.read_fiberflat(outputfile)
return fflatfile
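# Note on the recipe above (summary only, no additional behavior): each fiber is
# deconvolved with its resolution matrix R, the deconvolved spectra are averaged
# over fibers, the average is reconvolved with each fiber's R, and the flat is
# flux/(R.avg) with the inverse variance propagated as ivar*(R.avg)**2.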
class ApplyFiberFlat(pas.PipelineAlg):
"""
PA to Apply the fiberflat field to the given frame
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="ApplyFiberFlat"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
if "FiberFlatFile" not in kwargs:
#raise qlexceptions.ParameterException("Need Fiberflat file")
log.critical("Need Fiberflat file")
sys.exit()
input_frame=args[0]
fiberflat=kwargs["FiberFlatFile"]
return self.run_pa(input_frame,fiberflat)
def run_pa(self,input_frame,fiberflat):
from desispec.fiberflat import apply_fiberflat
apply_fiberflat(input_frame,fiberflat)
return input_frame
class ApplyFiberFlat_QL(pas.PipelineAlg):
"""
PA to Apply the fiberflat field (QL) to the given frame
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="Apply FiberFlat"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
if "FiberFlatFile" not in kwargs:
#raise qlexceptions.ParameterException("Need Fiberflat file")
log.critical("Need Fiberflat file")
sys.exit()
input_frame=args[0]
dumpfile=None
if "dumpfile" in kwargs:
dumpfile=kwargs["dumpfile"]
fiberflat=kwargs["FiberFlatFile"]
return self.run_pa(input_frame,fiberflat,dumpfile=dumpfile)
def run_pa(self,input_frame,fiberflat,dumpfile=None):
from desispec.quicklook.quickfiberflat import apply_fiberflat
fframe=apply_fiberflat(input_frame,fiberflat)
if dumpfile is not None:
night = fframe.meta['NIGHT']
expid = fframe.meta['EXPID']
io.write_frame(dumpfile, fframe)
log.debug("Wrote intermediate file %s after %s"%(dumpfile,self.name))
return fframe
class ComputeSky(pas.PipelineAlg):
""" PA to compute sky model from a DESI frame
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="ComputeSky"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
if "FiberFlatFile" not in kwargs: #- need this as fiberflat has to apply to frame first
#raise qlexceptions.ParameterException("Need Fiberflat frame file")
log.critical("Need Fiberflat frame file!")
sys.exit()
input_frame=args[0] #- frame object to calculate sky from
if "FiberMap" in kwargs:
fibermap=kwargs["FiberMap"]
if "Outfile" not in kwargs:
#raise qlexceptions.ParameterException("Need output file name to write skymodel")
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
fiberflat=kwargs["FiberFlatFile"]
outputfile=kwargs["Outfile"]
return self.run_pa(input_frame,fiberflat,outputfile)
def run_pa(self,input_frame,fiberflat,outputfile):
from desispec.fiberflat import apply_fiberflat
from desispec.sky import compute_sky
from desispec.io.sky import write_sky
#- First apply fiberflat to sky fibers
apply_fiberflat(input_frame,fiberflat)
#- calculate the model
skymodel=compute_sky(input_frame)
write_sky(outputfile,skymodel,input_frame.meta)
log.debug("Sky Model file wrtten. Exiting pipeline for this configuration")
sys.exit(0)
class ComputeSky_QL(pas.PipelineAlg):
""" PA to compute sky model from a DESI frame
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="ComputeSky_QL"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
input_frame=args[0] #- frame object to calculate sky from. Should be fiber flat corrected
if "FiberMap" in kwargs:
fibermap=kwargs["FiberMap"]
else: fibermap=None
if "Apply_resolution" in kwargs:
apply_resolution=kwargs["Apply_resolution"]
if "Outfile" not in kwargs:
#raise qlexceptions.ParameterException("Need output file name to write skymodel")
log.critical("Need output file name to write skymodel!")
sys.exit()
outputfile=kwargs["Outfile"]
return self.run_pa(input_frame,outputfile,fibermap=fibermap,apply_resolution=apply_resolution)
def run_pa(self,input_frame,outputfile,fibermap=None,apply_resolution=False): #- input frame should be already fiberflat fielded
from desispec.io.sky import write_sky
from desispec.quicklook.quicksky import compute_sky
skymodel=compute_sky(input_frame,fibermap,apply_resolution=apply_resolution)
write_sky(outputfile,skymodel,input_frame.meta)
# SEE ABOVE COMMENT!!!!
log.debug("Sky Model file wrtten. Exiting the pipeline for this configuration")
sys.exit(0)
class SkySub(pas.PipelineAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SkySub"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
if "SkyFile" not in kwargs:
#raise qlexceptions.ParameterException("Need Skymodel file")
log.critical("Need Skymodel file!")
sys.exit()
input_frame=args[0] #- this must be flat field applied before sky subtraction in the pipeline
skyfile=kwargs["SkyFile"] #- Read sky model file itself from an argument
from desispec.io.sky import read_sky
skymodel=read_sky(skyfile)
return self.run_pa(input_frame,skymodel)
def run_pa(self,input_frame,skymodel):
from desispec.sky import subtract_sky
subtract_sky(input_frame,skymodel)
return (input_frame, skymodel)
class SkySub_QL(pas.PipelineAlg):
"""
This is for QL Sky subtraction. The input frame object should be fiber flat corrected.
Unlike offline, if no skymodel file is given as input, a sky compute method is called
to create a skymodel object and then subtraction is performed. Outputing that skymodel
to a file is optional and can be configured.
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SkySub_QL"
pas.PipelineAlg.__init__(self,name,fr,type(tuple),config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
input_frame=args[0] #- this must be flat field applied before sky subtraction in the pipeline
dumpfile=None
if "dumpfile" in kwargs:
dumpfile=kwargs["dumpfile"]
if "SkyFile" in kwargs:
from desispec.io.sky import read_sky
skyfile=kwargs["SkyFile"] #- Read sky model file itself from an argument
log.debug("Using given sky file %s for subtraction"%skyfile)
skymodel=read_sky(skyfile)
else:
if "Outskyfile" in kwargs:
outskyfile=kwargs["Outskyfile"]
else: outskyfile=None
log.debug("No sky file given. Computing sky first")
from desispec.quicklook.quicksky import compute_sky
if "Apply_resolution" in kwargs:
apply_resolution=kwargs["Apply_resolution"]
log.debug("Apply fiber to fiber resolution variation in computing sky")
else: apply_resolution = False
fibermap=input_frame.fibermap
skymodel=compute_sky(input_frame,fibermap,apply_resolution=apply_resolution)
if outskyfile is not None:
from desispec.io.sky import write_sky
log.debug("writing an output sky model file %s "%outskyfile)
write_sky(outskyfile,skymodel,input_frame.meta)
#- now do the subtraction
return self.run_pa(input_frame,skymodel,dumpfile=dumpfile)
def run_pa(self,input_frame,skymodel,dumpfile=None):
from desispec.quicklook.quicksky import subtract_sky
sframe=subtract_sky(input_frame,skymodel)
if dumpfile is not None:
night = sframe.meta['NIGHT']
expid = sframe.meta['EXPID']
io.write_frame(dumpfile, sframe)
log.debug("Wrote intermediate file %s after %s"%(dumpfile,self.name))
return (sframe,skymodel)
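# Illustrative sketch (assumptions: fframe is a fiberflat-corrected Frame that
# carries a fibermap, and no precomputed sky model is available):
#
#   skysub = SkySub_QL("SkySub_QL", config={})
#   sframe, skymodel = skysub.run(fframe, Apply_resolution=False)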
class ApplyFluxCalibration(pas.PipelineAlg):
"""PA to apply flux calibration to the given sframe
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="Apply Flux Calibration"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0][0])):
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0][0])))
input_frame=args[0][0]
if "outputfile" in kwargs:
outputfile=kwargs["outputfile"]
else:
log.critical("Must provide output file to write cframe")
sys.exit()
return self.run_pa(input_frame,outputfile=outputfile)
def run_pa(self,frame,outputfile=None):
night=frame.meta['NIGHT']
camera=frame.meta['CAMERA']
expid=frame.meta['EXPID']
rawfile=findfile('raw',night,expid,rawdata_dir=os.environ['QL_SPEC_DATA'])
rawfits=fits.open(rawfile)
primary_header=rawfits[0].header
image=read_raw(rawfile,camera)
fluxcalib_filename=findcalibfile([image.meta,primary_header],"FLUXCALIB")
fluxcalib = read_average_flux_calibration(fluxcalib_filename)
log.info("read average calib in {}".format(fluxcalib_filename))
seeing = frame.meta["SEEING"]
airmass = frame.meta["AIRMASS"]
exptime = frame.meta["EXPTIME"]
exposure_calib = fluxcalib.value(seeing=seeing,airmass=airmass)
for q in range(frame.nspec) :
fiber_calib=np.interp(frame.wave[q],fluxcalib.wave,exposure_calib)*exptime
inv_calib = (fiber_calib>0)/(fiber_calib + (fiber_calib==0))
frame.flux[q] *= inv_calib
frame.ivar[q] *= fiber_calib**2*(fiber_calib>0)
write_qframe(outputfile,frame)
log.info("Wrote flux calibrated frame file %s after %s"%(outputfile,self.name))
return frame
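# Note on the calibration above (summary only, no additional behavior): the
# average calibration vector is interpolated onto the frame wavelength grid and
# scaled by EXPTIME; each fiber's flux is divided by it and its ivar multiplied
# by the square, while pixels with zero calibration are left unchanged.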
class ResolutionFit(pas.PipelineAlg):
"""
Fitting of Arc lines on extracted arc spectra, polynomial expansion of the fitted sigmas, and updating
the coefficients to the new traceset file
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="ResolutionFit"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
if "PSFoutfile" not in kwargs:
#raise qlexceptions.ParameterException("Missing psfoutfile in the arguments")
log.critical("Missing psfoutfile in the arguments!")
sys.exit()
psfoutfile=kwargs["PSFoutfile"]
psfinfile=kwargs["PSFinputfile"]
if "usesigma" in kwargs:
usesigma=kwargs["usesigma"]
else: usesigma = False
tset = read_xytraceset(psfinfile)
domain=(tset.wavemin,tset.wavemax)
input_frame=args[0]
linelist=None
if "Linelist" in kwargs:
linelist=kwargs["Linelist"]
npoly=2
if "NPOLY" in kwargs:
npoly=kwargs["NPOLY"]
nbins=2
if "NBINS" in kwargs:
nbins=kwargs["NBINS"]
return self.run_pa(input_frame,psfinfile,psfoutfile,usesigma,linelist=linelist,npoly=npoly,nbins=nbins,domain=domain)
def run_pa(self,input_frame,psfinfile,outfile,usesigma,linelist=None,npoly=2,nbins=2,domain=None):
from desispec.quicklook.arcprocess import process_arc,write_psffile
from desispec.quicklook.palib import get_resolution
wcoeffs,wavemin,wavemax =process_arc(input_frame,linelist=linelist,npoly=npoly,nbins=nbins,domain=domain)
write_psffile(psfinfile,wcoeffs,wavemin,wavemax,outfile)
log.debug("Wrote xytraceset file {}".format(outfile))
#- update the arc frame resolution from new coeffs
tset = read_xytraceset(outfile)
input_frame.resolution_data=get_resolution(input_frame.wave,input_frame.nspec,tset,usesigma=usesigma)
return (tset,input_frame)
# =======================
# qproc algorithms
# =======================
from desispec.sky import SkyModel
from desispec.qproc.io import write_qframe
from desispec.qproc.qextract import qproc_boxcar_extraction
from desispec.qproc.qfiberflat import qproc_apply_fiberflat
from desispec.qproc.qsky import qproc_sky_subtraction
class Extract_QP(pas.PipelineAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="Extract_QP"
pas.PipelineAlg.__init__(self,name,im,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
if "PSFFile" not in kwargs:
#raise qlexceptions.ParameterException("Need PSF File")
log.critical("Need PSF file!")
sys.exit()
input_image=args[0]
dumpfile=None
if "dumpfile" in kwargs:
dumpfile=kwargs["dumpfile"]
psf_filename=kwargs["PSFFile"]
print("psf_filename=",psf_filename)
traceset = read_xytraceset(psf_filename)
width=kwargs["FullWidth"]
nspec=kwargs["Nspec"]
if "Wavelength" not in kwargs:
wstart = np.ceil(traceset.wavemin)
wstop = np.floor(traceset.wavemax)
dw = 0.5
else:
wavelength=kwargs["Wavelength"]
print('kwargs["Wavelength"]=',kwargs["Wavelength"])
if kwargs["Wavelength"] is not None: #- should be in wstart,wstop,dw format
wstart, wstop, dw = [float(w) for w in wavelength]
else:
                wstart = np.ceil(traceset.wavemin)
                wstop = np.floor(traceset.wavemax)
dw = 0.5
wave = np.arange(wstart, wstop+dw/2.0, dw)
if "Specmin" not in kwargs:
specmin=0
else:
specmin=kwargs["Specmin"]
if kwargs["Specmin"] is None:
specmin=0
if "Nspec" not in kwargs:
nspec = traceset.nspec
else:
nspec=kwargs["Nspec"]
if nspec is None:
nspec=traceset.nspec
specmax = specmin + nspec
camera = input_image.meta['CAMERA'].lower() #- b0, r1, .. z9
spectrograph = int(camera[1])
fibermin = spectrograph*500 + specmin
if "FiberMap" not in kwargs:
fibermap = None
fibers = np.arange(fibermin, fibermin+nspec, dtype='i4')
else:
fibermap=kwargs["FiberMap"]
fibermap = fibermap[fibermin:fibermin+nspec]
fibers = fibermap['FIBER']
if "Outfile" in kwargs:
outfile=kwargs["Outfile"]
else:
outfile=None
maskFile=None
if "MaskFile" in kwargs:
maskFile=kwargs['MaskFile']
#- Add some header keys relevant for this extraction
input_image.meta['NSPEC'] = (nspec, 'Number of spectra')
input_image.meta['WAVEMIN'] = (wstart, 'First wavelength [Angstroms]')
input_image.meta['WAVEMAX'] = (wstop, 'Last wavelength [Angstroms]')
input_image.meta['WAVESTEP']= (dw, 'Wavelength step size [Angstroms]')
return self.run_pa(input_image,traceset,wave,width,nspec,
fibers=fibers,fibermap=fibermap,dumpfile=dumpfile,
maskFile=maskFile)
def run_pa(self,input_image,traceset,outwave,width,nspec,
fibers=None,fibermap=None,dumpfile=None,
maskFile=None):
qframe = qproc_boxcar_extraction(traceset,input_image,fibers=fibers, width=width, fibermap=fibermap)
if dumpfile is not None:
write_qframe(dumpfile, qframe, fibermap=fibermap)
log.debug("Wrote intermediate file %s after %s"%(dumpfile,self.name))
return qframe
def get_default_config(self):
return {("FullWidth",7,"Boxcar full width"),
("PSFFile","%%PSFFile","PSFFile to use"),
("DeltaW",0.5,"Binwidth of extrapolated wavelength array"),
("Nspec",500,"number of spectra to extract")
}
class ComputeFiberflat_QP(pas.PipelineAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="ComputeFiberflat"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
input_frame=args[0] #- frame object to calculate fiberflat from
if "outputFile" not in kwargs:
raise qlexceptions.ParameterException("Need output file name to write fiberflat File")
outputfile=kwargs["outputFile"]
return self.run_pa(input_frame,outputfile)
def run_pa(self,qframe,outputfile):
from desispec.qproc.qfiberflat import qproc_compute_fiberflat
import desispec.io.fiberflat as ffIO
fibflat=qproc_compute_fiberflat(qframe)
ffIO.write_fiberflat(outputfile,fibflat,header=qframe.meta)
log.info("Wrote fiberflat file {}".format(outputfile))
fflatfile = ffIO.read_fiberflat(outputfile)
return fflatfile
class ApplyFiberFlat_QP(pas.PipelineAlg):
"""
PA to Apply the fiberflat field (QP) to the given qframe
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="Apply FiberFlat"
pas.PipelineAlg.__init__(self,name,fr,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
if "FiberFlatFile" not in kwargs:
#raise qlexceptions.ParameterException("Need Fiberflat file")
log.critical("Need Fiberflat file!")
sys.exit()
input_qframe=args[0]
dumpfile=None
if "dumpfile" in kwargs:
dumpfile=kwargs["dumpfile"]
fiberflat=kwargs["FiberFlatFile"]
return self.run_pa(input_qframe,fiberflat,dumpfile=dumpfile)
def run_pa(self,qframe,fiberflat,dumpfile=None):
qproc_apply_fiberflat(qframe,fiberflat)
if dumpfile is not None:
night = qframe.meta['NIGHT']
expid = qframe.meta['EXPID']
write_qframe(dumpfile, qframe)
log.debug("Wrote intermediate file %s after %s"%(dumpfile,self.name))
return qframe
class SkySub_QP(pas.PipelineAlg):
"""
Sky subtraction. The input frame object should be fiber flat corrected.
No sky model is saved for now
"""
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SkySub_QP"
pas.PipelineAlg.__init__(self,name,fr,type(tuple),config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
#raise qlexceptions.ParameterException("Missing input parameter")
log.critical("Missing input parameter!")
sys.exit()
if not self.is_compatible(type(args[0])):
#raise qlexceptions.ParameterException("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
log.critical("Incompatible input!")
sys.exit("Incompatible input. Was expecting %s got %s"%(type(self.__inpType__),type(args[0])))
input_qframe=args[0] #- this must be flat field applied before sky subtraction in the pipeline
dumpfile=None
if "dumpfile" in kwargs:
dumpfile=kwargs["dumpfile"]
#- now do the subtraction
return self.run_pa(input_qframe,dumpfile=dumpfile)
def run_pa(self,qframe,dumpfile=None):
skymodel = qproc_sky_subtraction(qframe,return_skymodel=True)
#qproc_sky_subtraction(qframe)
if dumpfile is not None:
night = qframe.meta['NIGHT']
expid = qframe.meta['EXPID']
write_qframe(dumpfile, qframe)
log.debug("Wrote intermediate file %s after %s"%(dumpfile,self.name))
# convert for QA
# sframe=qframe.asframe()
# tmpsky=np.interp(sframe.wave,qframe.wave[0],skymodel[0])
# skymodel = SkyModel(sframe.wave,np.tile(tmpsky,(sframe.nspec,1)),np.ones(sframe.flux.shape),np.zeros(sframe.flux.shape,dtype="int32"))
return (qframe,skymodel)
| desihub/desispec | py/desispec/quicklook/procalgs.py | Python | bsd-3-clause | 51,722 |
# -*- coding: utf-8 -*-
# Copyright 2011 Jiří Janoušek <janousek.jiri@gmail.com>
# Copyright 2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>
import logging
logger = logging.getLogger("zim.objectmanager")
from zim.signals import SignalEmitter, SIGNAL_AFTER
from zim.utils import WeakSet
from zim.config.dicts import ConfigDict, String
import zim.plugins
## TODO remove singleton contruction, add ref to plugin manager
## to allow fallback object widget to have toolbar to load plugin
class _ObjectManager(object):
'''Manages custom objects.'''
def __init__(self):
self.factories = {}
self.objects = {'fallback': WeakSet()}
self.window_extensions = {}
def register_object(self, type, factory, window_extension=None):
'''Register a factory method or class for a specific object type.
@param type: the object type as string (unique name)
		@param factory: can be either an object class or a method,
		it should be callable and return objects. When constructing objects
		this factory will be called as::
			factory(attrib, text)
		Where:
		  - C{attrib} is a dict with attributes
		  - C{text} is the main text source of the object
		@param window_extension: dictionary - the plugin related window_extension
		@returns: a previously set factory for C{type} or C{None}
'''
logger.debug('Registered object %s', type)
type = type.lower()
old = self.factories.get(type)
self.factories[type] = factory
self.objects[type] = WeakSet()
self.window_extensions[type] = window_extension
return old
def unregister_object(self, type):
'''Unregister a specific object type.
@returns: C{True} on success, C{False} if given type has not
been registered.
'''
type = type.lower()
if type in self.factories:
del self.factories[type]
del self.objects[type]
return True
else:
return False
def is_registered(self, type):
'''Returns C{True} if object type has already been registered.'''
return type.lower() in self.factories
def get_object(self, type, attrib, text):
'''Returns a new object for given type with given attributes
@param type: the object type as string
@param attrib: dict with attributes
@param text: main source of the object
@returns: a new object instance, either created by the factory
method for C{type}, or an instance of L{FallbackObject}
'''
type = type.lower()
if type in self.factories:
factory = self.factories[type]
obj = factory(attrib, text)
self.objects[type].add(obj)
else:
factory = FallbackObject
obj = factory(attrib, text)
self.objects['fallback'].add(obj)
return obj
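	# Illustrative sketch only (not taken from the original docs): a plugin
	# typically registers a factory and lets the manager construct objects,
	# e.g. (MyObjectClass is a hypothetical factory):
	#	ObjectManager.register_object('mytype', MyObjectClass)
	#	obj = ObjectManager.get_object('mytype', {'type': 'mytype'}, 'source text')
	# Unregistered types fall back to L{FallbackObject} below.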
def get_active_objects(self, type):
'''Returns an iterator for active objects for a specific type.
(Objects are 'active' as long as they are not destroyed.)
'''
if type in self.objects:
return iter(self.objects[type])
else:
return []
def find_plugin(self, type):
'''Find a plugin to handle a specific object type. Intended to
suggest plugins to the user that can be loaded.
@param type: object type as string
		@returns: a 5-tuple of the plugin key, the plugin name, a boolean
		for the dependency check, the plugin class and the related plugin
		window_extension, or C{None} when no suitable plugin is found
'''
for name in zim.plugins.PluginManager.list_installed_plugins(): # XXX
try:
klass = zim.plugins.PluginManager.get_plugin_class(name) # XXX
types = klass.plugin_info.get('object_types')
if types and type in types:
activatable = klass.check_dependencies_ok()
win_ext = self.window_extensions[type] if type in self.window_extensions else None
return (name, klass.plugin_info['name'], activatable, klass, win_ext)
except:
logger.exception('Could not load plugin %s', name)
continue
return None
ObjectManager = _ObjectManager() # Singleton object
class CustomObjectClass(SignalEmitter):
'''
Base Class for custom objects.
Signal:
* 'modified-changed' -- modification state has been changed
'''
OBJECT_ATTR = {
'type': String('object')
}
# define signals we want to use - (closure type, return type and arg types)
__signals__ = {
'modified-changed': (SIGNAL_AFTER, None, ()),
}
def __init__(self, attrib, data):
self._attrib = ConfigDict(attrib)
self._attrib.define(self.OBJECT_ATTR)
self._data = data if data is not None else ''
self.modified = False
def get_modified(self):
'''Returns True if object has been modified.'''
return self.modified
def set_modified(self, modified):
'''Sets modification state of object and emits signal if needed.'''
if self.modified != modified:
self.modified = modified
self.emit("modified-changed")
def get_widget(self):
'''Returns a new gtk widget for this object'''
		raise NotImplementedError
def get_attrib(self):
'''Returns object attributes. The 'type' attribute stores type of object.'''
return self._attrib.dump()
def get_data(self):
'''Returns serialized data of object.'''
return self._data
def dump(self, format, dumper, linker=None):
'''Dumps current object. Returns None if format is not supported.'''
return None
class FallbackObject(CustomObjectClass):
'''Fallback object displays data as TextView and
preserves attributes unmodified.
'''
def __init__(self, attrib, data):
CustomObjectClass.__init__(self, attrib, data)
self.buffer = None
def get_widget(self):
import gtk
from zim.gui.objectmanager import FallbackObjectWidget
if not self.buffer:
self.buffer = gtk.TextBuffer()
self.buffer.set_text(self._data)
self.buffer.connect('modified-changed', self.on_modified_changed)
self.buffer.set_modified(False)
self._data = None
type = self._attrib['type']
return FallbackObjectWidget(type, self.buffer)
def get_data(self):
if self.buffer:
bounds = self.buffer.get_bounds()
return self.buffer.get_text(bounds[0], bounds[1])
else:
return self._data
def set_data(self, text):
if self.buffer:
self.buffer.set_text(text)
else:
self._data = text
def on_modified_changed(self, buffer):
'''Callback for TextBuffer's modifications.'''
if buffer.get_modified():
self.set_modified(True)
buffer.set_modified(False)
def set_label(self, label):
'''Sets label at the top area of widget.'''
self.label.set_text(label)
| Osndok/zim-desktop-wiki | zim/objectmanager.py | Python | gpl-2.0 | 6,232 |
from cssselect import HTMLTranslator
from lxml import etree
import re
from capybara.utils import inner_content
class HTML(object):
def __init__(self, source):
if not source:
source = "<html/>"
parser = etree.HTMLParser(encoding="utf-8")
tree = etree.HTML(source, parser=parser)
for element in tree.xpath("//textarea"):
content = inner_content(element)
content = re.sub("\A\n", "", content)
for child in element.getchildren():
element.remove(child)
element.text = content
self.tree = tree
def css(self, css):
return etree.XPath(HTMLTranslator().css_to_xpath(css))(self.tree)
def xpath(self, xpath):
return self.tree.xpath(xpath)
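    # Illustrative usage sketch (assumed, not taken from the capybara sources):
    #   html = HTML("<html><body><p class='note'>hi</p></body></html>")
    #   html.css("p.note")   # list of lxml elements matched by the CSS selector
    #   html.xpath("//p")    # the same elements selected with a raw XPath query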
| elliterate/capybara.py | capybara/html.py | Python | mit | 780 |
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# Copyright © 2017 Dylan Baker
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
from __future__ import division
from datetime import timedelta
from datetime import datetime
from collections import deque
from cStringIO import StringIO
import logging
import mimetypes
import os
import re
import shlex
import subprocess
import email
from email.generator import Generator
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import urwid
import magic
from twisted.internet import reactor
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import Deferred
def split_commandline(s, comments=False, posix=True):
"""
splits semi-colon separated commandlines
"""
# shlex seems to remove unescaped quotes and backslashes
s = s.replace('\\', '\\\\')
s = s.replace('\'', '\\\'')
s = s.replace('\"', '\\\"')
# encode s to utf-8 for shlex
if isinstance(s, unicode):
s = s.encode('utf-8')
lex = shlex.shlex(s, posix=posix)
lex.whitespace_split = True
lex.whitespace = ';'
if not comments:
lex.commenters = ''
return list(lex)
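# Example (illustrative sketch, not from the original sources):
#   split_commandline("bclose; bnext") -> ['bclose', ' bnext']
# i.e. one entry per ';'-separated command; surrounding spaces are kept.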
def split_commandstring(cmdstring):
"""
split command string into a list of strings to pass on to subprocess.Popen
and the like. This simply calls shlex.split but works also with unicode
bytestrings.
"""
if isinstance(cmdstring, unicode):
cmdstring = cmdstring.encode('utf-8', errors='ignore')
return shlex.split(cmdstring)
def string_sanitize(string, tab_width=8):
r"""
    strips carriage returns and expands tabs to spaces
:param tab_width: number of spaces to replace tabs with. Read from
`globals.tabwidth` setting if `None`
:type tab_width: int or `None`
>>> string_sanitize(' foo\rbar ', 8)
' foobar '
>>> string_sanitize('foo\tbar', 8)
'foo bar'
>>> string_sanitize('foo\t\tbar', 8)
'foo bar'
"""
string = string.replace('\r', '')
lines = list()
for line in string.split('\n'):
tab_count = line.count('\t')
if tab_count > 0:
line_length = 0
new_line = list()
for i, chunk in enumerate(line.split('\t')):
line_length += len(chunk)
new_line.append(chunk)
if i < tab_count:
next_tab_stop_in = tab_width - (line_length % tab_width)
new_line.append(' ' * next_tab_stop_in)
line_length += next_tab_stop_in
lines.append(''.join(new_line))
else:
lines.append(line)
return '\n'.join(lines)
def string_decode(string, enc='ascii'):
"""
safely decodes string to unicode bytestring, respecting `enc` as a hint.
"""
if enc is None:
enc = 'ascii'
try:
string = unicode(string, enc, errors='replace')
except LookupError: # malformed enc string
string = string.decode('ascii', errors='replace')
except TypeError: # already unicode
pass
return string
def shorten(string, maxlen):
"""shortens string if longer than maxlen, appending ellipsis"""
if 1 < maxlen < len(string):
string = string[:maxlen - 1] + u'\u2026'
return string[:maxlen]
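# Example (illustrative only):
# >>> shorten(u'abcdefgh', 5)
# u'abcd\u2026'
# Strings that are not longer than maxlen are returned unchanged.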
def shorten_author_string(authors_string, maxlength):
"""
Parse a list of authors concatenated as a text string (comma
separated) and smartly adjust them to maxlength.
1) If the complete list of sender names does not fit in maxlength, it
tries to shorten names by using only the first part of each.
2) If the list is still too long, hide authors according to the
following priority:
- First author is always shown (if too long is shorten with ellipsis)
- If possible, last author is also shown (if too long, uses ellipsis)
- If there are more than 2 authors in the thread, show the
maximum of them. More recent senders have higher priority.
- If it is finally necessary to hide any author, an ellipsis
between first and next authors is added.
"""
# I will create a list of authors by parsing author_string. I use
# deque to do popleft without performance penalties
authors = deque()
# If author list is too long, it uses only the first part of each
# name (gmail style)
short_names = len(authors_string) > maxlength
for au in authors_string.split(", "):
if short_names:
author_as_list = au.split()
if len(author_as_list) > 0:
authors.append(author_as_list[0])
else:
authors.append(au)
# Author chain will contain the list of author strings to be
# concatenated using commas for the final formatted author_string.
authors_chain = deque()
if len(authors) == 0:
return u''
# reserve space for first author
first_au = shorten(authors.popleft(), maxlength)
remaining_length = maxlength - len(first_au)
# Tries to add an ellipsis if no space to show more than 1 author
if authors and maxlength > 3 and remaining_length < 3:
first_au = shorten(first_au, maxlength - 3)
remaining_length += 3
    # Try to add as many authors as possible. If any author has to be
    # hidden, an ellipsis is added between the first and the remaining ones.
while authors and remaining_length >= 3:
au = authors.pop()
if len(au) > 1 and (remaining_length == 3 or (authors and
remaining_length < 7)):
authors_chain.appendleft(u'\u2026')
break
else:
if authors:
# 5= ellipsis + 2 x comma and space used as separators
au_string = shorten(au, remaining_length - 5)
else:
# 2 = comma and space used as separator
au_string = shorten(au, remaining_length - 2)
remaining_length -= len(au_string) + 2
authors_chain.appendleft(au_string)
# Add the first author to the list and concatenate list
authors_chain.appendleft(first_au)
authorsstring = ', '.join(authors_chain)
return authorsstring
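# Example (illustrative, output computed by hand and therefore approximate):
# shorten_author_string('Alice Apple, Bob Banana, Carol Cherry', 20)
# -> 'Alice, Bob, Carol'   (gmail-style first names, everything still fits)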
def pretty_datetime(d):
"""
translates :class:`datetime` `d` to a "sup-style" human readable string.
>>> now = datetime.now()
>>> now.strftime('%c')
'Sat 31 Mar 2012 14:47:26 '
>>> pretty_datetime(now)
u'just now'
>>> pretty_datetime(now - timedelta(minutes=1))
u'1min ago'
>>> pretty_datetime(now - timedelta(hours=5))
u'5h ago'
>>> pretty_datetime(now - timedelta(hours=12))
u'02:54am'
>>> pretty_datetime(now - timedelta(days=1))
u'yest 02pm'
>>> pretty_datetime(now - timedelta(days=2))
u'Thu 02pm'
>>> pretty_datetime(now - timedelta(days=7))
u'Mar 24'
>>> pretty_datetime(now - timedelta(days=356))
u'Apr 2011'
"""
ampm = d.strftime('%p').lower()
if len(ampm):
hourfmt = '%I' + ampm
hourminfmt = '%I:%M' + ampm
else:
hourfmt = '%Hh'
hourminfmt = '%H:%M'
now = datetime.now()
today = now.date()
if d.date() == today or d > now - timedelta(hours=6):
delta = datetime.now() - d
if delta.seconds < 60:
string = 'just now'
elif delta.seconds < 3600:
string = '%dmin ago' % (delta.seconds // 60)
elif delta.seconds < 6 * 3600:
string = '%dh ago' % (delta.seconds // 3600)
else:
string = d.strftime(hourminfmt)
elif d.date() == today - timedelta(1):
string = d.strftime('yest ' + hourfmt)
elif d.date() > today - timedelta(7):
string = d.strftime('%a ' + hourfmt)
elif d.year != today.year:
string = d.strftime('%b %Y')
else:
string = d.strftime('%b %d')
return string_decode(string, 'UTF-8')
def call_cmd(cmdlist, stdin=None):
"""
get a shell commands output, error message and return value and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
    :param cmdlist: shellcommand to call, already split into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int
"""
try:
proc = subprocess.Popen(
cmdlist,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin is not None else None)
out, err = proc.communicate(stdin)
ret = proc.returncode
except OSError as e:
out = b''
err = e.strerror
ret = e.errno
out = string_decode(out, urwid.util.detected_encoding)
err = string_decode(err, urwid.util.detected_encoding)
return out, err, ret
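# Example (illustrative only):
#   out, err, ret = call_cmd(['echo', 'hello'])
#   # out == u'hello\n', err == u'', ret == 0 on a typical POSIX system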
def call_cmd_async(cmdlist, stdin=None, env=None):
"""
get a shell commands output, error message and return value as a deferred.
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str
:return: deferred that calls back with triple of stdout, stderr and
return value of the shell command
:rtype: `twisted.internet.defer.Deferred`
"""
class _EverythingGetter(ProcessProtocol):
def __init__(self, deferred):
self.deferred = deferred
self.outBuf = StringIO()
self.errBuf = StringIO()
self.outReceived = self.outBuf.write
self.errReceived = self.errBuf.write
def processEnded(self, status):
termenc = urwid.util.detected_encoding
out = string_decode(self.outBuf.getvalue(), termenc)
err = string_decode(self.errBuf.getvalue(), termenc)
if status.value.exitCode == 0:
self.deferred.callback(out)
else:
terminated_obj = status.value
terminated_obj.stderr = err
self.deferred.errback(terminated_obj)
d = Deferred()
environment = os.environ
if env is not None:
environment.update(env)
logging.debug('ENV = %s', environment)
logging.debug('CMD = %s', cmdlist)
proc = reactor.spawnProcess(_EverythingGetter(d), executable=cmdlist[0],
env=environment,
args=cmdlist)
if stdin:
logging.debug('writing to stdin')
proc.write(stdin)
proc.closeStdin()
return d
def guess_mimetype(blob):
"""
uses file magic to determine the mime-type of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: mime-type, falls back to 'application/octet-stream'
:rtype: str
"""
mimetype = 'application/octet-stream'
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However, it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_TYPE)
m.load()
magictype = m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
# cf. issue #841
        magictype = magic.from_buffer(blob, mime=True) or mimetype
else:
raise Exception('Unknown magic API')
# libmagic does not always return proper mimetype strings, cf. issue #459
if re.match(r'\w+\/\w+', magictype):
mimetype = magictype
return mimetype
def guess_encoding(blob):
"""
uses file magic to determine the encoding of the given data blob.
:param blob: file content as read by file.read()
:type blob: data
:returns: encoding
:rtype: str
"""
# this is a bit of a hack to support different versions of python magic.
# Hopefully at some point this will no longer be necessary
#
# the version with open() is the bindings shipped with the file source from
# http://darwinsys.com/file/ - this is what is used by the python-magic
# package on Debian/Ubuntu. However it is not available on pypi/via pip.
#
# the version with from_buffer() is available at
# https://github.com/ahupp/python-magic and directly installable via pip.
#
# for more detail see https://github.com/pazz/alot/pull/588
if hasattr(magic, 'open'):
m = magic.open(magic.MAGIC_MIME_ENCODING)
m.load()
return m.buffer(blob)
elif hasattr(magic, 'from_buffer'):
m = magic.Magic(mime_encoding=True)
return m.from_buffer(blob)
else:
raise Exception('Unknown magic API')
def libmagic_version_at_least(version):
"""
checks if the libmagic library installed is more recent than a given
version.
:param version: minimum version expected in the form XYY (i.e. 5.14 -> 514)
with XYY >= 513
"""
if hasattr(magic, 'open'):
magic_wrapper = magic._libraries['magic']
elif hasattr(magic, 'from_buffer'):
magic_wrapper = magic.libmagic
else:
raise Exception('Unknown magic API')
if not hasattr(magic_wrapper, 'magic_version'):
# The magic_version function has been introduced in libmagic 5.13,
# if it's not present, we can't guess right, so let's assume False
return False
return magic_wrapper.magic_version >= version
# TODO: make this work on blobs, not paths
def mimewrap(path, filename=None, ctype=None):
"""Take the contents of the given path and wrap them into an email MIME
part according to the content type. The content type is auto detected from
the actual file contents and the file name if it is not given.
:param path: the path to the file contents
:type path: str
:param filename: the file name to use in the generated MIME part
:type filename: str or None
:param ctype: the content type of the file contents in path
:type ctype: str or None
:returns: the message MIME part storing the data from path
:rtype: subclasses of email.mime.base.MIMEBase
"""
with open(path, 'rb') as f:
content = f.read()
if not ctype:
ctype = guess_mimetype(content)
# libmagic < 5.12 incorrectly detects excel/powerpoint files as
# 'application/msword' (see #179 and #186 in libmagic bugtracker)
# This is a workaround, based on file extension, useful as long
# as distributions still ship libmagic 5.11.
if (ctype == 'application/msword' and
not libmagic_version_at_least(513)):
mimetype, _ = mimetypes.guess_type(path)
if mimetype:
ctype = mimetype
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
part = MIMEText(content.decode(guess_encoding(content), 'replace'),
_subtype=subtype,
_charset='utf-8')
elif maintype == 'image':
part = MIMEImage(content, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(content, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(content)
# Encode the payload using Base64
email.encoders.encode_base64(part)
# Set the filename parameter
if not filename:
filename = os.path.basename(path)
part.add_header('Content-Disposition', 'attachment',
filename=filename)
return part
def shell_quote(text):
"""Escape the given text for passing it to the shell for interpretation.
The resulting string will be parsed into one "word" (in the sense used in
the shell documentation, see sh(1)) by the shell.
:param text: the text to quote
:type text: str
:returns: the quoted text
:rtype: str
"""
return "'%s'" % text.replace("'", """'"'"'""")
def humanize_size(size):
"""Create a nice human readable representation of the given number
(understood as bytes) using the "KiB" and "MiB" suffixes to indicate
kibibytes and mebibytes. A kibibyte is defined as 1024 bytes (as opposed to
    a kilobyte which is 1000 bytes) and a mebibyte is 1024**2 bytes (as opposed
to a megabyte which is 1000**2 bytes).
:param size: the number to convert
:type size: int
:returns: the human readable representation of size
:rtype: str
"""
for factor, format_string in ((1, '%i'),
(1024, '%iKiB'),
(1024 * 1024, '%.1fMiB')):
if size / factor < 1024:
return format_string % (size / factor)
return format_string % (size / factor)
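# Examples (illustrative only):
# >>> humanize_size(500)
# '500'
# >>> humanize_size(2048)
# '2KiB'
# >>> humanize_size(1572864)
# '1.5MiB'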
def parse_mailcap_nametemplate(tmplate='%s'):
"""this returns a prefix and suffix to be used
in the tempfile module for a given mailcap nametemplate string"""
nt_list = tmplate.split('%s')
template_prefix = ''
template_suffix = ''
if len(nt_list) == 2:
template_suffix = nt_list[1]
template_prefix = nt_list[0]
else:
template_suffix = tmplate
return (template_prefix, template_suffix)
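# Example (illustrative only):
# >>> parse_mailcap_nametemplate('%s.pdf')
# ('', '.pdf')
# A template without '%s' is used entirely as the suffix.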
def parse_mailto(mailto_str):
"""
Interpret mailto-string
:param mailto_str: the string to interpret. Must conform to :rfc:2368.
:type mailto_str: str
:return: the header fields and the body found in the mailto link as a tuple
of length two
:rtype: tuple(dict(str->list(str)), str)
"""
if mailto_str.startswith('mailto:'):
import urllib
to_str, parms_str = mailto_str[7:].partition('?')[::2]
headers = {}
body = u''
to = urllib.unquote(to_str)
if to:
headers['To'] = [to]
for s in parms_str.split('&'):
key, value = s.partition('=')[::2]
key = key.capitalize()
if key == 'Body':
body = urllib.unquote(value)
elif value:
headers[key] = [urllib.unquote(value)]
return (headers, body)
else:
return (None, None)
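# Example (illustrative only):
#   parse_mailto('mailto:alice@example.com?Subject=hi&body=hello%20there')
#   returns ({'To': ['alice@example.com'], 'Subject': ['hi']}, 'hello there')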
def mailto_to_envelope(mailto_str):
"""
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
"""
from alot.db.envelope import Envelope
headers, body = parse_mailto(mailto_str)
return Envelope(bodytext=body, headers=headers)
def RFC3156_canonicalize(text):
"""
Canonicalizes plain text (MIME-encoded usually) according to RFC3156.
This function works as follows (in that order):
1. Convert all line endings to \\\\r\\\\n (DOS line endings).
2. Ensure the text ends with a newline (\\\\r\\\\n).
    3. Encode all occurrences of "From " at the beginning of a line
       to "From=20" in order to prevent other mail programs from replacing
       this with "> From" (to avoid MBox conflicts) and thus invalidating
the signature.
:param text: text to canonicalize (already encoded as quoted-printable)
:rtype: str
"""
text = re.sub("\r?\n", "\r\n", text)
if not text.endswith("\r\n"):
text += "\r\n"
text = re.sub("^From ", "From=20", text, flags=re.MULTILINE)
return text
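# Example (illustrative only):
# >>> RFC3156_canonicalize("From here\nto there")
# 'From=20here\r\nto there\r\n'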
def email_as_string(mail):
"""
Converts the given message to a string, without mangling "From" lines
(like as_string() does).
:param mail: email to convert to string
:rtype: str
"""
fp = StringIO()
g = Generator(fp, mangle_from_=False, maxheaderlen=78)
g.flatten(mail)
as_string = RFC3156_canonicalize(fp.getvalue())
if isinstance(mail, MIMEMultipart):
# Get the boundary for later
boundary = mail.get_boundary()
# Workaround for http://bugs.python.org/issue14983:
# Insert a newline before the outer mail boundary so that other mail
# clients can verify the signature when sending an email which contains
# attachments.
as_string = re.sub(r'--(\r\n)--' + boundary,
r'--\g<1>\g<1>--' + boundary,
as_string, flags=re.MULTILINE)
return as_string
| fnurl/alot | alot/helper.py | Python | gpl-3.0 | 20,813 |
from pytest import raises
from pyglet.window.key import SPACE, RETURN
from pyglet_pages.controls import Button
class Works(Exception):
pass
class CustomButton(Button):
def activate(self, symbol, modifiers):
raise Works()
def test_default_button():
b = Button('Test')
with raises(NotImplementedError):
b.on_key_press(RETURN, 0)
def test_button():
b = CustomButton('Test')
with raises(Works):
b.on_key_press(RETURN, 0)
| chrisnorman7/pyglet-pages | tests/button_test.py | Python | mpl-2.0 | 505 |
# domahes
a = [1, -20, 38, 0, 44]
b = [88, -20, 48, 4, 33, 2]
# compare the lists element by element; only positions that exist in both
# lists can be compared, so iterate over the shorter length
n = min(len(a), len(b))
x0 = []
for i in range(n):
    if a[i] < b[i]:
        x0.append(a[i])
    elif a[i] > b[i]:
        x0.append(b[i])
print(x0)
| domahes88/domahes | dop - 2 - n1.py | Python | apache-2.0 | 250 |
#
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible import constants as C
from ansible.module_utils._text import to_text, to_bytes
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.netconf import NetconfBase
from ansible.plugins.netconf import ensure_connected
try:
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.transport.errors import SSHUnknownHostError
from ncclient.xml_ import to_ele, to_xml, new_ele
except ImportError:
raise AnsibleError("ncclient is not installed")
class Netconf(NetconfBase):
def get_text(self, ele, tag):
try:
return to_text(ele.find(tag).text, errors='surrogate_then_replace').strip()
except AttributeError:
pass
def get_device_info(self):
device_info = dict()
device_info['network_os'] = 'junos'
ele = new_ele('get-software-information')
data = self.execute_rpc(to_xml(ele))
reply = to_ele(data)
sw_info = reply.find('.//software-information')
device_info['network_os_version'] = self.get_text(sw_info, 'junos-version')
device_info['network_os_hostname'] = self.get_text(sw_info, 'host-name')
device_info['network_os_model'] = self.get_text(sw_info, 'product-model')
return device_info
@ensure_connected
def execute_rpc(self, name):
"""RPC to be execute on remote device
:name: Name of rpc in string format"""
return self.rpc(name)
@ensure_connected
def load_configuration(self, *args, **kwargs):
"""Loads given configuration on device
:format: Format of configuration (xml, text, set)
:action: Action to be performed (merge, replace, override, update)
:target: is the name of the configuration datastore being edited
:config: is the configuration in string format."""
if kwargs.get('config'):
if kwargs.get('format', 'xml') == 'xml':
kwargs['config'] = to_ele(kwargs['config'])
try:
return self.m.load_configuration(*args, **kwargs).data_xml
except RPCError as exc:
raise Exception(to_xml(exc.xml))
def get_capabilities(self):
result = dict()
        result['rpc'] = self.get_base_rpc() + ['commit', 'discard_changes', 'validate', 'lock', 'unlock', 'copy_config',
'execute_rpc', 'load_configuration', 'get_configuration', 'command',
'reboot', 'halt']
result['network_api'] = 'netconf'
result['device_info'] = self.get_device_info()
result['server_capabilities'] = [c for c in self.m.server_capabilities]
result['client_capabilities'] = [c for c in self.m.client_capabilities]
result['session_id'] = self.m.session_id
result['device_operations'] = self.get_device_operations(result['server_capabilities'])
return json.dumps(result)
@staticmethod
def guess_network_os(obj):
try:
m = manager.connect(
host=obj._play_context.remote_addr,
port=obj._play_context.port or 830,
username=obj._play_context.remote_user,
password=obj._play_context.password,
key_filename=obj._play_context.private_key_file,
hostkey_verify=C.HOST_KEY_CHECKING,
look_for_keys=C.PARAMIKO_LOOK_FOR_KEYS,
allow_agent=obj._play_context.allow_agent,
timeout=obj._play_context.timeout
)
except SSHUnknownHostError as exc:
raise AnsibleConnectionFailure(str(exc))
guessed_os = None
for c in m.server_capabilities:
if re.search('junos', c):
guessed_os = 'junos'
m.close_session()
return guessed_os
@ensure_connected
def get_configuration(self, *args, **kwargs):
"""Retrieve all or part of a specified configuration.
:format: format in configuration should be retrieved
:filter: specifies the portion of the configuration to retrieve
(by default entire configuration is retrieved)"""
return self.m.get_configuration(*args, **kwargs).data_xml
@ensure_connected
def compare_configuration(self, *args, **kwargs):
"""Compare configuration
:rollback: rollback id"""
return self.m.compare_configuration(*args, **kwargs).data_xml
@ensure_connected
def halt(self):
"""reboot the device"""
return self.m.halt().data_xml
@ensure_connected
def reboot(self):
"""reboot the device"""
return self.m.reboot().data_xml
| fxfitz/ansible | lib/ansible/plugins/netconf/junos.py | Python | gpl-3.0 | 5,514 |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import remove_end
class CharlieRoseIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?charlierose\.com/(?:video|episode)(?:s|/player)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://charlierose.com/videos/27996',
'md5': 'fda41d49e67d4ce7c2411fd2c4702e09',
'info_dict': {
'id': '27996',
'ext': 'mp4',
'title': 'Remembering Zaha Hadid',
'thumbnail': r're:^https?://.*\.jpg\?\d+',
'description': 'We revisit past conversations with Zaha Hadid, in memory of the world renowned Iraqi architect.',
'subtitles': {
'en': [{
'ext': 'vtt',
}],
},
},
}, {
'url': 'https://charlierose.com/videos/27996',
'only_matching': True,
}, {
'url': 'https://charlierose.com/episodes/30887?autoplay=true',
'only_matching': True,
}]
_PLAYER_BASE = 'https://charlierose.com/video/player/%s'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(self._PLAYER_BASE % video_id, video_id)
title = remove_end(self._og_search_title(webpage), ' - Charlie Rose')
info_dict = self._parse_html5_media_entries(
self._PLAYER_BASE % video_id, webpage, video_id,
m3u8_entry_protocol='m3u8_native')[0]
self._sort_formats(info_dict['formats'])
self._remove_duplicate_formats(info_dict['formats'])
info_dict.update({
'id': video_id,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
})
return info_dict
| valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/charlierose.py | Python | gpl-3.0 | 1,554 |
# -*- coding: utf-8 -*-
#
# Copyright 2012 - 2013 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
#
# load libraries
#
import logging
import numpy as np
# setup the logger
logger = logging.getLogger('log')
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
#
# basic functionality: initialize, start, stop, clear
#
def initialize(shell, **kwargs):
# setup the logger
sh = logging.StreamHandler(kwargs['messages'])
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
# setup the python shell
shell.interact(kwargs.copy())
clear(**kwargs)
def clear(messages, plot_1, plot_2, multiple_mu_initial, multiple_mu_final, **kwargs):
plot_1.clear()
messages.clear()
plot_1.set_plot_properties(
title='Logistic Map',
x_label='time',
y_label='x',
x_scale='linear',
y_scale='linear',
aspect_ratio='auto')
plot_2.set_plot_properties(
title='Logistic Map',
x_label=r'$\mu$',
y_label='x',
x_scale='linear',
y_scale='linear',
tight_autoscale=True,
aspect_ratio='auto',
dpi=300)
plot_1.new_curve('tx', memory='growable', animated=True, line_color='blue')
plot_2.new_image('map', colormap='Greys', animated=False,
extent=(0, multiple_mu_final.value, 0.0, 1.0))
#
# run: the simulation
#
def run_single(single_x0, single_mu, single_N, stop, messages, plot_1, plot_2, **kwargs):
x0 = single_x0.value
mu = single_mu.value
N = single_N.value
# allocate data arrays
xs = np.zeros(N)
ts = np.arange(N)
# the calculation
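    # The logistic map recurrence is x_{n+1} = mu * x_n * (1 - x_n); for
    # 0 <= x0 <= 1 and 0 <= mu <= 4 the iterates stay in [0, 1], and larger
    # mu values drive the period doubling seen in the bifurcation plot.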
logger.info('starting calculation')
xs[0] = x0
for i in range(N-1):
xs[i+1] = mu*xs[i]*(1-xs[i])
# plot all the data at the end
data = np.column_stack((ts, xs))
plot_1.set_data('tx', data, rescale=True)
# reset the stop button in case it was pushed
stop.value = False
logger.info('done')
def run_multiple(multiple_mu_initial, multiple_mu_final, multiple_mu_N_steps,
multiple_N_used, multiple_N_total, multiple_N_bins, multiple_x0,
stop, messages, plot_1, plot_2, **kwargs):
x0 = multiple_x0.value
mu_initial = multiple_mu_initial.value
mu_final = multiple_mu_final.value
N_mu = multiple_mu_N_steps.value
N_total = multiple_N_total.value
N_used = multiple_N_used.value
N_bins = multiple_N_bins.value
# allocate data arrays
xs = np.zeros(N_total)
ts = np.arange(N_total)
image_data = np.zeros((N_bins, N_mu), dtype=np.uint8)
plot_2.set_data('map', image_data)
# the calculation
logger.info('starting calculation')
data = np.column_stack((ts, xs))
plot_1.set_data('tx', data, rescale=True)
mus = np.linspace(mu_initial, mu_final, N_mu)
for n in range(N_mu):
mu = mus[n]
xs[0] = x0
for i in range(N_total-1):
xs[i+1] = mu*xs[i]*(1-xs[i])
mu_data = np.histogram(xs[-N_used:], bins=N_bins, range=(0.0, 1.0))[0]
mu_data = np.clip(mu_data, 0, 1)
image_data[::-1,n] += mu_data
if (n % 10) == 0:
# update plot
data = np.column_stack((ts, xs))
plot_1.set_data('tx', data)
# update image
plot_2.set_data('map', image_data)
if stop.value: break
# update plot
data = np.column_stack((ts, xs))
plot_1.set_data('tx', data)
# update image
plot_2.set_data('map', image_data)
    # reset the stop button in case it was pushed
stop.value = False
logger.info('done')
| dursobr/Pythics | pythics/examples/logistic_map.py | Python | gpl-3.0 | 4,362 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_eip
short_description: associate an EC2 elastic IP with an instance.
description:
- This module associates AWS EC2 elastic IP addresses with instances
version_added: "1.4"
options:
device_id:
description:
- The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id.
required: false
aliases: [ instance_id ]
version_added: "2.0"
public_ip:
description:
- The elastic IP address to associate with the instance.
- If absent, allocate a new address
required: false
state:
description:
- If present, associate the IP with the instance.
- If absent, disassociate the IP with the instance.
required: false
choices: ['present', 'absent']
default: present
region:
description:
- the EC2 region to use
required: false
default: null
aliases: [ ec2_region ]
in_vpc:
description:
- allocate an EIP inside a VPC or not
required: false
default: false
version_added: "1.4"
reuse_existing_ip_allowed:
description:
- Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one.
required: false
default: false
version_added: "1.6"
release_on_disassociation:
description:
- whether or not to automatically release the EIP when it is disassociated
required: false
default: false
version_added: "2.0"
extends_documentation_fragment: aws
author: "Lorin Hochstein (@lorin) <lorin@nimbisservices.com>"
author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
notes:
- This module will return C(public_ip) on success, which will contain the
public IP address associated with the instance.
- There may be a delay between the time the Elastic IP is assigned and when
the cloud instance is reachable via the new address. Use wait_for and
pause to delay further playbook execution until the instance is reachable,
if necessary.
- This module returns multiple changed statuses on disassociation or release.
    It returns an overall status based on any changes occurring. It also returns
individual changed statuses for disassociation and release.
'''
EXAMPLES = '''
- name: associate an elastic IP with an instance
ec2_eip: device_id=i-1212f003 ip=93.184.216.119
- name: associate an elastic IP with a device
ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119
- name: disassociate an elastic IP from an instance
ec2_eip: device_id=i-1212f003 ip=93.184.216.119 state=absent
- name: disassociate an elastic IP with a device
ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119 state=absent
- name: allocate a new elastic IP and associate it with an instance
ec2_eip: device_id=i-1212f003
- name: allocate a new elastic IP without associating it to anything
action: ec2_eip
register: eip
- name: output the IP
debug: msg="Allocated IP is {{ eip.public_ip }}"
- name: another way of allocating an elastic IP without associating it to anything
ec2_eip: state='present'
- name: provision new instances with ec2
  ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes group=webserver count=3
register: ec2
- name: associate new elastic IPs with each of the instances
ec2_eip: "device_id={{ item }}"
with_items: ec2.instance_ids
- name: allocate a new elastic IP inside a VPC in us-west-2
ec2_eip: region=us-west-2 in_vpc=yes
register: eip
- name: output the IP
debug: msg="Allocated IP inside a VPC is {{ eip.public_ip }}"
'''
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class EIPException(Exception):
pass
def associate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True):
if address_is_associated_with_device(ec2, address, device_id, isinstance):
return {'changed': False}
# If we're in check mode, nothing else to do
if not check_mode:
if isinstance:
if address.domain == "vpc":
res = ec2.associate_address(device_id, allocation_id=address.allocation_id)
else:
res = ec2.associate_address(device_id, public_ip=address.public_ip)
else:
res = ec2.associate_address(network_interface_id=device_id, allocation_id=address.allocation_id)
if not res:
raise EIPException('association failed')
return {'changed': True}
def disassociate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True):
if not address_is_associated_with_device(ec2, address, device_id, isinstance):
return {'changed': False}
# If we're in check mode, nothing else to do
if not check_mode:
if address.domain == 'vpc':
res = ec2.disassociate_address(
association_id=address.association_id)
else:
res = ec2.disassociate_address(public_ip=address.public_ip)
if not res:
raise EIPException('disassociation failed')
return {'changed': True}
def _find_address_by_ip(ec2, public_ip):
try:
return ec2.get_all_addresses([public_ip])[0]
except boto.exception.EC2ResponseError as e:
if "Address '{}' not found.".format(public_ip) not in e.message:
raise
def _find_address_by_device_id(ec2, device_id, isinstance=True):
if isinstance:
addresses = ec2.get_all_addresses(None, {'instance-id': device_id})
else:
addresses = ec2.get_all_addresses(None, {'network-interface-id': device_id})
if addresses:
return addresses[0]
def find_address(ec2, public_ip, device_id, isinstance=True):
""" Find an existing Elastic IP address """
if public_ip:
return _find_address_by_ip(ec2, public_ip)
elif device_id and isinstance:
return _find_address_by_device_id(ec2, device_id)
elif device_id:
return _find_address_by_device_id(ec2, device_id, isinstance=False)
def address_is_associated_with_device(ec2, address, device_id, isinstance=True):
""" Check if the elastic IP is currently associated with the device """
address = ec2.get_all_addresses(address.public_ip)
if address:
if isinstance:
return address and address[0].instance_id == device_id
else:
return address and address[0].network_interface_id == device_id
return False
def allocate_address(ec2, domain, reuse_existing_ip_allowed):
""" Allocate a new elastic IP address (when needed) and return it """
if reuse_existing_ip_allowed:
domain_filter = {'domain': domain or 'standard'}
all_addresses = ec2.get_all_addresses(filters=domain_filter)
if domain == 'vpc':
unassociated_addresses = [a for a in all_addresses
if not a.association_id]
else:
unassociated_addresses = [a for a in all_addresses
if not a.instance_id]
if unassociated_addresses:
return unassociated_addresses[0]
return ec2.allocate_address(domain=domain)
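# Note (descriptive comment, not in the original module): with
# reuse_existing_ip_allowed the function above prefers an already-allocated
# but unassociated EIP in the requested domain and only allocates a new
# address when none is free.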
def release_address(ec2, address, check_mode):
""" Release a previously allocated elastic IP address """
# If we're in check mode, nothing else to do
if not check_mode:
if not address.release():
            raise EIPException('release failed')
return {'changed': True}
def find_device(ec2, device_id, isinstance=True):
""" Attempt to find the EC2 instance and return it """
if isinstance:
try:
reservations = ec2.get_all_reservations(instance_ids=[device_id])
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e))
if len(reservations) == 1:
instances = reservations[0].instances
if len(instances) == 1:
return instances[0]
else:
try:
interfaces = ec2.get_all_network_interfaces(network_interface_ids=[device_id])
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e))
if len(interfaces) == 1:
return interfaces[0]
    raise EIPException("could not find instance " + device_id)
def ensure_present(ec2, domain, address, device_id,
reuse_existing_ip_allowed, check_mode, isinstance=True):
changed = False
# Return the EIP object since we've been given a public IP
if not address:
if check_mode:
return {'changed': True}
address = allocate_address(ec2, domain, reuse_existing_ip_allowed)
changed = True
if device_id:
# Allocate an IP for instance since no public_ip was provided
if isinstance:
instance = find_device(ec2, device_id)
if reuse_existing_ip_allowed:
if len(instance.vpc_id) > 0 and domain is None:
raise EIPException("You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc")
# Associate address object (provided or allocated) with instance
assoc_result = associate_ip_and_device(ec2, address, device_id,
check_mode)
else:
instance = find_device(ec2, device_id, isinstance=False)
# Associate address object (provided or allocated) with instance
assoc_result = associate_ip_and_device(ec2, address, device_id,
check_mode, isinstance=False)
if instance.vpc_id:
domain = 'vpc'
changed = changed or assoc_result['changed']
return {'changed': changed, 'public_ip': address.public_ip}
def ensure_absent(ec2, domain, address, device_id, check_mode, isinstance=True):
if not address:
return {'changed': False}
# disassociating address from instance
if device_id:
if isinstance:
return disassociate_ip_and_device(ec2, address, device_id,
check_mode)
else:
return disassociate_ip_and_device(ec2, address, device_id,
check_mode, isinstance=False)
# releasing address
else:
return release_address(ec2, address, check_mode)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
device_id=dict(required=False, aliases=['instance_id']),
public_ip=dict(required=False, aliases=['ip']),
state=dict(required=False, default='present',
choices=['present', 'absent']),
in_vpc=dict(required=False, type='bool', default=False),
reuse_existing_ip_allowed=dict(required=False, type='bool',
default=False),
release_on_disassociation=dict(required=False, type='bool', default=False),
wait_timeout=dict(default=300),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
ec2 = ec2_connect(module)
device_id = module.params.get('device_id')
instance_id = module.params.get('instance_id')
public_ip = module.params.get('public_ip')
state = module.params.get('state')
in_vpc = module.params.get('in_vpc')
domain = 'vpc' if in_vpc else None
reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
release_on_disassociation = module.params.get('release_on_disassociation')
if instance_id:
warnings = ["instance_id is no longer used, please use device_id going forward"]
is_instance = True
device_id = instance_id
else:
if device_id and device_id.startswith('i-'):
is_instance = True
elif device_id:
is_instance = False
try:
if device_id:
address = find_address(ec2, public_ip, device_id, isinstance=is_instance)
else:
address = False
if state == 'present':
if device_id:
result = ensure_present(ec2, domain, address, device_id,
reuse_existing_ip_allowed,
module.check_mode, isinstance=is_instance)
else:
address = allocate_address(ec2, domain, reuse_existing_ip_allowed)
result = {'changed': True, 'public_ip': address.public_ip}
else:
if device_id:
disassociated = ensure_absent(ec2, domain, address, device_id, module.check_mode, isinstance=is_instance)
if release_on_disassociation and disassociated['changed']:
released = release_address(ec2, address, module.check_mode)
result = {'changed': True, 'disassociated': disassociated, 'released': released}
else:
result = {'changed': disassociated['changed'], 'disassociated': disassociated, 'released': {'changed': False}}
else:
address = find_address(ec2, public_ip, None)
released = release_address(ec2, address, module.check_mode)
result = {'changed': released['changed'], 'disassociated': {'changed': False}, 'released': released}
except (boto.exception.EC2ResponseError, EIPException) as e:
module.fail_json(msg=str(e))
if instance_id:
result['warnings'] = warnings
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import * # noqa
from ansible.module_utils.ec2 import * # noqa
if __name__ == '__main__':
main()
| garyjyao1/ansible | lib/ansible/modules/core/cloud/amazon/ec2_eip.py | Python | gpl-3.0 | 14,390 |
from django_tex.environment import environment
def hhmm_format(value):
total_seconds = value.total_seconds()
hours, remainder = divmod(total_seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return "{:n}:{:02n}".format(hours, minutes)
def test_environment(**options):
env = environment(**options)
env.filters.update({"hhmm_format": hhmm_format})
return env
| weinbusch/django-tex | tests/environment.py | Python | mit | 395 |
""" Vanilla RNN
Parallelizes scan over sequences by using mini-batches.
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import cPickle as pickle
import random
logger = logging.getLogger(__name__)
import matplotlib.pyplot as plt
plt.ion()
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class RNN(object):
""" Recurrent neural network class
Supported output types:
real : linear output units, use mean-squared error
binary : binary output units, use cross-entropy error
    softmax : single softmax output, use cross-entropy error
"""
def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
output_type='real', only_output_after=False):
self.input = input
self.activation = activation
self.output_type = output_type
self.only_output_after = only_output_after
self.batch_size = T.iscalar()
# theta is a vector of all trainable parameters
# it represents the value of W, W_in, W_out, h0, bh, by
theta_shape = n_hidden ** 2 + n_in * n_hidden + n_hidden * n_out + \
n_hidden + n_hidden + n_out
self.theta = theano.shared(value=np.zeros(theta_shape,
dtype=theano.config.floatX))
# Parameters are reshaped views of theta
param_idx = 0 # pointer to somewhere along parameter vector
# recurrent weights as a shared variable
self.W = self.theta[param_idx:(param_idx + n_hidden ** 2)].reshape(
(n_hidden, n_hidden))
self.W.name = 'W'
'''W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
low=-0.01, high=0.01),
dtype=theano.config.floatX)'''
W_init = np.identity(n_hidden, dtype=theano.config.floatX)
param_idx += n_hidden ** 2
# input to hidden layer weights
self.W_in = self.theta[param_idx:(param_idx + n_in * \
n_hidden)].reshape((n_in, n_hidden))
self.W_in.name = 'W_in'
W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
low=-0.01, high=0.01),
dtype=theano.config.floatX)
param_idx += n_in * n_hidden
# hidden to output layer weights
self.W_out = self.theta[param_idx:(param_idx + n_hidden * \
n_out)].reshape((n_hidden, n_out))
self.W_out.name = 'W_out'
W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
low=-0.01, high=0.01),
dtype=theano.config.floatX)
param_idx += n_hidden * n_out
self.h0 = self.theta[param_idx:(param_idx + n_hidden)]
self.h0.name = 'h0'
h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
param_idx += n_hidden
self.bh = self.theta[param_idx:(param_idx + n_hidden)]
self.bh.name = 'bh'
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
param_idx += n_hidden
self.by = self.theta[param_idx:(param_idx + n_out)]
self.by.name = 'by'
by_init = np.zeros((n_out,), dtype=theano.config.floatX)
param_idx += n_out
assert(param_idx == theta_shape)
# for convenience
self.params = [self.W, self.W_in, self.W_out, self.h0, self.bh,
self.by]
# shortcut to norms (for monitoring)
self.l2_norms = {}
for param in self.params:
self.l2_norms[param] = T.sqrt(T.sum(param ** 2))
# initialize parameters
# DEBUG_MODE gives division by zero error when we leave parameters
# as zeros
self.theta.set_value(np.concatenate([x.ravel() for x in
(W_init, W_in_init, W_out_init, h0_init, bh_init, by_init)]))
self.theta_update = theano.shared(
value=np.zeros(theta_shape, dtype=theano.config.floatX))
# recurrent function (using tanh activation function) and arbitrary output
# activation function
def step(x_t, h_tm1):
h_t = self.activation(T.dot(x_t, self.W_in) + \
T.dot(h_tm1, self.W) + self.bh)
y_t = T.dot(h_t, self.W_out) + self.by
return h_t, y_t
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
# Note the implementation of weight-sharing h0 across variable-size
# batches using T.ones multiplying h0
# Alternatively, T.alloc approach is more robust
[self.h, self.y_pred], _ = theano.scan(step,
sequences=self.input,
outputs_info=[T.alloc(self.h0, self.input.shape[1],
n_hidden), None])
# outputs_info=[T.ones(shape=(self.input.shape[1],
# self.h0.shape[0])) * self.h0, None])
# sometimes we only care about the final output
# a matrix (batch_size, n_out)
if only_output_after:
self.y_pred = self.y_pred[-1]
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
self.L1 += abs(self.W_in.sum())
self.L1 += abs(self.W_out.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
self.L2_sqr += (self.W_in ** 2).sum()
self.L2_sqr += (self.W_out ** 2).sum()
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
# push through sigmoid
self.p_y_given_x = T.nnet.sigmoid(self.y_pred) # apply sigmoid
self.y_out = T.round(self.p_y_given_x) # round to {0,1}
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
#
# T.nnet.softmax will not operate on T.tensor3 types, only matrices
# We take our n_steps x n_seq x n_classes output from the net
# and reshape it into a (n_steps * n_seq) x n_classes matrix
# apply softmax, then reshape back
if self.only_output_after:
self.p_y_given_x = T.nnet.softmax(self.y_pred)
else:
y_p = self.y_pred
y_p_m = T.reshape(y_p, (y_p.shape[0] * y_p.shape[1], -1))
y_p_s = T.nnet.softmax(y_p_m)
self.p_y_given_x = T.reshape(y_p_s, y_p.shape)
# compute prediction as class whose probability is maximal
self.y_out = T.argmax(self.p_y_given_x, axis=-1)
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
#
# Theano's advanced indexing is limited
# therefore we reshape our n_steps x n_seq x n_classes tensor3 of probs
# to a (n_steps * n_seq) x n_classes matrix of probs
# so that we can use advanced indexing (i.e. get the probs which
# correspond to the true class)
# the labels y also must be flattened when we do this to use the
# advanced indexing
if self.only_output_after:
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
else:
p_y = self.p_y_given_x
p_y_m = T.reshape(p_y, (p_y.shape[0] * p_y.shape[1], -1))
y_f = y.flatten(ndim=1)
return -T.mean(T.log(p_y_m)[T.arange(p_y_m.shape[0]), y_f])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
print "ydims", y.ndim, self.y_out.ndim
if y.ndim != self.y_out.ndim:
raise TypeError('y should have the same shape as self.y_out',
('y', y.type, 'y_out', self.y_out.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_out, y))
else:
raise NotImplementedError()
class MetaRNN(BaseEstimator):
def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
n_epochs=100, batch_size=100, L1_reg=0.00, L2_reg=0.00,
learning_rate_decay=1,
activation='tanh', output_type='real', final_momentum=0.9,
initial_momentum=0.5, momentum_switchover=5,
grad_max=10,
only_output_after=False,
snapshot_every=None, snapshot_path='/tmp'):
self.n_in = int(n_in)
self.n_hidden = int(n_hidden)
self.n_out = int(n_out)
self.learning_rate = float(learning_rate)
self.learning_rate_decay = float(learning_rate_decay)
self.n_epochs = int(n_epochs)
self.batch_size = int(batch_size)
self.L1_reg = float(L1_reg)
self.L2_reg = float(L2_reg)
self.activation = activation
self.output_type = output_type
self.initial_momentum = float(initial_momentum)
self.final_momentum = float(final_momentum)
self.momentum_switchover = int(momentum_switchover)
self.grad_max = grad_max
self.only_output_after = only_output_after
if snapshot_every is not None:
self.snapshot_every = int(snapshot_every)
else:
self.snapshot_every = None
self.snapshot_path = snapshot_path
self.ready()
def ready(self):
# input (where first dimension is time)
self.x = T.tensor3(name='x')
# target (where first dimension is time)
if self.output_type == 'real':
self.y = T.tensor3(name='y', dtype=theano.config.floatX)
elif self.output_type == 'binary':
self.y = T.tensor3(name='y', dtype='int32')
elif self.output_type == 'softmax': # now it is a matrix (T x n_seq)
if self.only_output_after:
self.y = T.vector(name='y', dtype='int32')
else:
self.y = T.matrix(name='y', dtype='int32')
else:
raise NotImplementedError
# learning rate
self.lr = T.scalar()
if self.activation == 'tanh':
activation = T.tanh
elif self.activation == 'sigmoid':
activation = T.nnet.sigmoid
elif self.activation == 'relu':
activation = lambda x: x * (x > 0)
elif self.activation == 'cappedrelu':
activation = lambda x: T.minimum(x * (x > 0), 6)
else:
raise NotImplementedError
self.rnn = RNN(input=self.x, n_in=self.n_in,
n_hidden=self.n_hidden, n_out=self.n_out,
activation=activation, output_type=self.output_type,
only_output_after=self.only_output_after)
if self.output_type == 'real':
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_pred,
mode=mode)
elif self.output_type == 'binary':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=T.round(self.rnn.p_y_given_x),
mode=mode)
elif self.output_type == 'softmax':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_out, mode=mode)
else:
raise NotImplementedError
def shared_dataset(self, data_xy, borrow=True):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX),
borrow=True)
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX),
borrow=True)
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
def __getstate__(self):
""" Return state sequence."""
params = self._get_params() # parameters set in constructor
theta = self.rnn.theta.get_value()
state = (params, theta)
return state
def _set_weights(self, theta):
""" Set fittable parameters from weights sequence.
"""
self.rnn.theta.set_value(theta)
def __setstate__(self, state):
""" Set parameters from state sequence.
"""
params, theta = state
self.set_params(**params)
self.ready()
self._set_weights(theta)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s.pkl' % (class_name, date_str)
fabspath = os.path.join(fpath, fname)
logger.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logger.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
def optional_output(self, train_set_x, show_norms=True, show_output=True):
""" Produces some debugging output. """
if show_norms:
norm_output = []
for param in self.rnn.params:
norm_output.append('%s: %6.4f' % (param.name,
self.get_norms[param]()))
logger.info("norms: {" + ', '.join(norm_output) + "}")
if show_output:
# show output for a single case
if self.output_type == 'binary':
output_fn = self.predict_proba
else:
output_fn = self.predict
logger.info("sample output: " + \
str(output_fn(train_set_x.get_value(
borrow=True)[:, 0, :][:, np.newaxis, :]).flatten()))
def fit(self, X_train, Y_train, X_test=None, Y_test=None,
validate_every=100, optimizer='sgd', compute_zero_one=False,
show_norms=True, show_output=True):
""" Fit model
Pass in X_test, Y_test to compute test error and report during
training.
X_train : ndarray (T x n_in)
Y_train : ndarray (T x n_out)
        validate_every : int
in terms of number of epochs
optimizer : string
Optimizer type.
Possible values:
'sgd' : batch stochastic gradient descent
'cg' : nonlinear conjugate gradient algorithm
(scipy.optimize.fmin_cg)
'bfgs' : quasi-Newton method of Broyden, Fletcher, Goldfarb,
and Shanno (scipy.optimize.fmin_bfgs)
'l_bfgs_b' : Limited-memory BFGS (scipy.optimize.fmin_l_bfgs_b)
compute_zero_one : bool
in the case of binary output, compute zero-one error in addition to
cross-entropy error
show_norms : bool
Show L2 norms of individual parameter groups while training.
show_output : bool
Show the model output on first training case while training.
"""
if X_test is not None:
assert(Y_test is not None)
self.interactive = True
test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
else:
self.interactive = False
train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
if compute_zero_one:
assert(self.output_type == 'binary' \
or self.output_type == 'softmax')
# compute number of minibatches for training
# note that cases are the second dimension, not the first
n_train = train_set_x.get_value(borrow=True).shape[1]
n_train_batches = int(np.ceil(1.0 * n_train / self.batch_size))
if self.interactive:
n_test = test_set_x.get_value(borrow=True).shape[1]
n_test_batches = int(np.ceil(1.0 * n_test / self.batch_size))
#validate_every is specified in terms of epochs
validation_frequency = validate_every * n_train_batches
######################
# BUILD ACTUAL MODEL #
######################
logger.info('... building the model')
index = T.lscalar('index') # index to a [mini]batch
n_ex = T.lscalar('n_ex') # total number of examples
# learning rate (may change)
l_r = T.scalar('l_r', dtype=theano.config.floatX)
mom = T.scalar('mom', dtype=theano.config.floatX) # momentum
print "building cost graph"
cost = self.rnn.loss(self.y) \
+ self.L1_reg * self.rnn.L1 \
+ self.L2_reg * self.rnn.L2_sqr
print "cost done"
# Proper implementation of variable-batch size evaluation
# Note that classifier.errors() returns the mean error
# But the last batch may be a smaller size
# So we keep around the effective_batch_size (whose last element may
# be smaller than the rest)
# And weight the reported error by the batch_size when we average
# Also, by keeping batch_start and batch_stop as symbolic variables,
# we make the theano function easier to read
batch_start = index * self.batch_size
batch_stop = T.minimum(n_ex, (index + 1) * self.batch_size)
effective_batch_size = batch_stop - batch_start
get_batch_size = theano.function(inputs=[index, n_ex],
outputs=effective_batch_size)
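        # e.g. with batch_size=100 and n_ex=160 the effective batch sizes are
        # [100, 60]; np.average(losses, weights=[100, 60]) then gives every
        # example the same influence on the reported mean.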
        def symbY(sharedY, start, stop, final_only):
            if final_only:
                return sharedY[start:stop]
            else:
                return sharedY[:, start:stop]
print "compute train error"
compute_train_error = theano.function(inputs=[index, n_ex],
outputs=self.rnn.loss(self.y),
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: symbY(train_set_y, batch_start, batch_stop, self.only_output_after)},
mode=mode)
print "done compute train error"
if compute_zero_one:
compute_train_zo = theano.function(inputs=[index, n_ex],
outputs=self.rnn.errors(self.y),
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: symbY(train_set_y, batch_start, batch_stop, self.only_output_after)},
mode=mode)
if self.interactive:
compute_test_error = theano.function(inputs=[index, n_ex],
outputs=self.rnn.loss(self.y),
givens={self.x: test_set_x[:, batch_start:batch_stop],
self.y: symbY(test_set_y, batch_start, batch_stop, self.only_output_after)},
mode=mode)
if compute_zero_one:
compute_test_zo = theano.function(inputs=[index, n_ex],
outputs=self.rnn.errors(self.y),
givens={self.x: test_set_x[:, batch_start:batch_stop],
self.y: symbY(test_set_y, batch_start, batch_stop, self.only_output_after)},
mode=mode)
self.get_norms = {}
for param in self.rnn.params:
self.get_norms[param] = theano.function(inputs=[],
outputs=self.rnn.l2_norms[param], mode=mode)
# compute the gradient of cost with respect to theta using BPTT
gtheta = T.grad(cost, self.rnn.theta)
if optimizer == 'sgd':
updates = {}
theta = self.rnn.theta
theta_update = self.rnn.theta_update
# careful here, update to the shared variable
# cannot depend on an updated other shared variable
# since updates happen in parallel
# so we need to be explicit
upd = mom * theta_update - l_r * gtheta
updates[theta_update] = upd
updates[theta] = theta + upd
# compiling a Theano function `train_model` that returns the
# cost, but in the same time updates the parameter of the
# model based on the rules defined in `updates`
train_model = theano.function(inputs=[index, n_ex, l_r, mom],
outputs=cost,
updates=updates,
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: symbY(train_set_y,batch_start,batch_stop,self.only_output_after)},
mode=mode)
###############
# TRAIN MODEL #
###############
logger.info('... training')
epoch = 0
while (epoch < self.n_epochs):
epoch = epoch + 1
effective_momentum = self.final_momentum \
if epoch > self.momentum_switchover \
else self.initial_momentum
for minibatch_idx in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_idx, n_train,
self.learning_rate,
effective_momentum)
# iteration number (how many weight updates have we made?)
# epoch is 1-based, index is 0 based
iter = (epoch - 1) * n_train_batches + minibatch_idx + 1
if iter % validation_frequency == 0:
# compute loss on training set
train_losses = [compute_train_error(i, n_train)
for i in xrange(n_train_batches)]
train_batch_sizes = [get_batch_size(i, n_train)
for i in xrange(n_train_batches)]
this_train_loss = np.average(train_losses,
weights=train_batch_sizes)
if compute_zero_one:
train_zero_one = [compute_train_zo(i, n_train)
for i in xrange(n_train_batches)]
this_train_zero_one = np.average(train_zero_one,
weights=train_batch_sizes)
if self.interactive:
test_losses = [compute_test_error(i, n_test)
for i in xrange(n_test_batches)]
test_batch_sizes = [get_batch_size(i, n_test)
for i in xrange(n_test_batches)]
this_test_loss = np.average(test_losses,
weights=test_batch_sizes)
if compute_zero_one:
test_zero_one = [compute_test_zo(i, n_test)
for i in xrange(n_test_batches)]
this_test_zero_one = np.average(test_zero_one,
weights=test_batch_sizes)
if compute_zero_one:
logger.info('epoch %i, mb %i/%i, tr loss %f, '
'tr zo %f, te loss %f '
'te zo %f lr: %f' % \
(epoch, minibatch_idx + 1,
n_train_batches,
this_train_loss, this_train_zero_one,
this_test_loss, this_test_zero_one,
self.learning_rate))
else:
logger.info('epoch %i, mb %i/%i, tr loss %f '
'te loss %f lr: %f' % \
(epoch, minibatch_idx + 1, n_train_batches,
this_train_loss, this_test_loss,
self.learning_rate))
else:
if compute_zero_one:
logger.info('epoch %i, mb %i/%i, train loss %f'
' train zo %f '
'lr: %f' % (epoch,
minibatch_idx + 1,
n_train_batches,
this_train_loss,
this_train_zero_one,
self.learning_rate))
else:
logger.info('epoch %i, mb %i/%i, train loss %f'
' lr: %f' % (epoch,
minibatch_idx + 1,
n_train_batches,
this_train_loss,
self.learning_rate))
self.optional_output(train_set_x, show_norms,
show_output)
self.learning_rate *= self.learning_rate_decay
if self.snapshot_every is not None:
if (epoch + 1) % self.snapshot_every == 0:
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s-snapshot-%d.pkl' % (class_name,
date_str, epoch + 1)
fabspath = os.path.join(self.snapshot_path, fname)
self.save(fpath=fabspath)
elif optimizer == 'cg' or optimizer == 'bfgs' \
or optimizer == 'l_bfgs_b':
# compile a theano function that returns the cost of a minibatch
batch_cost = theano.function(inputs=[index, n_ex],
outputs=cost,
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: symbY(train_set_y, batch_start, batch_stop, self.only_output_after)},
mode=mode, name="batch_cost")
# compile a theano function that returns the gradient of the
# minibatch with respect to theta
batch_grad = theano.function(inputs=[index, n_ex],
outputs=T.grad(cost, self.rnn.theta),
givens={self.x: train_set_x[:, batch_start:batch_stop],
self.y: symbY(train_set_y, batch_start, batch_stop, self.only_output_after)},
mode=mode, name="batch_grad")
# creates a function that computes the average cost on the training
# set
def train_fn(theta_value):
self.rnn.theta.set_value(theta_value, borrow=True)
train_losses = [batch_cost(i, n_train)
for i in xrange(n_train_batches)]
train_batch_sizes = [get_batch_size(i, n_train)
for i in xrange(n_train_batches)]
return np.average(train_losses, weights=train_batch_sizes)
# creates a function that computes the average gradient of cost
# with respect to theta
def train_fn_grad(theta_value):
self.rnn.theta.set_value(theta_value, borrow=True)
train_grads = [batch_grad(i, n_train)
for i in xrange(n_train_batches)]
train_batch_sizes = [get_batch_size(i, n_train)
for i in xrange(n_train_batches)]
return np.average(train_grads, weights=train_batch_sizes,
axis=0)
# validation function, prints useful output after each iteration
def callback(theta_value):
self.epoch += 1
if (self.epoch) % validate_every == 0:
self.rnn.theta.set_value(theta_value, borrow=True)
# compute loss on training set
train_losses = [compute_train_error(i, n_train)
for i in xrange(n_train_batches)]
train_batch_sizes = [get_batch_size(i, n_train)
for i in xrange(n_train_batches)]
this_train_loss = np.average(train_losses,
weights=train_batch_sizes)
if compute_zero_one:
train_zero_one = [compute_train_zo(i, n_train)
for i in xrange(n_train_batches)]
this_train_zero_one = np.average(train_zero_one,
weights=train_batch_sizes)
if self.interactive:
test_losses = [compute_test_error(i, n_test)
for i in xrange(n_test_batches)]
test_batch_sizes = [get_batch_size(i, n_test)
for i in xrange(n_test_batches)]
this_test_loss = np.average(test_losses,
weights=test_batch_sizes)
if compute_zero_one:
test_zero_one = [compute_test_zo(i, n_test)
for i in xrange(n_test_batches)]
this_test_zero_one = np.average(test_zero_one,
weights=test_batch_sizes)
if compute_zero_one:
logger.info('epoch %i, tr loss %f, '
'tr zo %f, te loss %f '
'te zo %f' % \
(self.epoch, this_train_loss,
this_train_zero_one, this_test_loss,
this_test_zero_one))
else:
                            logger.info('epoch %i, tr loss %f, te loss %f' % \
                                        (self.epoch, this_train_loss,
                                         this_test_loss))
else:
if compute_zero_one:
logger.info('epoch %i, train loss %f'
', train zo %f ' % \
(self.epoch, this_train_loss,
this_train_zero_one))
else:
logger.info('epoch %i, train loss %f ' % \
(self.epoch, this_train_loss))
self.optional_output(train_set_x, show_norms, show_output)
###############
# TRAIN MODEL #
###############
logger.info('... training')
# using scipy conjugate gradient optimizer
import scipy.optimize
if optimizer == 'cg':
of = scipy.optimize.fmin_cg
elif optimizer == 'bfgs':
of = scipy.optimize.fmin_bfgs
elif optimizer == 'l_bfgs_b':
of = scipy.optimize.fmin_l_bfgs_b
logger.info("Optimizing using %s..." % of.__name__)
start_time = time.clock()
# keep track of epochs externally
# these get updated through callback
self.epoch = 0
# interface to l_bfgs_b is different than that of cg, bfgs
# however, this will be changed in scipy 0.11
# unified under scipy.optimize.minimize
if optimizer == 'cg' or optimizer == 'bfgs':
best_theta = of(
f=train_fn,
x0=self.rnn.theta.get_value(),
# x0=np.zeros(self.rnn.theta.get_value().shape,
# dtype=theano.config.floatX),
fprime=train_fn_grad,
callback=callback,
disp=1,
retall=1,
maxiter=self.n_epochs)
elif optimizer == 'l_bfgs_b':
best_theta, f_best_theta, info = of(
func=train_fn,
x0=self.rnn.theta.get_value(),
fprime=train_fn_grad,
iprint=validate_every,
maxfun=self.n_epochs) # max number of feval
end_time = time.clock()
print "Optimization time: %f" % (end_time - start_time)
else:
raise NotImplementedError
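# Illustrative numpy-only sketch of the softmax reshape trick used in
# RNN.__init__ (not part of the training code): flatten the
# (n_steps, n_seq, n_classes) output to a matrix, normalize each row with a
# numerically stable softmax, then restore the original shape.
def _softmax_reshape_demo(y_pred):
    flat = y_pred.reshape(-1, y_pred.shape[-1])
    e = np.exp(flat - flat.max(axis=1, keepdims=True))
    probs = e / e.sum(axis=1, keepdims=True)
    return probs.reshape(y_pred.shape)
# e.g. _softmax_reshape_demo(np.random.randn(10, 5, 3)).sum(axis=-1) -> all ones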
def test_real(n_epochs=1000):
""" Test RNN with real-valued outputs. """
n_hidden = 10
n_in = 5
n_out = 3
n_steps = 10
n_seq = 10 # per batch
n_batches = 10
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_steps, n_seq * n_batches, n_in)
targets = np.zeros((n_steps, n_seq * n_batches, n_out))
targets[1:, :, 0] = seq[:-1, :, 3] # delayed 1
targets[1:, :, 1] = seq[:-1, :, 2] # delayed 1
targets[2:, :, 2] = seq[:-2, :, 0] # delayed 2
targets += 0.01 * np.random.standard_normal(targets.shape)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.01, learning_rate_decay=0.999,
n_epochs=n_epochs, batch_size=n_seq, activation='tanh',
L2_reg=1e-3)
model.fit(seq, targets, validate_every=100, optimizer='sgd')
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[:, 0, :])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.plot(targets[:, 0, :])
guess = model.predict(seq[:, 0, :][:, np.newaxis, :])
guessed_targets = plt.plot(guess.squeeze(), linestyle='--')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_title('solid: true output, dashed: model output')
def test_binary(multiple_out=False, n_epochs=1000, optimizer='cg'):
""" Test RNN with binary outputs. """
n_hidden = 10
n_in = 5
if multiple_out:
n_out = 2
else:
n_out = 1
n_steps = 10
n_seq = 10 # per batch
n_batches = 50
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_steps, n_seq * n_batches, n_in)
targets = np.zeros((n_steps, n_seq * n_batches, n_out))
# whether lag 1 (dim 3) is greater than lag 2 (dim 0)
targets[2:, :, 0] = np.cast[np.int](seq[1:-1, :, 3] > seq[:-2, :, 0])
if multiple_out:
# whether product of lag 1 (dim 4) and lag 1 (dim 2)
# is less than lag 2 (dim 0)
targets[2:, :, 1] = np.cast[np.int](
(seq[1:-1, :, 4] * seq[1:-1, :, 2]) > seq[:-2, :, 0])
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.005, learning_rate_decay=0.999,
n_epochs=n_epochs, batch_size=n_seq, activation='tanh',
output_type='binary')
model.fit(seq, targets, validate_every=100, compute_zero_one=True,
optimizer=optimizer)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[:, seq_num, :])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.step(xrange(n_steps), targets[:, seq_num, :],
marker='o')
guess = model.predict_proba(seq[:, seq_num, :][:, np.newaxis, :])
guessed_targets = plt.step(xrange(n_steps), guess.squeeze())
plt.setp(guessed_targets, linestyle='--', marker='d')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_ylim((-0.1, 1.1))
ax2.set_title('solid: true output, dashed: model output (prob)')
def test_softmax(n_epochs=250, optimizer='cg'):
""" Test RNN with softmax outputs. """
n_hidden = 10
n_in = 5
n_steps = 10
n_seq = 10 # per batch
n_batches = 50
n_classes = 3
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_steps, n_seq * n_batches, n_in)
targets = np.zeros((n_steps, n_seq * n_batches), dtype=np.int)
thresh = 0.5
# if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
# class 1
# if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
# class 2
# if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
# class 0
targets[2:, :][seq[1:-1, :, 3] > seq[:-2, :, 0] + thresh] = 1
targets[2:, :][seq[1:-1, :, 3] < seq[:-2, :, 0] - thresh] = 2
#targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.005, learning_rate_decay=0.999,
n_epochs=n_epochs, batch_size=n_seq, activation='relu',
output_type='softmax')
model.fit(seq, targets, validate_every=10, compute_zero_one=True,
optimizer=optimizer)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[:, seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
# blue line will represent true classes
true_targets = plt.step(xrange(n_steps), targets[:, seq_num],
marker='o')
# show probabilities (in b/w) output by model
guess = model.predict_proba(seq[:, seq_num][:, np.newaxis])
guessed_probs = plt.imshow(guess.squeeze().T, interpolation='nearest',
cmap='gray')
ax2.set_title('blue: true class, grayscale: probs assigned by model')
def test_softmax2(n_epochs=250, optimizer='sgd'):
""" Test RNN with a single softmax output after the sequence. """
n_hidden = 50
n_in = 1
n_steps = 50
n_classes = 4
batch_size = 10
n_seq=100*n_classes
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# simple distributions test
seq = np.zeros((n_seq, n_steps, n_in))
eachSize = (n_seq/n_classes, n_steps, 1)
seq[:n_seq/n_classes] = np.random.uniform(0,1,eachSize) # uniform positive
    seq[n_seq/n_classes:2*n_seq/n_classes] = np.random.uniform(-1,0,eachSize) # uniform negative
seq[2*n_seq/n_classes:3*n_seq/n_classes] = np.random.uniform(1,2,eachSize) # uniform [1,2]
seq[3*n_seq/n_classes:] = np.random.gamma(shape=1.0, size=eachSize) # gamma (mostly between 0 and 3)
targets = np.repeat(np.asarray(range(n_classes)), n_seq/n_classes)
#targets = np.expand_dims(targets, axis=1)
print seq.shape, targets.shape
d = zip(seq, targets)
random.shuffle(d)
seq = np.asarray([i[0] for i in d])
targets = np.asarray([i[1] for i in d])
print seq.shape, targets.shape
seq = seq.transpose(1,0,2)
#targets = targets.transpose(1,0)
print seq.shape, targets.shape
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.00001, batch_size=batch_size, learning_rate_decay=0.999,
n_epochs=n_epochs, activation='relu', grad_max=10,
output_type='softmax', only_output_after=True)
model.fit(seq, targets, validate_every=10, compute_zero_one=True)
seqs = xrange(10)
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
ax1.set_title('input')
ax2 = plt.subplot(212)
# blue line will represent true classes
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
# show probabilities (in b/w) output by model
guess = model.predict_proba(seq[seq_num])
guessed_probs = plt.imshow(guess.T, interpolation='nearest',
cmap='gray')
ax2.set_title('blue: true class, grayscale: probs assigned by model')
def load_data_np(datafile):
print '... loading data'
# Load the dataset
with open(datafile) as f:
[tr, val, tst] = pickle.load(f)
return tr[0], tr[1]
def test_mnist(n_epochs=250, optimizer='sgd'):
""" Test RNN with softmax outputs on the mnist data set. """
n_hidden = 100
n_in = 1
n_steps = 784
n_classes = 10
batch_size = 20
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# load mnist
from os.path import expanduser
home = expanduser("~")
seq, y = load_data_np(home+"/datasets/mnistSMALL.pkl")
seq = np.expand_dims(seq, axis=2)
#seq, y = seq[:100], y[:100]
n_seq = len(seq)
# steps first for ungodly reasons
seq = seq.transpose(1,0,2)
targets = y
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.0000001, batch_size=batch_size, learning_rate_decay=0.999,
n_epochs=n_epochs, activation='relu', grad_max=10,
output_type='softmax', only_output_after=True)
model.fit(seq, targets, validate_every=10, compute_zero_one=True)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
t0 = time.time()
#test_real(n_epochs=1000)
#test_binary(optimizer='sgd', n_epochs=1000)
#test_softmax(n_epochs=500, optimizer='sgd')
#test_softmax2(n_epochs=500, optimizer='sgd')
test_mnist(n_epochs=500, optimizer='sgd')
print "Elapsed time: %f" % (time.time() - t0)
| ebuchman/theano-rnn | rnn_minibatch.py | Python | bsd-3-clause | 44,781 |
#python
import k3d
import testing
import copy
source_file = "papagayo_example.dat"
setup = testing.setup_scalar_source_test("PapagayoLipsyncReader")
setup.source.frame_rate = 30
setup.source.interpolate = True
setup.source.interpolation_time = 0.2
setup.source.papagayo_file = k3d.filesystem.generic_path(testing.source_path() + "/lipsync/" + source_file)
test_cases = \
[
[0.0,{"rest":1.0}],
[1.98,{"E":0.24000000000000021,"etc":0.75999999999999979}],
[2.0,{"E":0.5,"etc":0.5}],
[4.34,{"E":0.69999999999999463,"MBP":0.30000000000000537}],
]
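# Each test case pairs a playback time in seconds with the expected non-zero
# mouth weights (which sum to 1); every other mouth is checked to be zero below.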
mouths = ["AI","E","etc","FV","L","MBP","O","rest","U","WQ"]
for test_case in test_cases:
setup.source.time = test_case[0]
source_mouth_value = 0.0
mouths_in_zero = copy.deepcopy(mouths)
for mouth,reference_value in test_case[1].iteritems():
exec("source_mouth_value = setup.source."+mouth)
testing.require_scalar_value(source_mouth_value,reference_value)
mouths_in_zero.remove(mouth)
#Check the other mouths are in zero
for mouth in mouths_in_zero:
exec("source_mouth_value = setup.source."+mouth)
testing.require_scalar_value(source_mouth_value,0.0)
| barche/k3d | tests/double/source.PapagayoLipsyncReader.py | Python | gpl-2.0 | 1,134 |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
def _word_count(filename):
  """Reads the file and returns a dict mapping each lowercase word to its count."""
  counts = {}
  f = open(filename, 'rU')
  for word in f.read().lower().split():
    counts[word] = counts.get(word, 0) + 1
  f.close()
  return counts
def print_words(filename):
  """Prints each word and its count, sorted alphabetically by word."""
  counts = _word_count(filename)
  for word in sorted(counts):
    print word, counts[word]
def print_top(filename):
  """Prints the 20 most common words, most common first."""
  counts = _word_count(filename)
  top = sorted(counts.items(), key=lambda pair: pair[1], reverse=True)[:20]
  for word, count in top:
    print word, count
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
| ssarber/google-python-exercises | basic/wordcount.py | Python | apache-2.0 | 2,205 |
# -*- coding: utf-8 -*-
class OAuthUser:
def __init__(self, access_token, user_id):
self.user_id = user_id
self.access_token = access_token
self.email = None
self.title = None
self.name = None
self.avatar_url = None
self.description = None
def __str__(self):
        return (f"OAuthUser(user_id={self.user_id}, "
                f"access_token='{self.access_token}')")
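# A small illustrative sketch (hypothetical values, not part of the module API):
if __name__ == "__main__":
    demo_user = OAuthUser(access_token="token-123", user_id="42")
    demo_user.email = "user@example.com"  # optional profile fields default to None
    print(demo_user)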
| lcgong/alchemy | busiserv/login/user.py | Python | gpl-3.0 | 439 |
import sys
import json
from os import path
from argparse import ArgumentParser
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))) + '/utils/')
from algorithm_utils import set_algorithms_output_data
from health_check_lib import HealthCheckLocalDT
def main(args):
# Parse arguments
sys.argv = args
parser = ArgumentParser()
parser.add_argument('-local_step_dbs', required=True, help='Path to local db.')
args, unknown = parser.parse_known_args()
local_dbs = path.abspath(args.local_step_dbs)
local_out = HealthCheckLocalDT.load(local_dbs)
nodes = {}
nodes["active_nodes"] = local_out.get_data()
# Return the algorithm's output
set_algorithms_output_data(json.dumps(nodes))
if __name__ == '__main__':
    main(sys.argv)
| madgik/exareme | Exareme-Docker/src/mip-algorithms/HEALTH_CHECK/global.py | Python | mit | 808 |
import re
filename = 'baladhuri_futuh.txt'
text = open(filename, mode='r', encoding='utf-8').read()
def index_generator(word, text):
juz = 'الجزء:'
safha = 'الصفحة:'
page_regex = juz + r' \d+ ¦ ' + safha + r' \d+'
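    # juz ("volume/part") and safha ("page") are the Arabic pagination markers;
    # the non-greedy pattern built below captures the first marker after the word.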
search_regex = word + r'.+?(' + page_regex + ')'
pagination = re.findall(search_regex, text, re.DOTALL)
return pagination
index = index_generator('فرضة', text)
for page in index:
print(page)
| jedlitools/find-for-me | ex12_index_generator.py | Python | mit | 453 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import unittest
from airflow.models import DAG, DagRun, TaskInstance as TI
from airflow.operators.branch_operator import BaseBranchOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
INTERVAL = datetime.timedelta(hours=12)
class ChooseBranchOne(BaseBranchOperator):
def choose_branch(self, context):
return 'branch_1'
class ChooseBranchOneTwo(BaseBranchOperator):
def choose_branch(self, context):
return ['branch_1', 'branch_2']
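# choose_branch() returns the task_id (or a list of task_ids) to follow; the
# branch operator then skips every other task directly downstream of it.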
class TestBranchOperator(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
with create_session() as session:
session.query(DagRun).delete()
session.query(TI).delete()
def setUp(self):
self.dag = DAG('branch_operator_test',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE},
schedule_interval=INTERVAL)
self.branch_1 = DummyOperator(task_id='branch_1', dag=self.dag)
self.branch_2 = DummyOperator(task_id='branch_2', dag=self.dag)
self.branch_3 = None
self.branch_op = None
def tearDown(self):
super().tearDown()
with create_session() as session:
session.query(DagRun).delete()
session.query(TI).delete()
def test_without_dag_run(self):
"""This checks the defensive against non existent tasks in a dag run"""
self.branch_op = ChooseBranchOne(task_id="make_choice", dag=self.dag)
self.branch_1.set_upstream(self.branch_op)
self.branch_2.set_upstream(self.branch_op)
self.dag.clear()
self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
with create_session() as session:
tis = session.query(TI).filter(
TI.dag_id == self.dag.dag_id,
TI.execution_date == DEFAULT_DATE
)
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
# should exist with state None
self.assertEqual(ti.state, State.NONE)
elif ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise Exception
def test_branch_list_without_dag_run(self):
"""This checks if the BranchOperator supports branching off to a list of tasks."""
self.branch_op = ChooseBranchOneTwo(task_id='make_choice', dag=self.dag)
self.branch_1.set_upstream(self.branch_op)
self.branch_2.set_upstream(self.branch_op)
self.branch_3 = DummyOperator(task_id='branch_3', dag=self.dag)
self.branch_3.set_upstream(self.branch_op)
self.dag.clear()
self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
with create_session() as session:
tis = session.query(TI).filter(
TI.dag_id == self.dag.dag_id,
TI.execution_date == DEFAULT_DATE
)
expected = {
"make_choice": State.SUCCESS,
"branch_1": State.NONE,
"branch_2": State.NONE,
"branch_3": State.SKIPPED,
}
for ti in tis:
if ti.task_id in expected:
self.assertEqual(ti.state, expected[ti.task_id])
else:
raise Exception
def test_with_dag_run(self):
self.branch_op = ChooseBranchOne(task_id="make_choice", dag=self.dag)
self.branch_1.set_upstream(self.branch_op)
self.branch_2.set_upstream(self.branch_op)
self.dag.clear()
dagrun = self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dagrun.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEqual(ti.state, State.NONE)
elif ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.SKIPPED)
else:
raise Exception
def test_with_skip_in_branch_downstream_dependencies(self):
self.branch_op = ChooseBranchOne(task_id="make_choice", dag=self.dag)
self.branch_op >> self.branch_1 >> self.branch_2
self.branch_op >> self.branch_2
self.dag.clear()
dagrun = self.dag.create_dagrun(
run_type=DagRunType.MANUAL,
start_date=timezone.utcnow(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dagrun.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEqual(ti.state, State.NONE)
elif ti.task_id == 'branch_2':
self.assertEqual(ti.state, State.NONE)
else:
raise Exception
| wooga/airflow | tests/operators/test_branch_operator.py | Python | apache-2.0 | 6,484 |
class StreamlinkError(Exception):
"""Any error caused by Streamlink will be caught
with this exception."""
class PluginError(StreamlinkError):
"""Plugin related error."""
class FatalPluginError(PluginError):
"""
Plugin related error that cannot be recovered from
    Plugins should use this exception when errors that can
    never be recovered from are encountered. For example, when
    a user's input is required and none can be given.
"""
class NoStreamsError(StreamlinkError):
def __init__(self, url):
self.url = url
err = "No streams found on this URL: {0}".format(url)
Exception.__init__(self, err)
class NoPluginError(PluginError):
"""No relevant plugin has been loaded."""
class StreamError(StreamlinkError):
"""Stream related error."""
__all__ = ["StreamlinkError", "PluginError", "NoPluginError",
"NoStreamsError", "StreamError"]
| chhe/streamlink | src/streamlink/exceptions.py | Python | bsd-2-clause | 928 |
from .iotd_service import IotdService
| astrobin/astrobin | astrobin_apps_iotd/services/__init__.py | Python | agpl-3.0 | 38 |
#! /usr/bin/env python3
""" Cruft checker and hole filler for overrides
@contact: Debian FTPMaster <ftpmaster@debian.org>
@copyright: 2000, 2001, 2002, 2004, 2006 James Troup <james@nocrew.org>
@copyright: 2005 Jeroen van Wolffelaar <jeroen@wolffelaar.nl>
@copyright: 2011 Joerg Jaspert <joerg@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
######################################################################
# NB: dak check-overrides is not a good idea with New Incoming as it #
# doesn't take into account accepted. You can minimize the impact #
# of this by running it immediately after dak process-accepted but #
# that's still racy because 'dak process-new' doesn't lock with 'dak #
# process-accepted'. A better long term fix is the evil plan for #
# accepted to be in the DB. #
######################################################################
# dak check-overrides should now work fine being done during
# cron.daily, for example just before 'dak make-overrides' (after 'dak
# process-accepted' and 'dak make-suite-file-list'). At that point,
# queue/accepted should be empty and installed, so... dak
# check-overrides does now take into account suites sharing overrides
# TODO:
# * Only update out-of-sync overrides when corresponding versions are equal to
# some degree
# * consistency checks like:
# - section=debian-installer only for udeb and # dsc
# - priority=optional if dsc
# - (suite, package, 'dsc') is unique,
# - just as (suite, package, (u)deb) (yes, across components!)
# - sections match their component (each component has an own set of sections,
# could probably be reduced...)
################################################################################
import sys
import apt_pkg
from daklib.config import Config
from daklib.dbconn import *
from daklib import daklog
from daklib import utils
################################################################################
Options = None #: Commandline arguments parsed into this
Logger = None #: Our logging object
sections = {}
priorities = {}
blacklist = {}
################################################################################
def usage(exit_code=0):
print("""Usage: dak check-overrides
Check for cruft in overrides.
-n, --no-action don't do anything
-h, --help show this help and exit""")
sys.exit(exit_code)
################################################################################
def process(osuite, affected_suites, originosuite, component, otype, session):
global Logger, Options, sections, priorities
o = get_suite(osuite, session)
if o is None:
utils.fubar("Suite '%s' not recognised." % (osuite))
osuite_id = o.suite_id
originosuite_id = None
if originosuite:
oo = get_suite(originosuite, session)
if oo is None:
utils.fubar("Suite '%s' not recognised." % (originosuite))
originosuite_id = oo.suite_id
c = get_component(component, session)
if c is None:
utils.fubar("Component '%s' not recognised." % (component))
component_id = c.component_id
ot = get_override_type(otype, session)
if ot is None:
utils.fubar("Type '%s' not recognised. (Valid types are deb, udeb and dsc)" % (otype))
type_id = ot.overridetype_id
dsc_type_id = get_override_type("dsc", session).overridetype_id
source_priority_id = get_priority("optional", session).priority_id
if otype == "deb" or otype == "udeb":
packages = {}
# TODO: Fix to use placeholders (check how to with arrays)
q = session.execute("""
SELECT b.package
FROM binaries b
JOIN bin_associations ba ON b.id = ba.bin
JOIN suite ON ba.suite = suite.id
JOIN files_archive_map af ON b.file = af.file_id AND suite.archive_id = af.archive_id
WHERE b.type = :otype AND ba.suite IN (%s) AND af.component_id = :component_id
""" % (",".join([str(i) for i in affected_suites])), {'otype': otype, 'component_id': component_id})
for i in q.fetchall():
packages[i[0]] = 0
src_packages = {}
q = session.execute("""
SELECT s.source FROM source s
JOIN src_associations sa ON s.id = sa.source
JOIN suite ON sa.suite = suite.id
JOIN files_archive_map af ON s.file = af.file_id AND suite.archive_id = af.archive_id
WHERE sa.suite IN (%s) AND af.component_id = :component_id
""" % (",".join([str(i) for i in affected_suites])), {'component_id': component_id})
for i in q.fetchall():
src_packages[i[0]] = 0
# -----------
# Drop unused overrides
q = session.execute("""SELECT package, priority, section, maintainer
FROM override WHERE suite = :suite_id
AND component = :component_id AND type = :type_id""",
{'suite_id': osuite_id, 'component_id': component_id,
'type_id': type_id})
# We're already within a transaction
if otype == "dsc":
for i in q.fetchall():
package = i[0]
if package in src_packages:
src_packages[package] = 1
else:
if package in blacklist:
utils.warn("%s in incoming, not touching" % package)
continue
Logger.log(["removing unused override", osuite, component,
otype, package, priorities[i[1]], sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""DELETE FROM override WHERE package = :package
AND suite = :suite_id AND component = :component_id
AND type = :type_id
AND created < now() - interval '14 days'""",
{'package': package, 'suite_id': osuite_id,
'component_id': component_id, 'type_id': type_id})
# create source overrides based on binary overrides, as source
# overrides not always get created
q = session.execute("""SELECT package, priority, section, maintainer
FROM override WHERE suite = :suite_id AND component = :component_id""",
{'suite_id': osuite_id, 'component_id': component_id})
for i in q.fetchall():
package = i[0]
if package not in src_packages or src_packages[package]:
continue
src_packages[package] = 1
Logger.log(["add missing override", osuite, component,
otype, package, "source", sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""INSERT INTO override (package, suite, component,
priority, section, type, maintainer)
VALUES (:package, :suite_id, :component_id,
:priority_id, :section_id, :type_id, :maintainer)""",
{'package': package, 'suite_id': osuite_id,
'component_id': component_id, 'priority_id': source_priority_id,
'section_id': i[2], 'type_id': dsc_type_id, 'maintainer': i[3]})
# Check whether originosuite has an override for us we can
# copy
if originosuite:
q = session.execute("""SELECT origin.package, origin.priority, origin.section,
origin.maintainer, target.priority, target.section,
target.maintainer
FROM override origin
LEFT JOIN override target ON (origin.package = target.package
AND target.suite = :suite_id
AND origin.component = target.component
AND origin.type = target.type)
WHERE origin.suite = :originsuite_id
AND origin.component = :component_id
AND origin.type = :type_id""",
{'suite_id': osuite_id, 'originsuite_id': originosuite_id,
'component_id': component_id, 'type_id': type_id})
for i in q.fetchall():
package = i[0]
if package not in src_packages or src_packages[package]:
if i[4] and (i[1] != i[4] or i[2] != i[5] or i[3] != i[6]):
Logger.log(["syncing override", osuite, component,
otype, package, "source", sections[i[5]], i[6], "source", sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""UPDATE override
SET priority = :priority,
section = :section,
maintainer = :maintainer
WHERE package = :package AND suite = :suite_id
AND component = :component_id AND type = :type_id""",
{'priority': i[1],
'section': i[2], 'maintainer': i[3],
'package': package, 'suite_id': osuite_id,
'component_id': component_id, 'type_id': dsc_type_id})
continue
# we can copy
src_packages[package] = 1
Logger.log(["copying missing override", osuite, component,
otype, package, "source", sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""INSERT INTO override (package, suite, component,
priority, section, type, maintainer)
VALUES (:package, :suite_id, :component_id,
:priority_id, :section_id, :type_id,
:maintainer)""",
{'package': package, 'suite_id': osuite_id,
'component_id': component_id, 'priority_id': source_priority_id,
'section_id': i[2], 'type_id': dsc_type_id, 'maintainer': i[3]})
for package, hasoverride in list(src_packages.items()):
if not hasoverride:
utils.warn("%s has no override!" % package)
else: # binary override
for i in q.fetchall():
package = i[0]
if package in packages:
packages[package] = 1
else:
if package in blacklist:
utils.warn("%s in incoming, not touching" % package)
continue
Logger.log(["removing unused override", osuite, component,
otype, package, priorities[i[1]], sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""DELETE FROM override
WHERE package = :package AND suite = :suite_id
AND component = :component_id AND type = :type_id
AND created < now() - interval '14 days'""",
{'package': package, 'suite_id': osuite_id,
'component_id': component_id, 'type_id': type_id})
# Check whether originosuite has an override for us we can
# copy
if originosuite:
q = session.execute("""SELECT origin.package, origin.priority, origin.section,
origin.maintainer, target.priority, target.section,
target.maintainer
FROM override origin LEFT JOIN override target
ON (origin.package = target.package
AND target.suite = :suite_id
AND origin.component = target.component
AND origin.type = target.type)
WHERE origin.suite = :originsuite_id
AND origin.component = :component_id
AND origin.type = :type_id""",
{'suite_id': osuite_id, 'originsuite_id': originosuite_id,
'component_id': component_id, 'type_id': type_id})
for i in q.fetchall():
package = i[0]
if package not in packages or packages[package]:
if i[4] and (i[1] != i[4] or i[2] != i[5] or i[3] != i[6]):
Logger.log(["syncing override", osuite, component,
otype, package, priorities[i[4]], sections[i[5]],
i[6], priorities[i[1]], sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""UPDATE override
SET priority = :priority_id,
section = :section_id,
maintainer = :maintainer
WHERE package = :package
AND suite = :suite_id
AND component = :component_id
AND type = :type_id""",
{'priority_id': i[1], 'section_id': i[2],
'maintainer': i[3], 'package': package,
'suite_id': osuite_id, 'component_id': component_id,
'type_id': type_id})
continue
# we can copy
packages[package] = 1
Logger.log(["copying missing override", osuite, component,
otype, package, priorities[i[1]], sections[i[2]], i[3]])
if not Options["No-Action"]:
session.execute("""INSERT INTO override (package, suite, component,
priority, section, type, maintainer)
VALUES (:package, :suite_id, :component_id,
:priority_id, :section_id, :type_id, :maintainer)""",
{'package': package, 'suite_id': osuite_id,
'component_id': component_id, 'priority_id': i[1],
'section_id': i[2], 'type_id': type_id, 'maintainer': i[3]})
for package, hasoverride in list(packages.items()):
if not hasoverride:
utils.warn("%s has no override!" % package)
session.commit()
sys.stdout.flush()
################################################################################
def main():
global Logger, Options, sections, priorities
cnf = Config()
Arguments = [('h', "help", "Check-Overrides::Options::Help"),
('n', "no-action", "Check-Overrides::Options::No-Action")]
for i in ["help", "no-action"]:
key = "Check-Overrides::Options::%s" % i
if key not in cnf:
cnf[key] = ""
apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
Options = cnf.subtree("Check-Overrides::Options")
if Options["Help"]:
usage()
session = DBConn().session()
# init sections, priorities:
# We need forward and reverse
sections = get_sections(session)
for name, entry in list(sections.items()):
sections[entry] = name
priorities = get_priorities(session)
for name, entry in list(priorities.items()):
priorities[entry] = name
if not Options["No-Action"]:
Logger = daklog.Logger("check-overrides")
else:
Logger = daklog.Logger("check-overrides", 1)
for suite in session.query(Suite).filter(Suite.overrideprocess == True): # noqa:E712
originosuite = None
originremark = ''
if suite.overrideorigin is not None:
originosuite = get_suite(suite.overrideorigin, session)
if originosuite is None:
utils.fubar("%s has an override origin suite of %s but it doesn't exist!" % (suite.suite_name, suite.overrideorigin))
originosuite = originosuite.suite_name
originremark = " taking missing from %s" % originosuite
print("Processing %s%s..." % (suite.suite_name, originremark))
# Get a list of all suites that use the override file of 'suite.suite_name' as
# well as the suite
ocodename = suite.codename
suiteids = [x.suite_id for x in session.query(Suite).filter(Suite.overridecodename == ocodename).all()]
if suite.suite_id not in suiteids:
suiteids.append(suite.suite_id)
if len(suiteids) < 1:
utils.fubar("Couldn't find id's of all suites: %s" % suiteids)
for component in session.query(Component).all():
# It is crucial for the dsc override creation based on binary
# overrides that 'dsc' goes first
component_name = component.component_name
otypes = ['dsc']
for ot in session.query(OverrideType):
if ot.overridetype == 'dsc':
continue
otypes.append(ot.overridetype)
for otype in otypes:
print("Processing %s [%s - %s]"
% (suite.suite_name, component_name, otype))
sys.stdout.flush()
process(suite.suite_name, suiteids, originosuite, component_name, otype, session)
Logger.close()
################################################################################
if __name__ == '__main__':
main()
| Debian/dak | dak/check_overrides.py | Python | gpl-2.0 | 19,666 |
#!/usr/bin/env python3
import time
import pyclamster
import logging
import numpy as np
import os
import pickle
logging.basicConfig(level=logging.DEBUG)
start_time = time.time()
# read an image
img = pyclamster.image.Image(os.path.join("examples/images/wolf/",
"Image_20160527_144000_UTCp1_3.jpg"))
# convert to grayscale
img.image = img.convert("L")
# resize image
img.image = img.resize((200,200))
### create rectified coordinates ###
outshape=(300,300) # size of output image
rect_azimuth_offset = 3/2 * np.pi # north angle of rectified image
rect_clockwise = True
rect_x,rect_y=np.meshgrid(
np.linspace(-20,20,num=outshape[1]),# image x coordinate goes right
np.linspace(20,-20,num=outshape[0]) # image y coordinate goes up
)
rect_z = 4 # rectify for height rect_z
rect_coord = pyclamster.coordinates.Coordinates3d(
x = rect_x,
y = rect_y,
z = rect_z,
azimuth_offset = rect_azimuth_offset,
azimuth_clockwise = rect_clockwise,
shape=outshape
)
### create spherical coordinates of original image ###
# read calibration of wolf-3-camera
calibrationfile = "examples/calibration/wolf-3-calibration.pk"
calibration = pickle.load(open(calibrationfile,"rb"))
# get calibrated coordinates
img.coordinates = calibration.create_coordinates(img.data.shape)
img.coordinates.z = rect_z
### create rectification map ###
distmapfile = "examples/fisheye/fisheye-wolf-distmap.pk"
if True and os.path.exists(distmapfile): # use distmap from file
logging.debug("read rectifiation map from file")
distmap = pickle.load(open(distmapfile,"rb"))
else: # calculate distmap
# based on regular grid
logging.debug("calculating rectification map")
distmap = pyclamster.fisheye.FisheyeProjection.distortionMap(
in_coord=img.coordinates, out_coord=rect_coord, method="nearest"
,basedon="spherical")
### rectify image ###
rectimage = img.applyDistortionMap(distmap)
### plot results ###
import matplotlib.pyplot as plt
plt.subplot(3,4,1)
plt.title("original image (fix)")
plt.imshow(img.data, interpolation="nearest", cmap='Greys_r')
plt.subplot(3,4,2)
plt.title("image radius (calculated)")
plt.imshow(img.coordinates.radiush, interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,3)
plt.title("rectified r (calculated)")
plt.imshow(rect_coord.radiush,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,4)
plt.title("rectified image (calculated)")
plt.imshow(rectimage.data, interpolation="nearest", cmap='Greys_r')
plt.subplot(3,4,5)
plt.title("image elevation (fix)")
plt.imshow(img.coordinates.elevation,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,9)
plt.title("image azimuth (fix)")
plt.imshow(img.coordinates.azimuth,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,6)
plt.title("image x (calculated)")
plt.imshow(img.coordinates.x,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,10)
plt.title("image y (calculated)")
plt.imshow(img.coordinates.y,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,7)
plt.title("rectified x (fix)")
plt.imshow(rect_coord.x,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,11)
plt.title("rectified y (fix)")
plt.imshow(rect_coord.y,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,8)
plt.title("rectified elevation (calculated)")
plt.imshow(rect_coord.elevation,interpolation="nearest")
plt.colorbar()
plt.subplot(3,4,12)
plt.title("rectified azimuth (calculated)")
plt.imshow(rect_coord.azimuth,interpolation="nearest")
plt.colorbar()
logging.debug("Time elapsed: {0:.3f} s".format(time.time()-start_time))
plt.show()
| LEX2016WoKaGru/pyClamster | examples/fisheye/fisheye-wolf.py | Python | gpl-3.0 | 3,571 |
# -*- coding: utf-8 -*-
from django.conf import settings
import requests
ERRORS = {
'missing-input-secret': 'reCAPTCHA: O campo chave está vazio',
'invalid-input-secret': 'reCAPTCHA: O campo chave está errado ou inválido',
'missing-input-response': 'reCAPTCHA: O campo de resposta está vazio',
'invalid-input-response': 'reCAPTCHA: O campo de resposta está errado '
'ou inválido',
'bad-request': 'reCAPTCHA: A requisição está errada ou inválida',
}
def verify(captcha_response, remote_ip=None):
url = "https://www.google.com/recaptcha/api/siteverify"
params = {
'secret': settings.RECAPTCHA_PRIVATE_KEY,
'response': captcha_response,
}
if remote_ip:
params['remoteip'] = remote_ip
verify_response = requests.get(url, params=params, verify=False)
return verify_response.json()
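# Illustrative sketch (not part of the original module): one way a caller
# could use verify() together with the ERRORS table above. The response
# fields ('success', 'error-codes') follow Google's documented siteverify
# JSON; the surrounding view logic is hypothetical.
def _example_check_captcha(post_data, remote_ip=None):
    result = verify(post_data.get('g-recaptcha-response', ''), remote_ip)
    if result.get('success'):
        return True, []
    codes = result.get('error-codes', [])
    return False, [ERRORS.get(code, code) for code in codes]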
| labhackercd/colab-edemocracia-plugin | src/colab_edemocracia/captcha.py | Python | gpl-3.0 | 890 |
# -*- coding: utf-8 -*-
#
# IoC documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 29 01:43:00 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sensio.sphinx.refinclude', 'sensio.sphinx.configurationblock', 'sensio.sphinx.phpcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sonata ~ NewsBundle'
copyright = u'2010-2015, Thomas Rabaix'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '0.0.1'
# The full version, including alpha/beta/rc tags.
#release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
#latex_documents = [
# ('index', 'PythonElement.tex', u'Python Documentation',
# u'Thomas Rabaix', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
#(source start file, name, description, authors, manual section).
#man_pages = [
# ('index', 'ioc', u'IoC Documentation',
# [u'Thomas Rabaix'], 1)
#]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = [
# ('index', 'IoC', u'IoC Documentation',
# u'Thomas Rabaix', 'IoC', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| kinkinweb/lhvb | vendor/sonata-project/news-bundle/Resources/doc/conf.py | Python | mit | 7,892 |
#! /usr/bin/env python3
"""
This file is part of Pybakalib.
Pybakalib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pybakalib is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pybakalib. If not, see <http://www.gnu.org/licenses/>.
"""
from datetime import datetime
class MarksModule(list):
def __init__(self, module_marks):
super(MarksModule, self).__init__()
if module_marks['results']['predmety'] is None:
return
s = module_marks['results']['predmety']['predmet']
subjects = s if isinstance(s, list) else [s]
for subj in subjects:
self.append(Subject(subj))
def get_subject(self, name):
for subj in self:
if subj.name == name:
return subj
return None
def list_subject_names(self):
return [subj.name for subj in self]
def get_all_averages(self, weights):
averages = []
for subj in self:
averages.append((subj.name, subj.get_weighted_average(weights)))
averages.sort(key=lambda x: x[1] if x[1] is not None else float('-inf'), reverse=True)
return averages
class Subject(object):
def __init__(self, dict_subject):
self.marks = [] # type: List[Mark]
self.name = dict_subject['nazev'] # type: str
self.abbreviation = dict_subject['zkratka'] # type: str
if 'znamky' in dict_subject and dict_subject['znamky'] is not None: # check for empty subjects
z = dict_subject['znamky']['znamka']
marks = z if isinstance(z, list) else [z]
for mark in marks:
self.add_mark(Mark(mark))
self.marks.sort(key=lambda x: x.date)
def add_mark(self, mark):
self.marks.append(mark)
def get_marks(self):
return self.marks
def get_weighted_average(self, weights, up_to=-1):
"""
        Returns the weighted average of the marks, or None if the total
        weight is zero (e.g. when there are no marks).
        :keyword up_to: Optional number of marks, counted from the beginning,
            over which to calculate the average (defaults to all marks).
"""
up_to = len(self.marks) if up_to == -1 else up_to
w_sum = sum([s.get_weight(weights) for s in self.marks[:up_to]])
a_sum = sum([s.get_weight(weights) * float(s) for s in self.marks[:up_to]])
if w_sum == 0:
return None
else:
return round(a_sum / w_sum, 2)
class Mark(object):
    def __init__(self, dict_mark, mark='1', label='pololetní práce'):
self.mark = mark # type: str
self.label = label # type: str
self.date = None # type: datetime
self.description = None # type: str
self.caption = None # type: str
if dict_mark is not None:
self.mark = dict_mark['znamka']
self.caption = dict_mark['caption']
self.description = dict_mark['poznamka']
self.label = dict_mark['ozn']
self.date = datetime.strptime(dict_mark['udeleno'], "%y%m%d%H%M")
def __float__(self):
try:
return float(self.mark.replace('-', '.5'))
except:
return 0.0
def get_weight(self, weights):
if float(self) == 0:
return 0
return weights[self.label]
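# Illustrative sketch (not part of the original library): a minimal, made-up
# example of how Subject.get_weighted_average() combines marks. The labels
# and weights below are invented; real weight tables are supplied by the
# caller.
def _example_weighted_average():
    subject = Subject({'nazev': 'Mathematics', 'zkratka': 'M'})
    subject.add_mark(Mark(None, mark='1', label='test'))
    subject.add_mark(Mark(None, mark='3', label='homework'))
    weights = {'test': 10, 'homework': 5}
    # (1*10 + 3*5) / (10 + 5) == 1.67 after rounding to two decimals
    return subject.get_weighted_average(weights)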
| vakabus/pybakalib | pybakalib/modules/marks.py | Python | gpl-2.0 | 3,802 |
"""
Implementation of Burgers' equation with a nonlinear solve in each
timestep
"""
import sys
from dolfin import *
from dolfin_adjoint import *
n = 30
mesh = UnitIntervalMesh(n)
V = FunctionSpace(mesh, "CG", 2)
def Dt(u, u_, timestep):
return (u - u_)/timestep
def main(ic, nu, annotate=False):
u_ = Function(ic, name="Velocity")
u = Function(V, name="VelocityNext")
v = TestFunction(V)
timestep = Constant(1.0/n)
F = (Dt(u, u_, timestep)*v
+ u*u.dx(0)*v + nu*u.dx(0)*v.dx(0))*dx
bc = DirichletBC(V, 0.0, "on_boundary")
t = 0.0
end = 0.2
while (t <= end):
solve(F == 0, u, bc, annotate=annotate)
u_.assign(u, annotate=annotate)
t += float(timestep)
adj_inc_timestep()
return u_
if __name__ == "__main__":
ic = project(Expression("sin(2*pi*x[0])"), V)
nu = Constant(0.0001, name="nu")
forward = main(ic, nu, annotate=True)
J = Functional(forward*forward*dx*dt[FINISH_TIME] + forward*forward*dx*dt[START_TIME])
Jm = assemble(forward*forward*dx + ic*ic*dx)
m = [FunctionControl("Velocity"), Control(nu)]
dJdm = compute_gradient(J, m, forget=False)
def Jfunc(m):
if hasattr(m, 'vector'):
info_green("Perturbing initial condition!!")
lic = m
lnu = nu
else:
info_green("Perturbing diffusivity!!")
lic = ic
lnu = m
forward = main(lic, lnu, annotate=False)
return assemble(forward*forward*dx + lic*lic*dx)
minconv = taylor_test(Jfunc, m, Jm, dJdm)
assert minconv > 1.7
| pf4d/dolfin-adjoint | tests_dolfin/list_parameter/list_parameter.py | Python | lgpl-3.0 | 1,607 |
# Generated by Django 2.2.5 on 2019-09-16 15:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0016_auto_20190914_0836'),
]
operations = [
migrations.AddField(
model_name='entry',
name='source_id',
field=models.CharField(blank=True, db_index=True, max_length=100, null=True, unique=True),
),
]
| dbinetti/barberscore | project/apps/registration/migrations/0017_entry_source_id.py | Python | bsd-2-clause | 443 |
#!/usr/bin/env vpython
#
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Runs WebLayer instrumentation tests against arbitrary versions of tests, the
# client, and the implementation.
#
# Example usage, testing M80 tests and client against master implementation:
# autoninja -C out/Release weblayer_instrumentation_test_versions_apk
# cipd install --root /tmp/M80 chromium/testing/weblayer-x86 m80
# out/Release/bin/run_weblayer_instrumentation_test_versions_apk \
# --test-runner-outdir out/Release
# --client-outdir /tmp/M80/out/Release
# --implementation-outdir out/Release
import argparse
import logging
import operator
import os
import re
import subprocess
import sys
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
# Find src root starting from either the release bin directory or original path.
if os.path.basename(CUR_DIR) == 'bin':
SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(CUR_DIR)))
else:
SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
CUR_DIR))))
TYP_DIR = os.path.join(
SRC_DIR, 'third_party', 'catapult', 'third_party', 'typ')
if TYP_DIR not in sys.path:
sys.path.insert(0, TYP_DIR)
import typ
# Mapping of operator string in the expectation file tags to actual operator.
OP_MAP = {'gte': operator.ge, 'lte': operator.le}
def tag_matches(tag, impl_version='trunk', client_version='trunk'):
"""Test if specified versions match the tag.
Args:
tag: skew test expectation tag, e.g. 'impl_lte_5' or 'client_lte_2'.
impl_version: WebLayer implementation version number or 'trunk'.
client_version: WebLayer implementation version number or 'trunk'.
Returns:
True if the specified versions match the tag.
Raises:
AssertionError if the tag is invalid.
"""
  # 'all' is special-cased to match anything.
if tag == 'all':
return True
# Extract the three components from the tag.
match = re.match(r'(client|impl)_([gl]te)_([0-9]+)', tag)
assert match is not None, (
'tag must be of the form "{client,impl}_{gte,lte}_$version", found %r' %
tag)
target_str, op_str, tag_version_str = match.groups()
  # If a version is specified, check whether the tag refers to the matching
  # target; if not, this tag cannot apply, so return False.
if impl_version != 'trunk' and target_str != 'impl':
return False
if client_version != 'trunk' and target_str != 'client':
return False
version = impl_version if impl_version != 'trunk' else client_version
assert type(version) == int, 'Specified version must be an integer.'
tag_version = int(tag_version_str)
op = OP_MAP[op_str]
return op(version, tag_version)
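# Illustrative sketch (not part of the original script): expected behaviour of
# tag_matches() for a few made-up tags and versions.
def _example_tag_matches():
  assert tag_matches('impl_lte_80', impl_version=79)      # 79 <= 80
  assert not tag_matches('impl_lte_80', impl_version=81)  # 81 > 80
  assert tag_matches('client_gte_80', client_version=83)  # 83 >= 80
  assert tag_matches('all', impl_version=75)              # 'all' always matches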
def tests_to_skip(expectation_contents, impl_version='trunk',
client_version='trunk'):
"""Get list of tests to skip for the given version.
Args:
expectation_contents: String containing expectation file contents.
impl_version: WebLayer implementation version number or 'trunk'.
client_version: WebLayer implementation version number or 'trunk'.
Returns:
List of test names to skip.
Raises:
AssertionError if both versions are 'trunk'.
"""
assert impl_version != 'trunk' or client_version != 'trunk'
parser = typ.expectations_parser.TaggedTestListParser(expectation_contents)
tests = []
for expectation in parser.expectations:
assert len(expectation.tags) == 1, (
'Only one tag is allowed per expectation.')
assert len(expectation.results) == 1 and (
typ.json_results.ResultType.Skip in expectation.results), (
'Only "Skip" is supported in the skew test expectations.')
    # Take the first (and only) item; a frozenset cannot be indexed directly.
tag = iter(expectation.tags).next()
if tag_matches(tag, impl_version, client_version):
tests.append(expectation.test)
return tests
def main():
"""Wrapper to call weblayer instrumentation tests with different versions."""
parser = argparse.ArgumentParser(
description='Run weblayer instrumentation tests at different versions.')
parser.add_argument(
'--test-runner-outdir',
required=True,
help='Local build output directory for finding the test runner.')
parser.add_argument(
'--client-outdir',
required=True,
help='Build output directory for WebLayer client.')
parser.add_argument(
'--implementation-outdir',
required=True,
help='Build output directory for WebLayer implementation.')
parser.add_argument(
'--test-expectations',
required=False,
default='',
help=('Test expectations file describing which tests are failing at '
'different versions.'))
# There are two Webview apks that are available for WebLayer skew tests.
# crbug.com/1163652.
parser.add_argument(
'--webview-apk-path',
required=True,
help=('Relative path for the WebLayer implementation library apk. '
'The path is relative to the WebLayer implementation '
'output directory.'))
version_group = parser.add_mutually_exclusive_group(required=True)
version_group.add_argument(
'--client-version',
default='trunk',
help=('Version of the client being used if not trunk. Only set one of '
'--client-version and --impl-version.'))
version_group.add_argument(
'--impl-version',
default='trunk',
help=('Version of the implementation being used if not trunk. Only set '
'one of --client-version and --impl-version.'))
args, remaining_args = parser.parse_known_args()
logging.basicConfig(level=logging.INFO)
# The command line is derived from the resulting command line from
# run_weblayer_instrumentation_test_apk but with parameterized client and
# implementation.
test_runner_srcdir = os.path.normpath(
os.path.join(args.test_runner_outdir, '..', '..'))
executable_path = os.path.join(test_runner_srcdir,
'build/android/test_runner.py')
executable_args = [
'instrumentation',
'--output-directory',
args.client_outdir,
'--runtime-deps-path',
os.path.join(args.client_outdir,
('gen.runtime/weblayer/browser/android/javatests/' +
'weblayer_instrumentation_test_apk.runtime_deps')),
'--test-apk',
os.path.join(args.client_outdir,
'apks/WebLayerInstrumentationTest.apk'),
'--test-jar',
os.path.join(args.client_outdir,
'test.lib.java/WebLayerInstrumentationTest.jar'),
'--apk-under-test',
os.path.join(args.client_outdir, 'apks/WebLayerShellSystemWebView.apk'),
'--use-webview-provider',
os.path.join(args.implementation_outdir, args.webview_apk_path),
'--additional-apk',
os.path.join(args.client_outdir, 'apks/ChromiumNetTestSupport.apk')]
cmd = [sys.executable, executable_path] + executable_args + remaining_args
# Pass along the implementation version if it's set so that tests can
# be filtered through the @MinWebLayerVersion annotation.
# Note: The Chrome Android command line library requires the flag be passed
# with "=" rather than as two arguments.
if args.impl_version != 'trunk':
cmd.append('--impl-version=%s' % args.impl_version)
tests = []
if args.test_expectations:
if args.impl_version != 'trunk':
args.impl_version = int(args.impl_version)
if args.client_version != 'trunk':
args.client_version = int(args.client_version)
with open(args.test_expectations) as expectations_file:
contents = expectations_file.read()
tests = tests_to_skip(contents, impl_version=args.impl_version,
client_version=args.client_version)
if tests:
logging.info('Filtering known failing tests: %s', tests)
cmd.append('--test-filter=-%s' % ':'.join(tests))
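    # The leading '-' makes this an exclusion filter, e.g.
    # --test-filter=-org.Example#testOne:org.Example#testTwo (names illustrative).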
logging.info(' '.join(cmd))
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(main())
| nwjs/chromium.src | weblayer/browser/android/javatests/weblayer_instrumentation_test_versions.py | Python | bsd-3-clause | 8,101 |
# coding: utf8
# Author: Rodrigo Bistolfi
# Date: 03/2013
""" Test cases for Nikola ReST extensions.
A base class, ReSTExtensionTestCase, provides the tests' basic behavior.
Subclasses must override the "sample" class attribute with the ReST markup.
The sample will be rendered as HTML using publish_parts() by setUp().
One method is provided for checking the resulting HTML:
* assertHTMLContains(element, attributes=None, text=None)
The HTML is parsed with lxml for checking against the data you provide. The
method takes an element argument, a string representing the *name* of an HTML
tag, like "script" or "iframe". We will try to find this tag in the document
and perform the tests on it. You can pass a dictionary to the attributes kwarg
representing the names and values of the tag attributes. The text kwarg takes
a string argument, which will be tested against the contents of the HTML
element.
One last caveat: you need to URL-unquote your URLs if you are going to test
attributes like "src" or "link", since the HTML rendered by docutils will
always be unquoted.
"""
from __future__ import unicode_literals
try:
from io import StringIO
except ImportError:
from StringIO import StringIO # NOQA
from docutils.core import publish_parts
from lxml import html
import mock
import unittest
import nikola.plugins.compile_rest
from nikola.utils import _reload
from base import BaseTestCase
class ReSTExtensionTestCase(BaseTestCase):
""" Base class for testing ReST extensions """
sample = None
def setUp(self):
""" Parse cls.sample into a HTML document tree """
super(ReSTExtensionTestCase, self).setUp()
self.setHtmlFromRst(self.sample)
def setHtmlFromRst(self, rst):
""" Create html output from rst string """
self.html = publish_parts(rst, writer_name="html")["body"]
self.html_doc = html.parse(StringIO(self.html))
def assertHTMLContains(self, element, attributes=None, text=None):
""" Test if HTML document includes an element with the given
attributes and text content
"""
try:
tag = next(self.html_doc.iter(element))
except StopIteration:
raise Exception("<{}> not in {}".format(element, self.html))
else:
if attributes:
arg_attrs = set(attributes.items())
tag_attrs = set(tag.items())
self.assertTrue(arg_attrs.issubset(tag_attrs))
if text:
self.assertIn(text, tag.text)
class ReSTExtensionTestCaseTestCase(ReSTExtensionTestCase):
""" Simple test for our base class :) """
sample = '.. raw:: html\n\n <iframe src="foo" height="bar">spam</iframe>'
def test_test(self):
self.assertHTMLContains("iframe", attributes={"src": "foo"},
text="spam")
self.assertRaises(Exception, self.assertHTMLContains, "eggs", {})
class GistTestCase(ReSTExtensionTestCase):
""" Test GitHubGist.
    We will replace get_raw_gist() and get_raw_gist_with_filename() by
    monkeypatching the GitHubGist class to avoid a network dependency.
"""
gist_type = nikola.plugins.compile_rest.GitHubGist
sample = '.. gist:: fake_id\n :file: spam.py'
sample_without_filename = '.. gist:: fake_id2'
def setUp(self):
""" Patch GitHubGist for avoiding network dependency """
self.gist_type.get_raw_gist_with_filename = lambda *_: 'raw_gist_file'
self.gist_type.get_raw_gist = lambda *_: "raw_gist"
_reload(nikola.plugins.compile_rest)
def test_gist(self):
""" Test the gist directive with filename """
self.setHtmlFromRst(self.sample)
output = 'https://gist.github.com/fake_id.js?file=spam.py'
self.assertHTMLContains("script", attributes={"src": output})
self.assertHTMLContains("pre", text="raw_gist_file")
def test_gist_without_filename(self):
""" Test the gist directive without filename """
self.setHtmlFromRst(self.sample_without_filename)
output = 'https://gist.github.com/fake_id2.js'
self.assertHTMLContains("script", attributes={"src": output})
self.assertHTMLContains("pre", text="raw_gist")
class GistIntegrationTestCase(ReSTExtensionTestCase):
""" Test requests integration. The gist plugin uses requests to fetch gist
contents and place it in a noscript tag.
"""
sample = '.. gist:: 1812835'
def test_gist_integration(self):
""" Fetch contents of the gist from GH and render in a noscript tag """
text = ('Be alone, that is the secret of invention: be alone, that is'
' when ideas are born. -- Nikola Tesla')
self.assertHTMLContains('pre', text=text)
class SlidesTestCase(ReSTExtensionTestCase):
""" Slides test case """
sample = '.. slides:: IMG.jpg\n'
def test_slides(self):
""" Test the slides js generation and img tag creation """
self.assertHTMLContains("img", attributes={"src": "IMG.jpg"})
class SoundCloudTestCase(ReSTExtensionTestCase):
""" SoundCloud test case """
sample = '.. soundcloud:: SID\n :height: 400\n :width: 600'
def test_soundcloud(self):
""" Test SoundCloud iframe tag generation """
self.assertHTMLContains("iframe",
attributes={"src": ("https://w.soundcloud.com"
"/player/?url=http://"
"api.soundcloud.com/"
"tracks/SID"),
"height": "400", "width": "600"})
class VimeoTestCase(ReSTExtensionTestCase):
"""Vimeo test.
    Set Vimeo.request_size to False to avoid querying the Vimeo API
    over the network.
"""
sample = '.. vimeo:: VID\n :height: 400\n :width: 600'
def setUp(self):
""" Disable query of the vimeo api over the wire """
nikola.plugins.compile_rest.Vimeo.request_size = False
super(VimeoTestCase, self).setUp()
_reload(nikola.plugins.compile_rest)
def test_vimeo(self):
""" Test Vimeo iframe tag generation """
self.assertHTMLContains("iframe",
attributes={"src": ("http://player.vimeo.com/"
"video/VID"),
"height": "400", "width": "600"})
class YoutubeTestCase(ReSTExtensionTestCase):
""" Youtube test case """
sample = '.. youtube:: YID\n :height: 400\n :width: 600'
def test_youtube(self):
""" Test Youtube iframe tag generation """
self.assertHTMLContains("iframe",
attributes={"src": ("http://www.youtube.com/"
"embed/YID?rel=0&hd=1&"
"wmode=transparent"),
"height": "400", "width": "600"})
class ListingTestCase(ReSTExtensionTestCase):
""" Listing test case and CodeBlock alias tests """
sample = '.. listing:: nikola.py python'
sample2 = '.. code-block:: python\n\n import antigravity'
sample3 = '.. sourcecode:: python\n\n import antigravity'
opener_mock = mock.mock_open(read_data="import antigravity\n")
opener_mock.return_value.readlines.return_value = "import antigravity\n"
def setUp(self):
""" Inject a mock open function for not generating a test site """
self.f = StringIO("import antigravity\n")
#_reload(nikola.plugins.compile_rest)
def test_listing(self):
""" Test that we can render a file object contents without errors """
with mock.patch("nikola.plugins.compile_rest.listing.codecs_open", self.opener_mock, create=True):
self.setHtmlFromRst(self.sample)
def test_codeblock_alias(self):
""" Test CodeBlock aliases """
with mock.patch("nikola.plugins.compile_rest.listing.codecs_open", self.opener_mock, create=True):
self.setHtmlFromRst(self.sample2)
self.setHtmlFromRst(self.sample3)
if __name__ == "__main__":
unittest.main()
| servalproject/nikola | tests/test_rst_extensions.py | Python | mit | 8,282 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer, reactor
from twisted.enterprise import adbapi
from synapse.storage._base import LoggingTransaction, SQLBaseStore
from synapse.storage.engines import create_engine
import argparse
import curses
import logging
import sys
import time
import traceback
import yaml
logger = logging.getLogger("port_from_sqlite_to_postgres")
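# Columns that hold booleans: SQLite stores them as 0/1 integers, so they are
# converted to real booleans before being inserted into PostgreSQL (see
# Porter._convert_rows).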
BOOLEAN_COLUMNS = {
"events": ["processed", "outlier"],
"rooms": ["is_public"],
"event_edges": ["is_state"],
"presence_list": ["accepted"],
}
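# Tables that are only ever appended to (never updated or deleted from), so a
# partially completed port can safely resume from the last copied rowid
# instead of truncating and starting over (see Porter.setup_table).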
APPEND_ONLY_TABLES = [
"event_content_hashes",
"event_reference_hashes",
"event_signatures",
"event_edge_hashes",
"events",
"event_json",
"state_events",
"room_memberships",
"feedback",
"topics",
"room_names",
"rooms",
"local_media_repository",
"local_media_repository_thumbnails",
"remote_media_cache",
"remote_media_cache_thumbnails",
"redactions",
"event_edges",
"event_auth",
"received_transactions",
"sent_transactions",
"transaction_id_to_pdu",
"users",
"state_groups",
"state_groups_state",
"event_to_state_groups",
"rejections",
]
end_error_exec_info = None
class Store(object):
"""This object is used to pull out some of the convenience API from the
Storage layer.
*All* database interactions should go through this object.
"""
def __init__(self, db_pool, engine):
self.db_pool = db_pool
self.database_engine = engine
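    # Borrow the plain functions off SQLBaseStore.__dict__ so the convenience
    # helpers below can be reused here without inheriting the full storage
    # class.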
_simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]
_simple_insert = SQLBaseStore.__dict__["_simple_insert"]
_simple_select_onecol_txn = SQLBaseStore.__dict__["_simple_select_onecol_txn"]
_simple_select_onecol = SQLBaseStore.__dict__["_simple_select_onecol"]
_simple_select_one_onecol = SQLBaseStore.__dict__["_simple_select_one_onecol"]
_simple_select_one_onecol_txn = SQLBaseStore.__dict__["_simple_select_one_onecol_txn"]
_simple_update_one = SQLBaseStore.__dict__["_simple_update_one"]
_simple_update_one_txn = SQLBaseStore.__dict__["_simple_update_one_txn"]
_execute_and_decode = SQLBaseStore.__dict__["_execute_and_decode"]
def runInteraction(self, desc, func, *args, **kwargs):
def r(conn):
try:
i = 0
N = 5
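                # Retry the transaction up to N times if the database reports
                # a deadlock, rolling back and starting again each time.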
while True:
try:
txn = conn.cursor()
return func(
LoggingTransaction(txn, desc, self.database_engine, []),
*args, **kwargs
)
except self.database_engine.module.DatabaseError as e:
if self.database_engine.is_deadlock(e):
logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
if i < N:
i += 1
conn.rollback()
continue
raise
except Exception as e:
logger.debug("[TXN FAIL] {%s} %s", desc, e)
raise
return self.db_pool.runWithConnection(r)
def execute(self, f, *args, **kwargs):
return self.runInteraction(f.__name__, f, *args, **kwargs)
def execute_sql(self, sql, *args):
def r(txn):
txn.execute(sql, args)
return txn.fetchall()
return self.runInteraction("execute_sql", r)
def insert_many_txn(self, txn, table, headers, rows):
sql = "INSERT INTO %s (%s) VALUES (%s)" % (
table,
", ".join(k for k in headers),
", ".join("%s" for _ in headers)
)
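        # e.g. for table='users' and headers=['name', 'admin'] this builds:
        #   INSERT INTO users (name, admin) VALUES (%s, %s)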
try:
txn.executemany(sql, rows)
except:
logger.exception(
"Failed to insert: %s",
table,
)
raise
class Porter(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@defer.inlineCallbacks
def setup_table(self, table):
if table in APPEND_ONLY_TABLES:
# It's safe to just carry on inserting.
next_chunk = yield self.postgres_store._simple_select_one_onecol(
table="port_from_sqlite3",
keyvalues={"table_name": table},
retcol="rowid",
allow_none=True,
)
total_to_port = None
if next_chunk is None:
if table == "sent_transactions":
next_chunk, already_ported, total_to_port = (
yield self._setup_sent_transactions()
)
else:
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
values={"table_name": table, "rowid": 1}
)
next_chunk = 1
already_ported = 0
if total_to_port is None:
already_ported, total_to_port = yield self._get_total_count_to_port(
table, next_chunk
)
else:
def delete_all(txn):
txn.execute(
"DELETE FROM port_from_sqlite3 WHERE table_name = %s",
(table,)
)
txn.execute("TRUNCATE %s CASCADE" % (table,))
yield self.postgres_store.execute(delete_all)
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
values={"table_name": table, "rowid": 0}
)
next_chunk = 1
already_ported, total_to_port = yield self._get_total_count_to_port(
table, next_chunk
)
defer.returnValue((table, already_ported, total_to_port, next_chunk))
@defer.inlineCallbacks
def handle_table(self, table, postgres_size, table_size, next_chunk):
if not table_size:
return
self.progress.add_table(table, postgres_size, table_size)
select = (
"SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
% (table,)
)
while True:
def r(txn):
txn.execute(select, (next_chunk, self.batch_size,))
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
return headers, rows
headers, rows = yield self.sqlite_store.runInteraction("select", r)
if rows:
next_chunk = rows[-1][0] + 1
self._convert_rows(table, headers, rows)
def insert(txn):
self.postgres_store.insert_many_txn(
txn, table, headers[1:], rows
)
self.postgres_store._simple_update_one_txn(
txn,
table="port_from_sqlite3",
keyvalues={"table_name": table},
updatevalues={"rowid": next_chunk},
)
yield self.postgres_store.execute(insert)
postgres_size += len(rows)
self.progress.update(table, postgres_size)
else:
return
def setup_db(self, db_config, database_engine):
db_conn = database_engine.module.connect(
**{
k: v for k, v in db_config.get("args", {}).items()
if not k.startswith("cp_")
}
)
database_engine.prepare_database(db_conn)
db_conn.commit()
@defer.inlineCallbacks
def run(self):
try:
sqlite_db_pool = adbapi.ConnectionPool(
self.sqlite_config["name"],
**self.sqlite_config["args"]
)
postgres_db_pool = adbapi.ConnectionPool(
self.postgres_config["name"],
**self.postgres_config["args"]
)
sqlite_engine = create_engine("sqlite3")
postgres_engine = create_engine("psycopg2")
self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
self.postgres_store = Store(postgres_db_pool, postgres_engine)
yield self.postgres_store.execute(
postgres_engine.check_database
)
# Step 1. Set up databases.
self.progress.set_state("Preparing SQLite3")
self.setup_db(sqlite_config, sqlite_engine)
self.progress.set_state("Preparing PostgreSQL")
self.setup_db(postgres_config, postgres_engine)
# Step 2. Get tables.
self.progress.set_state("Fetching tables")
sqlite_tables = yield self.sqlite_store._simple_select_onecol(
table="sqlite_master",
keyvalues={
"type": "table",
},
retcol="name",
)
postgres_tables = yield self.postgres_store._simple_select_onecol(
table="information_schema.tables",
keyvalues={
"table_schema": "public",
},
retcol="distinct table_name",
)
tables = set(sqlite_tables) & set(postgres_tables)
self.progress.set_state("Creating tables")
logger.info("Found %d tables", len(tables))
def create_port_table(txn):
txn.execute(
"CREATE TABLE port_from_sqlite3 ("
" table_name varchar(100) NOT NULL UNIQUE,"
" rowid bigint NOT NULL"
")"
)
try:
yield self.postgres_store.runInteraction(
"create_port_table", create_port_table
)
except Exception as e:
logger.info("Failed to create port table: %s", e)
self.progress.set_state("Setting up")
# Set up tables.
setup_res = yield defer.gatherResults(
[
self.setup_table(table)
for table in tables
if table not in ["schema_version", "applied_schema_deltas"]
and not table.startswith("sqlite_")
],
consumeErrors=True,
)
# Process tables.
yield defer.gatherResults(
[
self.handle_table(*res)
for res in setup_res
],
consumeErrors=True,
)
self.progress.done()
except:
global end_error_exec_info
end_error_exec_info = sys.exc_info()
logger.exception("")
finally:
reactor.stop()
def _convert_rows(self, table, headers, rows):
bool_col_names = BOOLEAN_COLUMNS.get(table, [])
bool_cols = [
i for i, h in enumerate(headers) if h in bool_col_names
]
def conv(j, col):
if j in bool_cols:
return bool(col)
return col
for i, row in enumerate(rows):
rows[i] = tuple(
conv(j, col)
for j, col in enumerate(row)
if j > 0
)
@defer.inlineCallbacks
def _setup_sent_transactions(self):
# Only save things from the last day
yesterday = int(time.time()*1000) - 86400000
# And save the max transaction id from each destination
select = (
"SELECT rowid, * FROM sent_transactions WHERE rowid IN ("
"SELECT max(rowid) FROM sent_transactions"
" GROUP BY destination"
")"
)
def r(txn):
txn.execute(select)
rows = txn.fetchall()
headers = [column[0] for column in txn.description]
ts_ind = headers.index('ts')
return headers, [r for r in rows if r[ts_ind] < yesterday]
headers, rows = yield self.sqlite_store.runInteraction(
"select", r,
)
self._convert_rows("sent_transactions", headers, rows)
inserted_rows = len(rows)
max_inserted_rowid = max(r[0] for r in rows)
def insert(txn):
self.postgres_store.insert_many_txn(
txn, "sent_transactions", headers[1:], rows
)
yield self.postgres_store.execute(insert)
def get_start_id(txn):
txn.execute(
"SELECT rowid FROM sent_transactions WHERE ts >= ?"
" ORDER BY rowid ASC LIMIT 1",
(yesterday,)
)
rows = txn.fetchall()
if rows:
return rows[0][0]
else:
return 1
next_chunk = yield self.sqlite_store.execute(get_start_id)
next_chunk = max(max_inserted_rowid + 1, next_chunk)
yield self.postgres_store._simple_insert(
table="port_from_sqlite3",
values={"table_name": "sent_transactions", "rowid": next_chunk}
)
def get_sent_table_size(txn):
txn.execute(
"SELECT count(*) FROM sent_transactions"
" WHERE ts >= ?",
(yesterday,)
)
size, = txn.fetchone()
return int(size)
remaining_count = yield self.sqlite_store.execute(
get_sent_table_size
)
total_count = remaining_count + inserted_rows
defer.returnValue((next_chunk, inserted_rows, total_count))
@defer.inlineCallbacks
def _get_remaining_count_to_port(self, table, next_chunk):
rows = yield self.sqlite_store.execute_sql(
"SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
next_chunk,
)
defer.returnValue(rows[0][0])
@defer.inlineCallbacks
def _get_already_ported_count(self, table):
rows = yield self.postgres_store.execute_sql(
"SELECT count(*) FROM %s" % (table,),
)
defer.returnValue(rows[0][0])
@defer.inlineCallbacks
def _get_total_count_to_port(self, table, next_chunk):
remaining, done = yield defer.gatherResults(
[
self._get_remaining_count_to_port(table, next_chunk),
self._get_already_ported_count(table),
],
consumeErrors=True,
)
remaining = int(remaining) if remaining else 0
done = int(done) if done else 0
defer.returnValue((done, remaining + done))
##############################################
###### The following is simply UI stuff ######
##############################################
class Progress(object):
"""Used to report progress of the port
"""
def __init__(self):
self.tables = {}
self.start_time = int(time.time())
def add_table(self, table, cur, size):
self.tables[table] = {
"start": cur,
"num_done": cur,
"total": size,
"perc": int(cur * 100 / size),
}
def update(self, table, num_done):
data = self.tables[table]
data["num_done"] = num_done
data["perc"] = int(num_done * 100 / data["total"])
def done(self):
pass
class CursesProgress(Progress):
"""Reports progress to a curses window
"""
def __init__(self, stdscr):
self.stdscr = stdscr
curses.use_default_colors()
curses.curs_set(0)
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_GREEN, -1)
self.last_update = 0
self.finished = False
self.total_processed = 0
self.total_remaining = 0
super(CursesProgress, self).__init__()
def update(self, table, num_done):
super(CursesProgress, self).update(table, num_done)
self.total_processed = 0
self.total_remaining = 0
for table, data in self.tables.items():
self.total_processed += data["num_done"] - data["start"]
self.total_remaining += data["total"] - data["num_done"]
self.render()
def render(self, force=False):
now = time.time()
if not force and now - self.last_update < 0.2:
# reactor.callLater(1, self.render)
return
self.stdscr.clear()
rows, cols = self.stdscr.getmaxyx()
duration = int(now) - int(self.start_time)
minutes, seconds = divmod(duration, 60)
duration_str = '%02dm %02ds' % (minutes, seconds,)
if self.finished:
status = "Time spent: %s (Done!)" % (duration_str,)
else:
if self.total_processed > 0:
left = float(self.total_remaining) / self.total_processed
est_remaining = (int(now) - self.start_time) * left
est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
else:
est_remaining_str = "Unknown"
status = (
"Time spent: %s (est. remaining: %s)"
% (duration_str, est_remaining_str,)
)
self.stdscr.addstr(
0, 0,
status,
curses.A_BOLD,
)
max_len = max([len(t) for t in self.tables.keys()])
left_margin = 5
middle_space = 1
items = self.tables.items()
items.sort(
key=lambda i: (i[1]["perc"], i[0]),
)
for i, (table, data) in enumerate(items):
if i + 2 >= rows:
break
perc = data["perc"]
color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)
self.stdscr.addstr(
i+2, left_margin + max_len - len(table),
table,
curses.A_BOLD | color,
)
size = 20
progress = "[%s%s]" % (
"#" * int(perc*size/100),
" " * (size - int(perc*size/100)),
)
self.stdscr.addstr(
i+2, left_margin + max_len + middle_space,
"%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
)
if self.finished:
self.stdscr.addstr(
rows-1, 0,
"Press any key to exit...",
)
self.stdscr.refresh()
self.last_update = time.time()
def done(self):
self.finished = True
self.render(True)
self.stdscr.getch()
def set_state(self, state):
self.stdscr.clear()
self.stdscr.addstr(
0, 0,
state + "...",
curses.A_BOLD,
)
self.stdscr.refresh()
class TerminalProgress(Progress):
"""Just prints progress to the terminal
"""
def update(self, table, num_done):
super(TerminalProgress, self).update(table, num_done)
data = self.tables[table]
print "%s: %d%% (%d/%d)" % (
table, data["perc"],
data["num_done"], data["total"],
)
def set_state(self, state):
print state + "..."
##############################################
##############################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A script to port an existing synapse SQLite database to"
" a new PostgreSQL database."
)
parser.add_argument("-v", action='store_true')
parser.add_argument(
"--sqlite-database", required=True,
help="The snapshot of the SQLite database file. This must not be"
" currently used by a running synapse server"
)
parser.add_argument(
"--postgres-config", type=argparse.FileType('r'), required=True,
help="The database config file for the PostgreSQL database"
)
parser.add_argument(
"--curses", action='store_true',
help="display a curses based progress UI"
)
parser.add_argument(
"--batch-size", type=int, default=1000,
help="The number of rows to select from the SQLite table each"
" iteration [default=1000]",
)
args = parser.parse_args()
logging_config = {
"level": logging.DEBUG if args.v else logging.INFO,
"format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
}
if args.curses:
logging_config["filename"] = "port-synapse.log"
logging.basicConfig(**logging_config)
sqlite_config = {
"name": "sqlite3",
"args": {
"database": args.sqlite_database,
"cp_min": 1,
"cp_max": 1,
"check_same_thread": False,
},
}
postgres_config = yaml.safe_load(args.postgres_config)
if "database" in postgres_config:
postgres_config = postgres_config["database"]
if "name" not in postgres_config:
sys.stderr.write("Malformed database config: no 'name'")
sys.exit(2)
if postgres_config["name"] != "psycopg2":
sys.stderr.write("Database must use 'psycopg2' connector.")
sys.exit(3)
def start(stdscr=None):
if stdscr:
progress = CursesProgress(stdscr)
else:
progress = TerminalProgress()
porter = Porter(
sqlite_config=sqlite_config,
postgres_config=postgres_config,
progress=progress,
batch_size=args.batch_size,
)
reactor.callWhenRunning(porter.run)
reactor.run()
if args.curses:
curses.wrapper(start)
else:
start()
if end_error_exec_info:
exc_type, exc_value, exc_traceback = end_error_exec_info
traceback.print_exception(exc_type, exc_value, exc_traceback)
| illicitonion/synapse | scripts/port_from_sqlite_to_postgres.py | Python | apache-2.0 | 22,586 |
import unittest
import wradlib as wrl
import numpy as np
import zlib
import tempfile
import os
import datetime
import io # import StringIO
from collections import OrderedDict
class IOTest(unittest.TestCase):
# testing functions related to readDX
def test__getTimestampFromFilename(self):
filename = 'raa00-dx_10488-200608050000-drs---bin'
self.assertEqual(wrl.io._getTimestampFromFilename(filename), datetime.datetime(2006,8,5,0))
filename = 'raa00-dx_10488-0608050000-drs---bin'
self.assertEqual(wrl.io._getTimestampFromFilename(filename), datetime.datetime(2006,8,5,0))
def test_getDXTimestamp(self):
filename = 'raa00-dx_10488-200608050000-drs---bin'
self.assertEqual(wrl.io.getDXTimestamp(filename).__str__(), '2006-08-05 00:00:00+00:00')
filename = 'raa00-dx_10488-0608050000-drs---bin'
self.assertEqual(wrl.io.getDXTimestamp(filename).__str__(), '2006-08-05 00:00:00+00:00')
def test_unpackDX(self):
pass
def test_readDX(self):
pass
def test_writePolygon2Text(self):
poly1 = [[0.,0.,0.,0.],[0.,1.,0.,1.],[1.,1.,0.,2.],[0.,0.,0.,0.]]
poly2 = [[0.,0.,0.,0.],[0.,1.,0.,1.],[1.,1.,0.,2.],[0.,0.,0.,0.]]
polygons = [poly1, poly2]
res = ['Polygon\n', '0 0\n', '0 0.000000 0.000000 0.000000 0.000000\n', '1 0.000000 1.000000 0.000000 1.000000\n', '2 1.000000 1.000000 0.000000 2.000000\n', '3 0.000000 0.000000 0.000000 0.000000\n', '1 0\n', '0 0.000000 0.000000 0.000000 0.000000\n', '1 0.000000 1.000000 0.000000 1.000000\n', '2 1.000000 1.000000 0.000000 2.000000\n', '3 0.000000 0.000000 0.000000 0.000000\n', 'END\n']
tmp = tempfile.NamedTemporaryFile()
wrl.io.writePolygon2Text(tmp.name, polygons)
self.assertEqual(open(tmp.name, 'r').readlines(), res)
class PickleTest(unittest.TestCase):
def test_pickle(self):
arr = np.zeros((124, 248), dtype=np.int16)
tmp = tempfile.NamedTemporaryFile()
wrl.io.to_pickle(tmp.name, arr)
res = wrl.io.from_pickle(tmp.name)
self.assertTrue(np.allclose(arr, res))
class HDF5Test(unittest.TestCase):
def test_to_hdf5(self):
arr = np.zeros((124, 248), dtype=np.int16)
metadata = {'test': 12.}
tmp = tempfile.NamedTemporaryFile()
wrl.io.to_hdf5(tmp.name, arr, metadata=metadata)
res, resmeta = wrl.io.from_hdf5(tmp.name)
self.assertTrue(np.allclose(arr, res))
self.assertDictEqual(metadata, resmeta)
class RadolanTest(unittest.TestCase):
def test_get_radolan_header_token(self):
keylist = ['BY', 'VS', 'SW', 'PR', 'INT', 'GP',
'MS', 'LV', 'CS', 'MX', 'BG']
head = wrl.io.get_radolan_header_token()
for key in keylist:
self.assertIsNone(head[key])
def test_get_radolan_header_token_pos(self):
header = 'RW030950100000814BY1620130VS 3SW 2.13.1PR E-01INT 60GP 900x 900' \
'MS 58<boo,ros,emd,hnr,pro,ess,asd,neu,nhb,oft,tur,isn,fbg,mem>'
test_head = wrl.io.get_radolan_header_token()
test_head['PR'] = (43, 48)
test_head['GP'] = (57, 66)
test_head['INT'] = (51, 55)
test_head['SW'] = (32, 41)
test_head['VS'] = (28, 30)
test_head['MS'] = (68, 128)
test_head['BY'] = (19, 26)
head = wrl.io.get_radolan_header_token_pos(header)
self.assertDictEqual(head, test_head)
def test_decode_radolan_runlength_line(self):
testarr = [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9., 9.,
9., 9., 9., 9., 9., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
testline = b'\x10\x98\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xd9\n'
testattrs = {'ncol': 460, 'nodataflag': 0}
arr = np.fromstring(testline, np.uint8).astype(np.uint8)
line = wrl.io.decode_radolan_runlength_line(arr, testattrs)
self.assertTrue(np.allclose(line, testarr))
def test_read_radolan_runlength_line(self):
testline = b'\x10\x98\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xf9\xd9\n'
testarr = np.fromstring(testline, np.uint8).astype(np.uint8)
fid, temp_path = tempfile.mkstemp()
tmp_id = open(temp_path, 'wb')
tmp_id.write(testline)
tmp_id.close()
tmp_id = open(temp_path, 'rb')
line = wrl.io.read_radolan_runlength_line(tmp_id)
tmp_id.close()
os.close(fid)
os.remove(temp_path)
self.assertTrue(np.allclose(line, testarr))
def test_decode_radolan_runlength_array(self):
pg_file = os.path.dirname(__file__) + '/../../examples/data/raa00-pc_10015-1408030905-dwd---bin.gz'
pg_fid = wrl.io.get_radolan_filehandle(pg_file)
header = wrl.io.read_radolan_header(pg_fid)
attrs = wrl.io.parse_DWD_quant_composite_header(header)
data = wrl.io.read_radolan_binary_array(pg_fid, attrs['datasize'])
attrs['nodataflag'] = 255
arr = wrl.io.decode_radolan_runlength_array(data, attrs)
self.assertEqual(arr.shape, (460, 460))
def test_read_radolan_binary_array(self):
rw_file = os.path.dirname(__file__) + '/../../examples/data/raa01-rw_10000-1408030950-dwd---bin.gz'
rw_fid = wrl.io.get_radolan_filehandle(rw_file)
header = wrl.io.read_radolan_header(rw_fid)
attrs = wrl.io.parse_DWD_quant_composite_header(header)
data = wrl.io.read_radolan_binary_array(rw_fid, attrs['datasize'])
self.assertEqual(len(data), attrs['datasize'])
rw_fid = wrl.io.get_radolan_filehandle(rw_file)
header = wrl.io.read_radolan_header(rw_fid)
attrs = wrl.io.parse_DWD_quant_composite_header(header)
self.assertRaises(IOError, lambda: wrl.io.read_radolan_binary_array(rw_fid, attrs['datasize'] + 10))
def test_get_radolan_filehandle(self):
rw_file = os.path.dirname(__file__) + '/../../examples/data/raa01-rw_10000-1408030950-dwd---bin.gz'
rw_fid = wrl.io.get_radolan_filehandle(rw_file)
self.assertEqual(rw_file, rw_fid.name)
def test_read_radolan_header(self):
rx_header = b'RW030950100000814BY1620130VS 3SW 2.13.1PR E-01INT 60GP 900x 900' \
b'MS 58<boo,ros,emd,hnr,pro,ess,asd,neu,nhb,oft,tur,isn,fbg,mem>'
buf = io.BytesIO(rx_header + b"\x03")
header = wrl.io.read_radolan_header(buf)
self.assertEqual(header, rx_header.decode())
def test_parse_DWD_quant_composite_header(self):
rx_header = 'RW030950100000814BY1620130VS 3SW 2.13.1PR E-01INT 60GP 900x 900' \
'MS 58<boo,ros,emd,hnr,pro,ess,asd,neu,nhb,oft,tur,isn,fbg,mem>'
test_rx = {'maxrange': '150 km', 'radarlocations': ['boo', 'ros', 'emd', 'hnr', 'pro',
'ess', 'asd', 'neu', 'nhb', 'oft',
'tur', 'isn', 'fbg', 'mem'],
'nrow': 900, 'intervalseconds': 3600, 'precision': 0.1,
'datetime': datetime.datetime(2014, 8, 3, 9, 50), 'ncol': 900,
'radolanversion': '2.13.1', 'producttype': 'RW', 'radarid': '10000',
'datasize': 1620001,}
pg_header = 'PG030905100000814BY20042LV 6 1.0 19.0 28.0 37.0 46.0 55.0CS0MX 0MS 82' \
'<boo,ros,emd,hnr,pro,ess,asd,neu,nhb,oft,tur,isn,fbg,mem,czbrd> are used, ' \
'BG460460'
test_pg = {'radarlocations': ['boo', 'ros', 'emd', 'hnr', 'pro', 'ess', 'asd', 'neu',
'nhb', 'oft', 'tur', 'isn', 'fbg', 'mem', 'czbrd'],
'nrow': 460, 'level': [1., 19., 28., 37., 46., 55.],
'datetime': datetime.datetime(2014, 8, 3, 9, 5), 'ncol': 460,
'producttype': 'PG', 'radarid': '10000', 'nlevel': 6,
'indicator': 'near ground level', 'imagecount': 0, 'datasize': 19889}
rx = wrl.io.parse_DWD_quant_composite_header(rx_header)
pg = wrl.io.parse_DWD_quant_composite_header(pg_header)
for key, value in rx.items():
self.assertEqual(value, test_rx[key])
for key, value in pg.items():
if type(value) == np.ndarray:
self.assertTrue(np.allclose(value, test_pg[key]))
else:
self.assertEqual(value, test_pg[key])
def test_read_RADOLAN_composite(self):
rw_file = os.path.dirname(__file__) + '/../../examples/data/raa01-rw_10000-1408030950-dwd---bin.gz'
test_attrs = {'maxrange': '150 km', 'radarlocations': ['boo', 'ros', 'emd', 'hnr', 'pro',
'ess', 'asd', 'neu', 'nhb', 'oft',
'tur', 'isn', 'fbg', 'mem'],
'nrow': 900, 'intervalseconds': 3600,
'precision': 0.1, 'datetime': datetime.datetime(2014, 8, 3, 9, 50),
'ncol': 900, 'radolanversion': '2.13.1', 'producttype': 'RW', 'nodataflag': -9999,
'datasize': 1620000, 'radarid': '10000'}
# test for complete file
data, attrs = wrl.io.read_RADOLAN_composite(rw_file)
self.assertEqual(data.shape, (900, 900))
for key, value in attrs.items():
if type(value) == np.ndarray:
self.assertIn(value.dtype, [np.int32, np.int64])
else:
self.assertEqual(value, test_attrs[key])
# test for loaddata=False
data, attrs = wrl.io.read_RADOLAN_composite(rw_file, loaddata=False)
self.assertEqual(data, None)
for key, value in attrs.items():
if type(value) == np.ndarray:
self.assertEqual(value.dtype, np.int64)
else:
self.assertEqual(value, test_attrs[key])
self.assertRaises(KeyError, lambda: attrs['nodataflag'])
class RainbowTest(unittest.TestCase):
def test_read_rainbow(self):
pass
def test_find_key(self):
indict = {'A': {'AA': {'AAA': 0, 'X': 1},
'AB': {'ABA': 2, 'X': 3},
'AC': {'ACA': 4, 'X': 5}}}
outdict = [{'X': 1, 'AAA': 0}, {'X': 5, 'ACA': 4}, {'ABA': 2, 'X': 3}]
try:
self.assertCountEqual(list(wrl.io.find_key('X', indict)), outdict)
self.assertCountEqual(list(wrl.io.find_key('Y', indict)), [])
except AttributeError:
self.assertItemsEqual(list(wrl.io.find_key('X', indict)), outdict)
self.assertItemsEqual(list(wrl.io.find_key('Y', indict)), [])
def test_decompress(self):
dstring = b'very special compressed string'
cstring = zlib.compress(dstring)
self.assertEqual(wrl.io.decompress(cstring), dstring)
def test_get_RB_data_layout(self):
self.assertEqual(wrl.io.get_RB_data_layout(8), (1, '>u1'))
self.assertEqual(wrl.io.get_RB_data_layout(16), (2, '>u2'))
self.assertEqual(wrl.io.get_RB_data_layout(32), (4, '>u4'))
self.assertRaises(ValueError, lambda: wrl.io.get_RB_data_layout(128))
def test_get_RB_data_attribute(self):
xmltodict = wrl.util.import_optional('xmltodict')
data = xmltodict.parse('<slicedata time="13:30:05" date="2013-04-26"> \
#<rayinfo refid="startangle" blobid="0" rays="361" depth="16"/> \
#<rawdata blobid="1" rays="361" type="dBuZ" bins="400" min="-31.5" max="95.5" depth="8"/> \
#</slicedata>')
data = list(wrl.io.find_key('@blobid', data))
self.assertEqual(wrl.io.get_RB_data_attribute(data[0], 'blobid'), 0)
self.assertEqual(wrl.io.get_RB_data_attribute(data[1], 'blobid'), 1)
self.assertEqual(wrl.io.get_RB_data_attribute(data[0], 'rays'), 361)
self.assertIsNone(wrl.io.get_RB_data_attribute(data[0], 'bins'))
self.assertEqual(wrl.io.get_RB_data_attribute(data[1], 'rays'), 361)
self.assertEqual(wrl.io.get_RB_data_attribute(data[1], 'bins'), 400)
self.assertRaises(KeyError, lambda: wrl.io.get_RB_data_attribute(data[0], 'Nonsense'))
self.assertEqual(wrl.io.get_RB_data_attribute(data[0], 'depth'), 16)
def test_get_RB_blob_attribute(self):
xmltodict = wrl.util.import_optional('xmltodict')
xmldict = xmltodict.parse('<BLOB blobid="0" size="737" compression="qt"></BLOB>')
self.assertEqual(wrl.io.get_RB_blob_attribute(xmldict, 'compression'), 'qt')
self.assertEqual(wrl.io.get_RB_blob_attribute(xmldict, 'size'), '737')
self.assertEqual(wrl.io.get_RB_blob_attribute(xmldict, 'blobid'), '0')
self.assertRaises(KeyError, lambda: wrl.io.get_RB_blob_attribute(xmldict, 'Nonsense'))
def test_map_RB_data(self):
indata = b'0123456789'
outdata8 = np.array([48, 49, 50, 51, 52, 53, 54, 55, 56, 57], dtype=np.uint8)
outdata16 = np.array([12337, 12851, 13365, 13879, 14393], dtype=np.uint16)
outdata32 = np.array([808530483, 875902519], dtype=np.uint32)
self.assertTrue(np.allclose(wrl.io.map_RB_data(indata, 8), outdata8))
self.assertTrue(np.allclose(wrl.io.map_RB_data(indata, 16), outdata16))
self.assertTrue(np.allclose(wrl.io.map_RB_data(indata, 32), outdata32))
def test_get_RB_blob_data(self):
datastring = b'<BLOB blobid="0" size="737" compression="qt"></BLOB>'
self.assertRaises(EOFError, lambda: wrl.io.get_RB_blob_data(datastring, 1))
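# Illustrative sketch (not part of the original test suite): the read path the
# RADOLAN tests above exercise, applied to a hypothetical local RW composite
# file; the file name below is only an example.
def example_read_rw_composite(filename='raa01-rw_10000-1408030950-dwd---bin.gz'):
    # read_RADOLAN_composite returns the 900x900 data array and the parsed
    # header attributes (datetime, precision, nodataflag, ...).
    data, attrs = wrl.io.read_RADOLAN_composite(filename)
    return data.shape, attrs['datetime']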
if __name__ == '__main__':
unittest.main()
| jjhelmus/wradlib | wradlib/tests/test_io.py | Python | mit | 16,073 |
#
# deluge/ui/web/server.py
#
# Copyright (C) 2009-2010 Damien Churchill <damoxc@gmail.com>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
import os
import time
import locale
import shutil
import urllib
import fnmatch
import gettext
import hashlib
import logging
import tempfile
import mimetypes
import pkg_resources
from twisted.application import service, internet
from twisted.internet import reactor, defer, error
from twisted.internet.ssl import SSL
from twisted.web import http, resource, server, static
from deluge import common, component, configmanager
from deluge.core.rpcserver import check_ssl_keys
from deluge.log import setupLogger, LOG as _log
from deluge.ui import common as uicommon
from deluge.ui.tracker_icons import TrackerIcons
from deluge.ui.web.auth import Auth
from deluge.ui.web.common import Template, compress
from deluge.ui.web.json_api import JSON, WebApi
from deluge.ui.web.pluginmanager import PluginManager
log = logging.getLogger(__name__)
# Initialize gettext
try:
locale.setlocale(locale.LC_ALL, "")
if hasattr(locale, "bindtextdomain"):
locale.bindtextdomain("deluge", pkg_resources.resource_filename("deluge", "i18n"))
if hasattr(locale, "textdomain"):
locale.textdomain("deluge")
gettext.bindtextdomain("deluge", pkg_resources.resource_filename("deluge", "i18n"))
gettext.textdomain("deluge")
gettext.install("deluge", pkg_resources.resource_filename("deluge", "i18n"))
except Exception, e:
log.error("Unable to initialize gettext/locale: %s", e)
_ = gettext.gettext
current_dir = os.path.dirname(__file__)
CONFIG_DEFAULTS = {
# Misc Settings
"enabled_plugins": [],
"default_daemon": "",
# Auth Settings
"pwd_salt": "c26ab3bbd8b137f99cd83c2c1c0963bcc1a35cad",
"pwd_sha1": "2ce1a410bcdcc53064129b6d950f2e9fee4edc1e",
"session_timeout": 3600,
"sessions": {},
# UI Settings
"sidebar_show_zero": False,
"sidebar_multiple_filters": True,
"show_session_speed": False,
"show_sidebar": True,
"theme": "gray",
"first_login": True,
# Server Settings
"base": "/",
"port": 8112,
"https": False,
"pkey": "ssl/daemon.pkey",
"cert": "ssl/daemon.cert"
}
UI_CONFIG_KEYS = (
"theme", "sidebar_show_zero", "sidebar_multiple_filters",
"show_session_speed", "base", "first_login"
)
OLD_CONFIG_KEYS = (
"port", "enabled_plugins", "base", "sidebar_show_zero",
"sidebar_show_trackers", "show_keyword_search", "show_sidebar",
"https"
)
def rpath(*paths):
"""Convert a relative path into an absolute path relative to the location
of this script.
"""
return os.path.join(current_dir, *paths)
class GetText(resource.Resource):
def render(self, request):
request.setHeader("content-type", "text/javascript; encoding=utf-8")
template = Template(filename=rpath("gettext.js"))
return compress(template.render(), request)
class Upload(resource.Resource):
"""
Twisted Web resource to handle file uploads
"""
def render(self, request):
"""
Saves all uploaded files to the disk and returns a list of filenames,
each on a new line.
"""
# Block all other HTTP methods.
if request.method != "POST":
request.setResponseCode(http.NOT_ALLOWED)
return ""
if "file" not in request.args:
request.setResponseCode(http.OK)
return common.json.dumps({
'success': True,
'files': []
})
tempdir = tempfile.mkdtemp(prefix="delugeweb-")
log.debug("uploading files to %s", tempdir)
filenames = []
for upload in request.args.get("file"):
fd, fn = tempfile.mkstemp('.torrent', dir=tempdir)
os.write(fd, upload)
os.close(fd)
filenames.append(fn)
log.debug("uploaded %d file(s)", len(filenames))
request.setHeader("content-type", "text/html")
request.setResponseCode(http.OK)
return compress(common.json.dumps({
'success': True,
'files': filenames
}), request)
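# Illustrative sketch (not part of Deluge): exercising the Upload resource
# above from a client with the third-party ``requests`` library. Host, port
# and torrent file name are hypothetical; the resource is mounted at /upload
# by TopLevel further down in this module.
def _example_upload_torrent():
    import requests
    with open("example.torrent", "rb") as torrent:
        reply = requests.post("http://127.0.0.1:8112/upload",
                              files={"file": torrent})
    # The JSON body mirrors what Upload.render() returns.
    return reply.json()["files"]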
class Render(resource.Resource):
def getChild(self, path, request):
request.render_file = path
return self
def render(self, request):
if not hasattr(request, "render_file"):
request.setResponseCode(http.INTERNAL_SERVER_ERROR)
return ""
filename = os.path.join("render", request.render_file)
template = Template(filename=rpath(filename))
request.setHeader("content-type", "text/html")
request.setResponseCode(http.OK)
return compress(template.render(), request)
class Tracker(resource.Resource):
def __init__(self):
resource.Resource.__init__(self)
try:
self.tracker_icons = component.get("TrackerIcons")
except KeyError:
self.tracker_icons = TrackerIcons()
def getChild(self, path, request):
request.tracker_name = path
return self
def on_got_icon(self, icon, request):
headers = {}
if icon:
request.setHeader("cache-control",
"public, must-revalidate, max-age=86400")
request.setHeader("content-type", icon.get_mimetype())
request.setResponseCode(http.OK)
request.write(icon.get_data())
request.finish()
else:
request.setResponseCode(http.NOT_FOUND)
request.finish()
def render(self, request):
d = self.tracker_icons.get(request.tracker_name)
d.addCallback(self.on_got_icon, request)
return server.NOT_DONE_YET
class Flag(resource.Resource):
def getChild(self, path, request):
request.country = path
return self
def render(self, request):
headers = {}
path = ("data", "pixmaps", "flags", request.country.lower() + ".png")
filename = pkg_resources.resource_filename("deluge",
os.path.join(*path))
if os.path.exists(filename):
request.setHeader("cache-control",
"public, must-revalidate, max-age=86400")
request.setHeader("content-type", "image/png")
data = open(filename, "rb")
request.setResponseCode(http.OK)
return data.read()
else:
request.setResponseCode(http.NOT_FOUND)
return ""
class LookupResource(resource.Resource, component.Component):
def __init__(self, name, *directories):
resource.Resource.__init__(self)
component.Component.__init__(self, name)
self.__paths = {}
for directory in directories:
self.addDirectory(directory)
def addDirectory(self, directory, path=""):
log.debug("Adding directory `%s` with path `%s`", directory, path)
paths = self.__paths.setdefault(path, [])
paths.append(directory)
def removeDirectory(self, directory, path=""):
log.debug("Removing directory `%s`", directory)
self.__paths[path].remove(directory)
def getChild(self, path, request):
if hasattr(request, 'lookup_path'):
request.lookup_path = os.path.join(request.lookup_path, path)
else:
request.lookup_path = path
return self
def render(self, request):
log.debug("Requested path: '%s'", request.lookup_path)
path = os.path.dirname(request.lookup_path)
if path not in self.__paths:
request.setResponseCode(http.NOT_FOUND)
return "<h1>404 - Not Found</h1>"
filename = os.path.basename(request.path)
for directory in self.__paths[path]:
            if os.path.isfile(os.path.join(directory, filename)):
path = os.path.join(directory, filename)
log.debug("Serving path: '%s'", path)
mime_type = mimetypes.guess_type(path)
request.setHeader("content-type", mime_type[0])
return compress(open(path, "rb").read(), request)
request.setResponseCode(http.NOT_FOUND)
return "<h1>404 - Not Found</h1>"
class ScriptResource(resource.Resource, component.Component):
def __init__(self):
resource.Resource.__init__(self)
component.Component.__init__(self, "Scripts")
self.__scripts = {
"normal": {
"scripts": {},
"order": []
},
"debug": {
"scripts": {},
"order": []
},
"dev": {
"scripts": {},
"order": []
}
}
def add_script(self, path, filepath, type=None):
"""
Adds a script or scripts to the script resource.
:param path: The path of the script (this supports globbing)
:type path: string
:param filepath: The physical location of the script
:type filepath: string
:keyword type: The type of script to add (normal, debug, dev)
:param type: string
"""
if type not in ("dev", "debug", "normal"):
type = "normal"
self.__scripts[type]["scripts"][path] = filepath
self.__scripts[type]["order"].append(path)
def add_script_folder(self, path, filepath, type=None, recurse=True):
"""
Adds a folder of scripts to the script resource.
:param path: The path of the folder
:type path: string
:param filepath: The physical location of the script
:type filepath: string
:keyword type: The type of script to add (normal, debug, dev)
:param type: string
:keyword recurse: Whether or not to recurse into other folders
:param recurse: bool
"""
if type not in ("dev", "debug", "normal"):
type = "normal"
self.__scripts[type]["scripts"][path] = (filepath, recurse)
self.__scripts[type]["order"].append(path)
def remove_script(self, path, type=None):
"""
Removes a script or folder of scripts from the script resource.
:param path: The path of the folder
:type path: string
:keyword type: The type of script to add (normal, debug, dev)
:param type: string
"""
if type not in ("dev", "debug", "normal"):
type = "normal"
del self.__scripts[type]["scripts"][path]
self.__scripts[type]["order"].remove(path)
def get_scripts(self, type=None):
"""
Returns a list of the scripts that can be used for producing
script tags.
:keyword type: The type of scripts to get (normal, debug, dev)
:param type: string
"""
scripts = []
if type not in ("dev", "debug", "normal"):
type = 'normal'
_scripts = self.__scripts[type]["scripts"]
_order = self.__scripts[type]["order"]
for path in _order:
filepath = _scripts[path]
# this is a folder
if isinstance(filepath, tuple):
filepath, recurse = filepath
if recurse:
for dirpath, dirnames, filenames in os.walk(filepath, False):
files = fnmatch.filter(filenames, "*.js")
files.sort()
order_file = os.path.join(dirpath, '.order')
if os.path.isfile(order_file):
for line in open(order_file, 'rb'):
line = line.strip()
if not line or line[0] == '#':
continue
try:
pos, filename = line.split()
files.pop(files.index(filename))
if pos == '+':
files.insert(0, filename)
else:
files.append(filename)
except:
pass
dirpath = dirpath[len(filepath)+1:]
if dirpath:
scripts.extend(['js/' + path + '/' + dirpath + '/' + f for f in files])
else:
scripts.extend(['js/' + path + '/' + f for f in files])
                else:
                    files = fnmatch.filter(os.listdir(filepath), "*.js")
                    files.sort()
                    scripts.extend(['js/' + path + '/' + f for f in files])
else:
scripts.append("js/" + path)
return scripts
def getChild(self, path, request):
if hasattr(request, "lookup_path"):
request.lookup_path += '/' + path
else:
request.lookup_path = path
return self
def render(self, request):
log.debug("Requested path: '%s'", request.lookup_path)
for type in ("dev", "debug", "normal"):
scripts = self.__scripts[type]["scripts"]
for pattern in scripts:
if not request.lookup_path.startswith(pattern):
continue
filepath = scripts[pattern]
if isinstance(filepath, tuple):
filepath = filepath[0]
path = filepath + request.lookup_path[len(pattern):]
if not os.path.isfile(path):
continue
log.debug("Serving path: '%s'", path)
mime_type = mimetypes.guess_type(path)
request.setHeader("content-type", mime_type[0])
return compress(open(path, "rb").read(), request)
request.setResponseCode(http.NOT_FOUND)
return "<h1>404 - Not Found</h1>"
class TopLevel(resource.Resource):
addSlash = True
__stylesheets = [
"css/ext-all-notheme.css",
"css/ext-extensions.css",
"css/deluge.css"
]
def __init__(self):
resource.Resource.__init__(self)
self.putChild("css", LookupResource("Css", rpath("css")))
self.putChild("gettext.js", GetText())
self.putChild("flag", Flag())
self.putChild("icons", LookupResource("Icons", rpath("icons")))
self.putChild("images", LookupResource("Images", rpath("images")))
js = ScriptResource()
# configure the dev scripts
js.add_script("ext-base-debug.js", rpath("js", "ext-base-debug.js"), "dev")
js.add_script("ext-all-debug.js", rpath("js", "ext-all-debug.js"), "dev")
js.add_script_folder("ext-extensions", rpath("js", "ext-extensions"), "dev")
js.add_script_folder("deluge-all", rpath("js", "deluge-all"), "dev")
# configure the debug scripts
js.add_script("ext-base-debug.js", rpath("js", "ext-base-debug.js"), "debug")
js.add_script("ext-all-debug.js", rpath("js", "ext-all-debug.js"), "debug")
js.add_script("ext-extensions-debug.js", rpath("js", "ext-extensions-debug.js"), "debug")
js.add_script("deluge-all-debug.js", rpath("js", "deluge-all-debug.js"), "debug")
# configure the normal scripts
js.add_script("ext-base.js", rpath("js", "ext-base.js"))
js.add_script("ext-all.js", rpath("js", "ext-all.js"))
js.add_script("ext-extensions.js", rpath("js", "ext-extensions.js"))
js.add_script("deluge-all.js", rpath("js", "deluge-all.js"))
self.putChild("js", js)
self.putChild("json", JSON())
self.putChild("upload", Upload())
self.putChild("render", Render())
self.putChild("themes", static.File(rpath("themes")))
self.putChild("tracker", Tracker())
theme = component.get("DelugeWeb").config["theme"]
if not os.path.isfile(rpath("themes", "css", "xtheme-%s.css" % theme)):
theme = CONFIG_DEFAULTS.get("theme")
self.__stylesheets.insert(1, "themes/css/xtheme-%s.css" % theme)
@property
def stylesheets(self):
return self.__stylesheets
def add_script(self, script):
"""
Adds a script to the server so it is included in the <head> element
of the index page.
:param script: The path to the script
:type script: string
"""
self.__scripts.append(script)
self.__debug_scripts.append(script)
def remove_script(self, script):
"""
Removes a script from the server.
:param script: The path to the script
:type script: string
"""
self.__scripts.remove(script)
self.__debug_scripts.remove(script)
def getChild(self, path, request):
if path == "":
return self
else:
return resource.Resource.getChild(self, path, request)
def getChildWithDefault(self, path, request):
# Calculate the request base
header = request.getHeader('x-deluge-base')
base = header if header else component.get("DelugeWeb").base
# validate the base parameter
if not base:
base = '/'
if base[0] != '/':
base = '/' + base
if base[-1] != '/':
base += '/'
request.base = base.encode('idna')
return resource.Resource.getChildWithDefault(self, path, request)
def render(self, request):
debug = False
if 'debug' in request.args:
debug_arg = request.args.get('debug')[-1]
if debug_arg in ('true', 'yes', '1'):
debug = True
else:
debug = False
dev = 'dev' in common.get_version()
if 'dev' in request.args:
dev_arg = request.args.get('dev')[-1]
            if dev_arg in ('true', 'yes', '1'):
dev = True
else:
dev = False
if dev:
mode = 'dev'
elif debug:
mode = 'debug'
else:
mode = None
scripts = component.get("Scripts").get_scripts(mode)
scripts.insert(0, "gettext.js")
template = Template(filename=rpath("index.html"))
request.setHeader("content-type", "text/html; charset=utf-8")
web_config = component.get("Web").get_config()
web_config["base"] = request.base
config = dict([(key, web_config[key]) for key in UI_CONFIG_KEYS])
js_config = common.json.dumps(config)
return template.render(scripts=scripts, stylesheets=self.stylesheets,
debug=debug, base=request.base, js_config=js_config)
class ServerContextFactory:
def getContext(self):
"""Creates an SSL context."""
ctx = SSL.Context(SSL.SSLv3_METHOD)
deluge_web = component.get("DelugeWeb")
log.debug("Enabling SSL using:")
log.debug("Pkey: %s", deluge_web.pkey)
log.debug("Cert: %s", deluge_web.cert)
ctx.use_privatekey_file(configmanager.get_config_dir(deluge_web.pkey))
ctx.use_certificate_chain_file(configmanager.get_config_dir(deluge_web.cert))
return ctx
class DelugeWeb(component.Component):
def __init__(self):
super(DelugeWeb, self).__init__("DelugeWeb")
self.config = configmanager.ConfigManager("web.conf", CONFIG_DEFAULTS)
# Check to see if a configuration from the web interface prior to 1.2
# exists and convert it over.
if os.path.exists(configmanager.get_config_dir("webui06.conf")):
old_config = configmanager.ConfigManager("webui06.conf")
if old_config.config:
# we have an old config file here to handle so we should move
# all the values across to the new config file, and then remove
# it.
for key in OLD_CONFIG_KEYS:
if key in old_config:
self.config[key] = old_config[key]
# We need to base64 encode the passwords since json can't handle
# them otherwise.
from base64 import encodestring
self.config["old_pwd_md5"] = encodestring(old_config["pwd_md5"])
self.config["old_pwd_salt"] = encodestring(old_config["pwd_salt"])
# Save our config and if it saved successfully then rename the
# old configuration file.
if self.config.save():
config_dir = os.path.dirname(old_config.config_file)
backup_path = os.path.join(config_dir, 'web.conf.old')
os.rename(old_config.config_file, backup_path)
del old_config
self.socket = None
self.top_level = TopLevel()
self.site = server.Site(self.top_level)
self.port = self.config["port"]
self.https = self.config["https"]
self.pkey = self.config["pkey"]
self.cert = self.config["cert"]
self.base = self.config["base"]
self.web_api = WebApi()
self.auth = Auth()
# Initalize the plugins
self.plugins = PluginManager()
def install_signal_handlers(self):
# Since twisted assigns itself all the signals may as well make
# use of it.
reactor.addSystemEventTrigger("after", "shutdown", self.shutdown)
# Twisted doesn't handle windows specific signals so we still
# need to attach to those to handle the close correctly.
if common.windows_check():
from win32api import SetConsoleCtrlHandler
from win32con import CTRL_CLOSE_EVENT, CTRL_SHUTDOWN_EVENT
def win_handler(ctrl_type):
log.debug("ctrl type: %s", ctrl_type)
if ctrl_type == CTRL_CLOSE_EVENT or \
ctrl_type == CTRL_SHUTDOWN_EVENT:
self.shutdown()
return 1
SetConsoleCtrlHandler(win_handler)
def start(self, start_reactor=True):
log.info("%s %s.", _("Starting server in PID"), os.getpid())
if self.https:
self.start_ssl()
else:
self.start_normal()
component.get("JSON").enable()
if start_reactor:
reactor.run()
def start_normal(self):
self.socket = reactor.listenTCP(self.port, self.site)
log.info("serving on %s:%s view at http://127.0.0.1:%s", "0.0.0.0",
self.port, self.port)
def start_ssl(self):
check_ssl_keys()
self.socket = reactor.listenSSL(self.port, self.site, ServerContextFactory())
log.info("serving on %s:%s view at https://127.0.0.1:%s", "0.0.0.0",
self.port, self.port)
def stop(self):
log.info("Shutting down webserver")
component.get("JSON").disable()
self.plugins.disable_plugins()
log.debug("Saving configuration file")
self.config.save()
if self.socket:
d = self.socket.stopListening()
self.socket = None
else:
d = defer.Deferred()
d.callback(False)
return d
def shutdown(self, *args):
self.stop()
try:
reactor.stop()
except error.ReactorNotRunning:
log.debug("Reactor not running")
if __name__ == "__builtin__":
deluge_web = DelugeWeb()
application = service.Application("DelugeWeb")
sc = service.IServiceCollection(application)
i = internet.TCPServer(deluge_web.port, deluge_web.site)
i.setServiceParent(sc)
elif __name__ == "__main__":
deluge_web = DelugeWeb()
deluge_web.start()
| inaz2/deluge-hack | deluge/ui/web/server.py | Python | gpl-3.0 | 24,897 |
"""Support for PlayStation 4 consoles."""
import logging
import asyncio
import pyps4_2ndscreen.ps4 as pyps4
from pyps4_2ndscreen.errors import NotReady
from homeassistant.core import callback
from homeassistant.components.media_player import ENTITY_IMAGE_URL, MediaPlayerDevice
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_TITLE,
MEDIA_TYPE_GAME,
MEDIA_TYPE_APP,
SUPPORT_SELECT_SOURCE,
SUPPORT_PAUSE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.components.ps4 import format_unique_id, load_games, save_games
from homeassistant.const import (
ATTR_LOCKED,
CONF_HOST,
CONF_NAME,
CONF_REGION,
CONF_TOKEN,
STATE_IDLE,
STATE_STANDBY,
STATE_PLAYING,
)
from homeassistant.helpers import device_registry, entity_registry
from .const import (
ATTR_MEDIA_IMAGE_URL,
DEFAULT_ALIAS,
DOMAIN as PS4_DOMAIN,
PS4_DATA,
REGIONS as deprecated_regions,
)
_LOGGER = logging.getLogger(__name__)
SUPPORT_PS4 = (
SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_SELECT_SOURCE
)
ICON = "mdi:playstation"
MEDIA_IMAGE_DEFAULT = None
DEFAULT_RETRIES = 2
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up PS4 from a config entry."""
config = config_entry
creds = config.data[CONF_TOKEN]
device_list = []
for device in config.data["devices"]:
host = device[CONF_HOST]
region = device[CONF_REGION]
name = device[CONF_NAME]
ps4 = pyps4.Ps4Async(host, creds, device_name=DEFAULT_ALIAS)
device_list.append(PS4Device(config, name, host, region, ps4, creds))
async_add_entities(device_list, update_before_add=True)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Not Implemented."""
pass
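# Illustrative sketch (not part of the integration): the config entry data
# shape that async_setup_entry() above consumes; all values here are
# hypothetical examples.
def _example_entry_data():
    return {
        CONF_TOKEN: "64-character-credential-string",
        "devices": [
            {
                CONF_HOST: "192.168.0.42",
                CONF_REGION: "United States",
                CONF_NAME: "PlayStation 4",
            }
        ],
    }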
class PS4Device(MediaPlayerDevice):
"""Representation of a PS4."""
def __init__(self, config, name, host, region, ps4, creds):
"""Initialize the ps4 device."""
self._entry_id = config.entry_id
self._ps4 = ps4
self._host = host
self._name = name
self._region = region
self._creds = creds
self._state = None
self._media_content_id = None
self._media_title = None
self._media_image = None
self._media_type = None
self._source = None
self._games = {}
self._source_list = []
self._retry = 0
self._disconnected = False
self._info = None
self._unique_id = None
@callback
def status_callback(self):
"""Handle status callback. Parse status."""
self._parse_status()
@callback
def schedule_update(self):
"""Schedules update with HA."""
self.async_schedule_update_ha_state()
@callback
def subscribe_to_protocol(self):
"""Notify protocol to callback with update changes."""
self.hass.data[PS4_DATA].protocol.add_callback(self._ps4, self.status_callback)
@callback
def unsubscribe_to_protocol(self):
"""Notify protocol to remove callback."""
self.hass.data[PS4_DATA].protocol.remove_callback(
self._ps4, self.status_callback
)
def check_region(self):
"""Display logger msg if region is deprecated."""
# Non-Breaking although data returned may be inaccurate.
if self._region in deprecated_regions:
_LOGGER.info(
"""Region: %s has been deprecated.
Please remove PS4 integration
and Re-configure again to utilize
current regions""",
self._region,
)
async def async_added_to_hass(self):
"""Subscribe PS4 events."""
self.hass.data[PS4_DATA].devices.append(self)
self.check_region()
async def async_update(self):
"""Retrieve the latest data."""
if self._ps4.ddp_protocol is not None:
# Request Status with asyncio transport.
self._ps4.get_status()
# Don't attempt to connect if entity is connected or if,
# PS4 is in standby or disconnected from LAN or powered off.
if (
not self._ps4.connected
and not self._ps4.is_standby
and self._ps4.is_available
):
try:
await self._ps4.async_connect()
except NotReady:
pass
# Try to ensure correct status is set on startup for device info.
if self._ps4.ddp_protocol is None:
# Use socket.socket.
await self.hass.async_add_executor_job(self._ps4.get_status)
if self._info is None:
# Add entity to registry.
await self.async_get_device_info(self._ps4.status)
self._ps4.ddp_protocol = self.hass.data[PS4_DATA].protocol
self.subscribe_to_protocol()
self._parse_status()
def _parse_status(self):
"""Parse status."""
status = self._ps4.status
if status is not None:
self._games = load_games(self.hass)
if self._games:
self.get_source_list()
self._retry = 0
self._disconnected = False
if status.get("status") == "Ok":
title_id = status.get("running-app-titleid")
name = status.get("running-app-name")
if title_id and name is not None:
self._state = STATE_PLAYING
if self._media_content_id != title_id:
self._media_content_id = title_id
if self._use_saved():
_LOGGER.debug("Using saved data for media: %s", title_id)
self.schedule_update()
return
self._media_title = name
self._source = self._media_title
self._media_type = None
# Get data from PS Store.
asyncio.ensure_future(self.async_get_title_data(title_id, name))
else:
if self._state != STATE_IDLE:
self.idle()
else:
if self._state != STATE_STANDBY:
self.state_standby()
elif self._retry > DEFAULT_RETRIES:
self.state_unknown()
else:
self._retry += 1
def _use_saved(self) -> bool:
"""Return True, Set media attrs if data is locked."""
if self._media_content_id in self._games:
store = self._games[self._media_content_id]
# If locked get attributes from file.
locked = store.get(ATTR_LOCKED)
if locked:
self._media_title = store.get(ATTR_MEDIA_TITLE)
self._source = self._media_title
self._media_image = store.get(ATTR_MEDIA_IMAGE_URL)
self._media_type = store.get(ATTR_MEDIA_CONTENT_TYPE)
return True
return False
def idle(self):
"""Set states for state idle."""
self.reset_title()
self._state = STATE_IDLE
self.schedule_update()
def state_standby(self):
"""Set states for state standby."""
self.reset_title()
self._state = STATE_STANDBY
self.schedule_update()
def state_unknown(self):
"""Set states for state unknown."""
self.reset_title()
self._state = None
if self._disconnected is False:
_LOGGER.warning("PS4 could not be reached")
self._disconnected = True
self._retry = 0
def reset_title(self):
"""Update if there is no title."""
self._media_title = None
self._media_content_id = None
self._media_type = None
self._source = None
async def async_get_title_data(self, title_id, name):
"""Get PS Store Data."""
from pyps4_2ndscreen.errors import PSDataIncomplete
app_name = None
art = None
media_type = None
try:
title = await self._ps4.async_get_ps_store_data(
name, title_id, self._region
)
except PSDataIncomplete:
title = None
except asyncio.TimeoutError:
title = None
_LOGGER.error("PS Store Search Timed out")
else:
if title is not None:
app_name = title.name
art = title.cover_art
# Assume media type is game if not app.
if title.game_type != "App":
media_type = MEDIA_TYPE_GAME
else:
media_type = MEDIA_TYPE_APP
else:
_LOGGER.error(
"Could not find data in region: %s for PS ID: %s",
self._region,
title_id,
)
finally:
self._media_title = app_name or name
self._source = self._media_title
self._media_image = art or None
self._media_type = media_type
self.update_list()
self.schedule_update()
def update_list(self):
"""Update Game List, Correct data if different."""
if self._media_content_id in self._games:
store = self._games[self._media_content_id]
if (
store.get(ATTR_MEDIA_TITLE) != self._media_title
or store.get(ATTR_MEDIA_IMAGE_URL) != self._media_image
):
self._games.pop(self._media_content_id)
if self._media_content_id not in self._games:
self.add_games(
self._media_content_id,
self._media_title,
self._media_image,
self._media_type,
)
self._games = load_games(self.hass)
self.get_source_list()
def get_source_list(self):
"""Parse data entry and update source list."""
games = []
for data in self._games.values():
games.append(data[ATTR_MEDIA_TITLE])
self._source_list = sorted(games)
def add_games(self, title_id, app_name, image, g_type, is_locked=False):
"""Add games to list."""
games = self._games
if title_id is not None and title_id not in games:
game = {
title_id: {
ATTR_MEDIA_TITLE: app_name,
ATTR_MEDIA_IMAGE_URL: image,
ATTR_MEDIA_CONTENT_TYPE: g_type,
ATTR_LOCKED: is_locked,
}
}
games.update(game)
save_games(self.hass, games)
async def async_get_device_info(self, status):
"""Set device info for registry."""
# If cannot get status on startup, assume info from registry.
if status is None:
_LOGGER.info("Assuming status from registry")
e_registry = await entity_registry.async_get_registry(self.hass)
d_registry = await device_registry.async_get_registry(self.hass)
for entity_id, entry in e_registry.entities.items():
if entry.config_entry_id == self._entry_id:
self._unique_id = entry.unique_id
self.entity_id = entity_id
break
for device in d_registry.devices.values():
if self._entry_id in device.config_entries:
self._info = {
"name": device.name,
"model": device.model,
"identifiers": device.identifiers,
"manufacturer": device.manufacturer,
"sw_version": device.sw_version,
}
break
else:
_sw_version = status["system-version"]
_sw_version = _sw_version[1:4]
sw_version = "{}.{}".format(_sw_version[0], _sw_version[1:])
self._info = {
"name": status["host-name"],
"model": "PlayStation 4",
"identifiers": {(PS4_DOMAIN, status["host-id"])},
"manufacturer": "Sony Interactive Entertainment Inc.",
"sw_version": sw_version,
}
self._unique_id = format_unique_id(self._creds, status["host-id"])
async def async_will_remove_from_hass(self):
"""Remove Entity from Hass."""
# Close TCP Transport.
if self._ps4.connected:
await self._ps4.close()
self.unsubscribe_to_protocol()
self.hass.data[PS4_DATA].devices.remove(self)
@property
def device_info(self):
"""Return information about the device."""
return self._info
@property
def unique_id(self):
"""Return Unique ID for entity."""
return self._unique_id
@property
def entity_picture(self):
"""Return picture."""
if self._state == STATE_PLAYING and self._media_content_id is not None:
image_hash = self.media_image_hash
if image_hash is not None:
return ENTITY_IMAGE_URL.format(
self.entity_id, self.access_token, image_hash
)
return MEDIA_IMAGE_DEFAULT
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def icon(self):
"""Icon."""
return ICON
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._media_content_id
@property
def media_content_type(self):
"""Content type of current playing media."""
return self._media_type
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._media_content_id is None:
return MEDIA_IMAGE_DEFAULT
return self._media_image
@property
def media_title(self):
"""Title of current playing media."""
return self._media_title
@property
def supported_features(self):
"""Media player features that are supported."""
return SUPPORT_PS4
@property
def source(self):
"""Return the current input source."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
async def async_turn_off(self):
"""Turn off media player."""
await self._ps4.standby()
async def async_turn_on(self):
"""Turn on the media player."""
self._ps4.wakeup()
async def async_media_pause(self):
"""Send keypress ps to return to menu."""
await self.async_send_remote_control("ps")
async def async_media_stop(self):
"""Send keypress ps to return to menu."""
await self.async_send_remote_control("ps")
async def async_select_source(self, source):
"""Select input source."""
for title_id, data in self._games.items():
game = data[ATTR_MEDIA_TITLE]
if (
source.lower().encode(encoding="utf-8")
== game.lower().encode(encoding="utf-8")
or source == title_id
):
_LOGGER.debug(
"Starting PS4 game %s (%s) using source %s", game, title_id, source
)
await self._ps4.start_title(title_id, self._media_content_id)
return
_LOGGER.warning("Could not start title. '%s' is not in source list", source)
return
async def async_send_command(self, command):
"""Send Button Command."""
await self.async_send_remote_control(command)
async def async_send_remote_control(self, command):
"""Send RC command."""
await self._ps4.remote_control(command)
| joopert/home-assistant | homeassistant/components/ps4/media_player.py | Python | apache-2.0 | 16,283 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Database models for collections."""
# General imports.
import re
from operator import itemgetter
from flask import g, url_for
from intbitset import intbitset
from invenio.base.globals import cfg
from invenio.base.i18n import _, gettext_set_language
from invenio.ext.sqlalchemy import db
from invenio.ext.sqlalchemy.utils import attribute_multi_dict_collection
from invenio.modules.formatter.registry import output_formats
from invenio.modules.search.models import Field, Fieldvalue
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm.collections import attribute_mapped_collection
from werkzeug.utils import cached_property
# Create your models here.
external_collection_mapper = attribute_multi_dict_collection(
creator=lambda k, v: CollectionExternalcollection(type=k,
externalcollection=v),
key_attr=lambda obj: obj.type,
val_attr=lambda obj: obj.externalcollection)
class Collection(db.Model):
"""Represent a Collection record."""
def __repr__(self):
"""Return class representation."""
        return 'Collection <id: {0.id}, name: {0.name}, dbquery: {0.dbquery}, ' \
'nbrecs: {0.nbrecs}>'.format(self)
def __unicode__(self):
suffix = ' ({0})'.format(_('default')) if self.id == 1 else ''
return u"{0.id}. {0.name}{1}".format(self, suffix)
def __str__(self):
return unicode(self).encode('utf-8')
__tablename__ = 'collection'
id = db.Column(db.MediumInteger(9, unsigned=True),
primary_key=True)
name = db.Column(db.String(255), unique=True, index=True,
nullable=False)
dbquery = db.Column(db.Text(20), nullable=True,
index=True)
@property
def nbrecs(self):
"""Number of records in the collection."""
from .cache import get_collection_nbrecs
return get_collection_nbrecs(self.name)
@property
def reclist(self):
"""Return hit set with record identifiers."""
from .cache import get_collection_reclist
return get_collection_reclist(self.name)
@property
def is_hosted(self):
"""Return True if collection is hosted elsewhere."""
return self.dbquery.startswith('hostedcollection:') if self.dbquery \
else False
_names = db.relationship(lambda: Collectionname,
backref='collection',
collection_class=attribute_mapped_collection(
'ln_type'),
cascade="all, delete, delete-orphan")
names = association_proxy(
'_names', 'value',
creator=lambda k, v: Collectionname(ln_type=k, value=v)
)
_boxes = db.relationship(lambda: Collectionboxname,
backref='collection',
collection_class=attribute_mapped_collection(
'ln_type'),
cascade="all, delete, delete-orphan")
boxes = association_proxy(
'_boxes', 'value',
creator=lambda k, v: Collectionboxname(ln_type=k, value=v)
)
_formatoptions = association_proxy('formats', 'format')
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def formatoptions(self):
"""Return list of format options."""
if len(self._formatoptions):
return [dict(f) for f in self._formatoptions]
else:
return [{'code': u'hb',
'name': _("HTML %(format)s", format=_("brief")),
'content_type': u'text/html'}]
formatoptions = property(formatoptions)
_examples_example = association_proxy('_examples', 'example')
@property
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def examples(self):
"""Return list of example queries."""
return list(self._examples_example)
@property
def name_ln(self):
from invenio.legacy.search_engine import get_coll_i18nname
return get_coll_i18nname(self.name,
getattr(g, 'ln', cfg['CFG_SITE_LANG']))
# Another possible implementation with cache memoize
# @cache.memoize
# try:
# return db.object_session(self).query(Collectionname).\
# with_parent(self).filter(db.and_(Collectionname.ln==g.ln,
# Collectionname.type=='ln')).first().value
# except Exception:
# return self.name
@property
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def portalboxes_ln(self):
return db.object_session(self).query(CollectionPortalbox).\
with_parent(self).\
options(db.joinedload_all(CollectionPortalbox.portalbox)).\
filter(CollectionPortalbox.ln == g.ln).\
order_by(db.desc(CollectionPortalbox.score)).all()
@property
def most_specific_dad(self):
results = sorted(
db.object_session(self).query(Collection).join(
Collection.sons
).filter(CollectionCollection.id_son == self.id).all(),
key=lambda c: c.nbrecs)
return results[0] if len(results) else None
@property
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def is_restricted(self):
"""Return ``True`` if the collection is restricted."""
from invenio.legacy.search_engine import collection_restricted_p
return collection_restricted_p(self.name)
@property
def type(self):
"""Return relation type."""
        p = re.compile(r"\d+:.*")
if self.dbquery is not None and \
p.match(self.dbquery.lower()):
return 'r'
else:
return 'v'
_collection_children = db.relationship(
lambda: CollectionCollection,
collection_class=ordering_list('score'),
primaryjoin=lambda: Collection.id == CollectionCollection.id_dad,
foreign_keys=lambda: CollectionCollection.id_dad,
order_by=lambda: db.asc(CollectionCollection.score)
)
_collection_children_r = db.relationship(
lambda: CollectionCollection,
collection_class=ordering_list('score'),
primaryjoin=lambda: db.and_(
Collection.id == CollectionCollection.id_dad,
CollectionCollection.type == 'r'),
foreign_keys=lambda: CollectionCollection.id_dad,
order_by=lambda: db.asc(CollectionCollection.score)
)
_collection_children_v = db.relationship(
lambda: CollectionCollection,
collection_class=ordering_list('score'),
primaryjoin=lambda: db.and_(
Collection.id == CollectionCollection.id_dad,
CollectionCollection.type == 'v'),
foreign_keys=lambda: CollectionCollection.id_dad,
order_by=lambda: db.asc(CollectionCollection.score)
)
collection_parents = db.relationship(
lambda: CollectionCollection,
collection_class=ordering_list('score'),
primaryjoin=lambda: Collection.id == CollectionCollection.id_son,
foreign_keys=lambda: CollectionCollection.id_son,
order_by=lambda: db.asc(CollectionCollection.score)
)
collection_children = association_proxy('_collection_children', 'son')
collection_children_r = association_proxy(
'_collection_children_r', 'son',
creator=lambda son: CollectionCollection(id_son=son.id, type='r')
)
collection_children_v = association_proxy(
'_collection_children_v', 'son',
creator=lambda son: CollectionCollection(id_son=son.id, type='v')
)
_externalcollections = db.relationship(
lambda: CollectionExternalcollection,
cascade="all, delete, delete-orphan"
)
def _externalcollections_type(type_):
return association_proxy(
'_externalcollections_' + str(type_),
'externalcollection',
creator=lambda ext: CollectionExternalcollection(
externalcollection=ext, type=type_))
externalcollections_0 = _externalcollections_type(0)
externalcollections_1 = _externalcollections_type(1)
externalcollections_2 = _externalcollections_type(2)
externalcollections = db.relationship(
lambda: CollectionExternalcollection,
collection_class=external_collection_mapper,
cascade="all, delete, delete-orphan"
)
# Search options
def _make_field_fieldvalue(type_):
return db.relationship(
lambda: CollectionFieldFieldvalue,
primaryjoin=lambda: db.and_(
Collection.id == CollectionFieldFieldvalue.id_collection,
CollectionFieldFieldvalue.type == type_),
order_by=lambda: CollectionFieldFieldvalue.score)
_search_within = _make_field_fieldvalue('sew')
_search_options = _make_field_fieldvalue('seo')
@property
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def search_within(self):
"""
Collect search within options.
"""
default = [('', _('any field'))]
found = [(o.field.code, o.field.name_ln) for o in self._search_within]
if not found:
found = [(f.name.replace(' ', ''), f.name_ln)
for f in Field.query.filter(Field.name.in_(
cfg['CFG_WEBSEARCH_SEARCH_WITHIN'])).all()]
return default + sorted(found, key=itemgetter(1))
@property
# @cache.memoize(make_name=lambda fname: fname + '::' + g.ln)
def search_options(self):
"""Return search options."""
return self._search_options
@cached_property
def ancestors(self):
"""Get list of parent collection ids."""
output = set([self])
for c in self.dads:
output |= c.dad.ancestors
return output
@cached_property
def ancestors_ids(self):
"""Get list of parent collection ids."""
output = intbitset([self.id])
for c in self.dads:
ancestors = c.dad.ancestors_ids
if self.id in ancestors:
raise
output |= ancestors
return output
@cached_property
def descendants_ids(self):
"""Get list of child collection ids."""
output = intbitset([self.id])
for c in self.sons:
descendants = c.son.descendants_ids
if self.id in descendants:
raise
output |= descendants
return output
# Gets the list of localized names as an array
collection_names = db.relationship(
lambda: Collectionname,
primaryjoin=lambda: Collection.id == Collectionname.id_collection,
foreign_keys=lambda: Collectionname.id_collection
)
def translation(self, lang):
"""Get the translation according to the language code."""
try:
return db.object_session(self).query(Collectionname).\
with_parent(self).filter(db.and_(
Collectionname.ln == lang,
Collectionname.type == 'ln'
)).first().value
except Exception:
return ""
@property
def sort_methods(self):
"""Get sort methods for collection.
        If no sort methods are defined for a collection, the root collection's
        sort methods are returned. If no methods are defined for the root
        collection, all possible sort methods are returned.
        Note: Both sorting methods and ranking methods are now defined via
the sorter.
"""
from invenio.modules.sorter.models import BsrMETHOD, \
Collection_bsrMETHOD
for coll_id in (self.id, 1):
methods = Collection_bsrMETHOD.query.filter_by(
id_collection=coll_id
).order_by(
Collection_bsrMETHOD.score
).options(
db.joinedload(Collection_bsrMETHOD.bsrMETHOD)
).all()
if len(methods) > 0:
return map(lambda obj: obj.bsrMETHOD, methods)
return BsrMETHOD.query.order_by(BsrMETHOD.name).all()
def get_collectionbox_name(self, ln=None, box_type="r"):
"""Return collection-specific labelling subtrees.
- 'Focus on': regular collection
- 'Narrow by': virtual collection
- 'Latest addition': boxes
If translation for given language does not exist, use label
for CFG_SITE_LANG. If no custom label is defined for
CFG_SITE_LANG, return default label for the box.
:param ln: the language of the label
:param box_type: can be 'r' (=Narrow by), 'v' (=Focus on),
'l' (=Latest additions)
"""
if ln is None:
ln = g.ln
collectionboxnamequery = db.object_session(self).query(
Collectionboxname).with_parent(self)
try:
collectionboxname = collectionboxnamequery.filter(db.and_(
Collectionboxname.ln == ln,
Collectionboxname.type == box_type,
)).one()
except Exception:
try:
collectionboxname = collectionboxnamequery.filter(db.and_(
                    Collectionboxname.ln == cfg['CFG_SITE_LANG'],
Collectionboxname.type == box_type,
)).one()
except Exception:
collectionboxname = None
if collectionboxname is None:
# load the right message language
_ = gettext_set_language(ln)
return _(Collectionboxname.TYPES.get(box_type, ''))
else:
return collectionboxname.value
portal_boxes_ln = db.relationship(
lambda: CollectionPortalbox,
collection_class=ordering_list('score'),
primaryjoin=lambda:
Collection.id == CollectionPortalbox.id_collection,
foreign_keys=lambda: CollectionPortalbox.id_collection,
order_by=lambda: db.asc(CollectionPortalbox.score))
def breadcrumbs(self, builder=None, ln=None):
"""Return breadcrumbs for collection."""
ln = cfg.get('CFG_SITE_LANG') if ln is None else ln
breadcrumbs = []
# Get breadcrumbs for most specific dad if it exists.
if self.most_specific_dad is not None:
breadcrumbs = self.most_specific_dad.breadcrumbs(builder=builder,
ln=ln)
if builder is not None:
crumb = builder(self)
else:
crumb = dict(
text=self.name_ln,
url=url_for('collections.collection', name=self.name))
breadcrumbs.append(crumb)
return breadcrumbs
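# Illustrative sketch (not part of Invenio): a custom ``builder`` callable for
# Collection.breadcrumbs(), returning the same dict shape as the default crumb
# built inside that method.
def _example_breadcrumb_builder(collection):
    return dict(text=collection.name_ln,
                url=url_for('collections.collection', name=collection.name))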
class Collectionname(db.Model):
"""Represent a Collectionname record."""
__tablename__ = 'collectionname'
id_collection = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Collection.id),
nullable=False, primary_key=True)
ln = db.Column(db.Char(5), nullable=False, primary_key=True,
server_default='')
type = db.Column(db.Char(3), nullable=False, primary_key=True,
server_default='sn')
value = db.Column(db.String(255), nullable=False)
@db.hybrid_property
def ln_type(self):
return (self.ln, self.type)
@ln_type.setter
def set_ln_type(self, value):
(self.ln, self.type) = value
class Collectionboxname(db.Model):
"""Represent a Collectionboxname record."""
__tablename__ = 'collectionboxname'
TYPES = {
'v': 'Focus on:',
'r': 'Narrow by collection:',
'l': 'Latest additions:',
}
id_collection = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Collection.id),
nullable=False, primary_key=True)
ln = db.Column(db.Char(5), nullable=False, primary_key=True,
server_default='')
type = db.Column(db.Char(3), nullable=False, primary_key=True,
server_default='r')
value = db.Column(db.String(255), nullable=False)
@db.hybrid_property
def ln_type(self):
return (self.ln, self.type)
@ln_type.setter
def set_ln_type(self, value):
(self.ln, self.type) = value
class Collectiondetailedrecordpagetabs(db.Model):
"""Represent a Collectiondetailedrecordpagetabs record."""
__tablename__ = 'collectiondetailedrecordpagetabs'
id_collection = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Collection.id),
nullable=False, primary_key=True)
tabs = db.Column(db.String(255), nullable=False,
server_default='')
collection = db.relationship(Collection,
backref='collectiondetailedrecordpagetabs')
class CollectionCollection(db.Model):
"""Represent a CollectionCollection record."""
__tablename__ = 'collection_collection'
id_dad = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Collection.id), primary_key=True)
id_son = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Collection.id), primary_key=True)
type = db.Column(db.Char(1), nullable=False,
server_default='r')
score = db.Column(db.TinyInteger(4, unsigned=True), nullable=False,
server_default='0')
son = db.relationship(Collection, primaryjoin=id_son == Collection.id,
backref='dads',
# FIX
# collection_class=db.attribute_mapped_collection('score'),
order_by=db.asc(score))
dad = db.relationship(Collection, primaryjoin=id_dad == Collection.id,
backref='sons', order_by=db.asc(score))
class Example(db.Model):
"""Represent a Example record."""
__tablename__ = 'example'
id = db.Column(db.MediumInteger(9, unsigned=True), primary_key=True,
autoincrement=True)
type = db.Column(db.Text, nullable=False)
body = db.Column(db.Text, nullable=False)
class CollectionExample(db.Model):
"""Represent a CollectionExample record."""
__tablename__ = 'collection_example'
id_collection = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Collection.id), primary_key=True)
id_example = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Example.id), primary_key=True)
score = db.Column(db.TinyInteger(4, unsigned=True), nullable=False,
server_default='0')
collection = db.relationship(Collection, backref='_examples',
order_by=score)
example = db.relationship(Example, backref='collections', order_by=score)
class Portalbox(db.Model):
"""Represent a Portalbox record."""
__tablename__ = 'portalbox'
id = db.Column(db.MediumInteger(9, unsigned=True), autoincrement=True,
primary_key=True)
title = db.Column(db.Text, nullable=False)
body = db.Column(db.Text, nullable=False)
def get_pbx_pos():
"""Returns a list of all the positions for a portalbox"""
position = {}
position["rt"] = "Right Top"
position["lt"] = "Left Top"
position["te"] = "Title Epilog"
position["tp"] = "Title Prolog"
position["ne"] = "Narrow by coll epilog"
position["np"] = "Narrow by coll prolog"
return position
class CollectionPortalbox(db.Model):
"""Represent a CollectionPortalbox record."""
__tablename__ = 'collection_portalbox'
id_collection = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Collection.id), primary_key=True)
id_portalbox = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Portalbox.id), primary_key=True)
ln = db.Column(db.Char(5), primary_key=True, server_default='',
nullable=False)
position = db.Column(db.Char(3), nullable=False,
server_default='top')
score = db.Column(db.TinyInteger(4, unsigned=True),
nullable=False,
server_default='0')
collection = db.relationship(Collection, backref='portalboxes',
order_by=score)
portalbox = db.relationship(Portalbox, backref='collections',
order_by=score)
class Externalcollection(db.Model):
"""Represent a Externalcollection record."""
__tablename__ = 'externalcollection'
id = db.Column(db.MediumInteger(9, unsigned=True),
primary_key=True)
name = db.Column(db.String(255), unique=True, nullable=False,
server_default='')
@property
def engine(self):
from invenio.legacy.websearch_external_collections.searcher import (
external_collections_dictionary
)
if self.name in external_collections_dictionary:
return external_collections_dictionary[self.name]
class CollectionExternalcollection(db.Model):
"""Represent a CollectionExternalcollection record."""
__tablename__ = 'collection_externalcollection'
id_collection = db.Column(db.MediumInteger(9,
unsigned=True),
db.ForeignKey(Collection.id), primary_key=True,
server_default='0')
id_externalcollection = db.Column(db.MediumInteger(9,
unsigned=True),
db.ForeignKey(Externalcollection.id),
primary_key=True,
server_default='0')
type = db.Column(db.TinyInteger(4, unsigned=True),
server_default='0',
nullable=False)
def _collection_type(type_):
return db.relationship(
Collection,
primaryjoin=lambda: db.and_(
CollectionExternalcollection.id_collection == Collection.id,
CollectionExternalcollection.type == type_),
backref='_externalcollections_{0}'.format(str(type_))
)
collection_0 = _collection_type(0)
collection_1 = _collection_type(1)
collection_2 = _collection_type(2)
externalcollection = db.relationship(Externalcollection)
class CollectionFormat(db.Model):
"""Represent a CollectionFormat record."""
__tablename__ = 'collection_format'
id_collection = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Collection.id), primary_key=True)
format_code = db.Column('format', db.String(10), primary_key=True)
score = db.Column(db.TinyInteger(4, unsigned=True),
nullable=False, server_default='0')
collection = db.relationship(
Collection, backref=db.backref(
'formats', order_by=db.desc(score)
), order_by=db.desc(score))
@property
def format(self):
"""Return output format definition."""
return output_formats[self.format_code]
class CollectionFieldFieldvalue(db.Model):
"""Represent a CollectionFieldFieldvalue record."""
__tablename__ = 'collection_field_fieldvalue'
id = db.Column(db.MediumInteger(9, unsigned=True), autoincrement=True,
primary_key=True, nullable=False)
id_collection = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Collection.id),
nullable=False)
id_field = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Field.id),
nullable=False)
_id_fieldvalue = db.Column(db.MediumInteger(9, unsigned=True),
db.ForeignKey(Fieldvalue.id),
nullable=True, default=None,
name="id_fieldvalue")
type = db.Column(db.Char(3), nullable=False,
server_default='src')
score = db.Column(db.TinyInteger(4, unsigned=True), nullable=False,
server_default='0')
score_fieldvalue = db.Column(db.TinyInteger(4, unsigned=True),
nullable=False, server_default='0')
collection = db.relationship(Collection, backref='field_fieldvalues',
order_by=score)
field = db.relationship(Field, backref='collection_fieldvalues',
lazy='joined')
fieldvalue = db.relationship(Fieldvalue, backref='collection_fields',
lazy='joined')
@db.hybrid_property
def id_fieldvalue(self):
"""Get id_fieldvalue."""
return self._id_fieldvalue
@id_fieldvalue.setter
def id_fieldvalue(self, value):
"""Set id_fieldvalue."""
self._id_fieldvalue = value or None
class FacetCollection(db.Model):
"""Facet configuration for collection."""
__tablename__ = 'facet_collection'
id = db.Column(db.Integer, primary_key=True)
id_collection = db.Column(db.Integer, db.ForeignKey(Collection.id))
order = db.Column(db.Integer)
facet_name = db.Column(db.String(80))
collection = db.relationship(Collection, backref='facets')
def __repr__(self):
"""Return class representation."""
return ('FacetCollection <id: {0.id}, id_collection: '
'{0.id_collection}, order: {0.order}, '
'facet_name: {0.facet_name}>'.format(self))
@classmethod
def is_place_taken(cls, id_collection, order):
"""Check if there is already a facet on the given position.
.. note:: This works well as a pre-check, however saving can still fail
if somebody else creates the same record in other session
(phantom reads).
"""
return bool(cls.query.filter(
cls.id_collection == id_collection,
cls.order == order).count())
@classmethod
def is_duplicated(cls, id_collection, facet_name):
"""Check if the given facet is already assigned to this collection.
.. note:: This works well as a pre-check, however saving can still fail
if somebody else creates the same record in other session
(phantom reads).
"""
return bool(cls.query.filter(
cls.id_collection == id_collection,
cls.facet_name == facet_name).count())
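# Illustrative usage sketch (not part of the original module): how the two
# pre-checks above could guard an insert. ``db.session`` follows the usual
# Flask-SQLAlchemy convention; the helper name and arguments are assumptions.
def _example_add_facet(id_collection, facet_name, order):
    """Add a facet only when the slot is free and not already assigned."""
    if FacetCollection.is_place_taken(id_collection, order):
        return None
    if FacetCollection.is_duplicated(id_collection, facet_name):
        return None
    facet = FacetCollection(id_collection=id_collection, order=order,
                            facet_name=facet_name)
    db.session.add(facet)
    return facet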
__all__ = (
'Collection',
'Collectionname',
'Collectiondetailedrecordpagetabs',
'CollectionCollection',
'Example',
'CollectionExample',
'Portalbox',
'CollectionPortalbox',
'Externalcollection',
'CollectionExternalcollection',
'CollectionFormat',
'CollectionFieldFieldvalue',
'FacetCollection',
)
| chokribr/invenio | invenio/modules/collections/models.py | Python | gpl-2.0 | 28,007 |
class Linearizer ():
def __init__ (self, header='', separator='', footer='', graph=None):
self.graph = graph
self.separator = separator
self.header = header
self.footer = footer
def linearize (self):
nodes = self.get_root_nodes()
nodes = self.expand_node_list(nodes)
words = (self.process_node(n) for n in nodes)
nonempty = [(w, n) for w, n in zip(words, nodes) if w is not None]
try:
words, nodes = (list(l) for l in zip(*nonempty))
except ValueError:
return ''
self.apply_boundaries(words, nodes)
return self.concat(words)
def get_root_nodes (self):
        return [min(self.graph.nodes())]
def expand_node_list (self, nodes):
change = True
while change is True:
change = False
next_nodes = []
for n in nodes:
try:
expanded = n.get('expanded')
except AttributeError:
n = self.graph.node[n].copy()
expanded = False
if expanded:
next_nodes.append(n)
else:
next_nodes += self.expand_node(n)
change = True
nodes = next_nodes
return nodes
def expand_node (self, n):
n['expanded'] = True
return [n]
def process_node (self, n):
return n['concept']
def apply_boundaries (self, words, nodes):
for i in range(len(nodes)):
if i == 0:
left = None
else:
left = nodes[i-1]
if i == len(nodes)-1:
right = None
else:
right = nodes[i+1]
words[i] = self.boundary(left, nodes[i], words[i], right)
def boundary (self, left, n, word, right):
return word
def concat (self, nodes):
return self.header+self.separator.join(nodes)+self.footer
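# Illustrative note (not part of the original module): ``graph`` is expected to
# be a networkx-style graph whose nodes carry a 'concept' attribute, e.g.
# (assumed usage):
#     g = nx.DiGraph(); g.add_node(0, concept='hello'); g.add_node(1, concept='world')
#     Linearizer(separator=' ', graph=g).linearize()
# Subclasses typically override get_root_nodes/expand_node/boundary to control
# node ordering and token realisation.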
| agarsev/grafeno | grafeno/linearizers/base.py | Python | agpl-3.0 | 2,009 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fast-Fourier Transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.framework import tensor_util as _tensor_util
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.util.tf_export import tf_export
def _infer_fft_length_for_rfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
return _array_ops.shape(input_tensor)[-fft_rank:]
# Otherwise, return a constant.
return _ops.convert_to_tensor(fft_shape.as_list(), _dtypes.int32)
def _infer_fft_length_for_irfft(input_tensor, fft_rank):
"""Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
# A TensorShape for the inner fft_rank dimensions.
fft_shape = input_tensor.get_shape()[-fft_rank:]
# If any dim is unknown, fall back to tensor-based math.
if not fft_shape.is_fully_defined():
fft_length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
fft_length[-1] = _math_ops.maximum(0, 2 * (fft_length[-1] - 1))
return _array_ops.stack(fft_length)
# Otherwise, return a constant.
fft_length = fft_shape.as_list()
if fft_length:
fft_length[-1] = max(0, 2 * (fft_length[-1] - 1))
return _ops.convert_to_tensor(fft_length, _dtypes.int32)
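# Example (illustrative, comments only): for a complex input whose inner
# dimension holds 5 bins and no explicit fft_length, the helper above infers
# fft_length = 2 * (5 - 1) = 8 for the inverse real transform.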
def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
"""Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
fft_shape = _tensor_util.constant_value_as_shape(fft_length)
# Edge case: skip padding empty tensors.
if (input_tensor.shape.ndims is not None and
any(dim.value == 0 for dim in input_tensor.shape.dims)):
return input_tensor
# If we know the shapes ahead of time, we can either skip or pre-compute the
# appropriate paddings. Otherwise, fall back to computing paddings in
# TensorFlow.
if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
# Slice the last FFT-rank dimensions from input_tensor's shape.
input_fft_shape = input_tensor.shape[-fft_shape.ndims:]
if input_fft_shape.is_fully_defined():
# In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
if is_reverse:
fft_shape = fft_shape[:-1].concatenate(
fft_shape.dims[-1].value // 2 + 1)
paddings = [[0, max(fft_dim.value - input_dim.value, 0)]
for fft_dim, input_dim in zip(
fft_shape.dims, input_fft_shape.dims)]
if any(pad > 0 for _, pad in paddings):
outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -
fft_shape.ndims), 0)
return _array_ops.pad(input_tensor, outer_paddings + paddings)
return input_tensor
# If we can't determine the paddings ahead of time, then we have to pad. If
# the paddings end up as zero, tf.pad has a special-case that does no work.
input_rank = _array_ops.rank(input_tensor)
input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
# In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
if is_reverse:
fft_length = _array_ops.concat([fft_length[:-1],
fft_length[-1:] // 2 + 1], 0)
fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],
axis=1)
return _array_ops.pad(input_tensor, paddings)
def _rfft_wrapper(fft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
def _rfft(input_tensor, fft_length=None, name=None):
"""Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.float32)
input_tensor.shape.with_rank_at_least(fft_rank)
if fft_length is None:
fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)
return fft_fn(input_tensor, fft_length, name)
_rfft.__doc__ = fft_fn.__doc__
return _rfft
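# Example (illustrative, comments only): with the wrapper above,
#   tf.signal.rfft([1., 2., 3.], fft_length=[4])
# zero-pads the input to length 4 before transforming, so the output holds
# fft_length // 2 + 1 == 3 complex bins.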
def _irfft_wrapper(ifft_fn, fft_rank, default_name):
"""Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""
def _irfft(input_tensor, fft_length=None, name=None):
"""Wrapper irfft* that infers fft_length argument."""
with _ops.name_scope(name, default_name,
[input_tensor, fft_length]) as name:
input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.complex64)
input_tensor.shape.with_rank_at_least(fft_rank)
if fft_length is None:
fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)
else:
fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
input_tensor = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length,
is_reverse=True)
return ifft_fn(input_tensor, fft_length, name)
_irfft.__doc__ = ifft_fn.__doc__
return _irfft
# FFT/IFFT 1/2/3D are exported via
# third_party/tensorflow/core/api_def/python_api/
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
rfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, "rfft")
tf_export("signal.rfft", v1=["signal.rfft", "spectral.rfft"])(rfft)
irfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, "irfft")
tf_export("signal.irfft", v1=["signal.irfft", "spectral.irfft"])(irfft)
rfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, "rfft2d")
tf_export("signal.rfft2d", v1=["signal.rfft2d", "spectral.rfft2d"])(rfft2d)
irfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, "irfft2d")
tf_export("signal.irfft2d", v1=["signal.irfft2d", "spectral.irfft2d"])(irfft2d)
rfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, "rfft3d")
tf_export("signal.rfft3d", v1=["signal.rfft3d", "spectral.rfft3d"])(rfft3d)
irfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, "irfft3d")
tf_export("signal.irfft3d", v1=["signal.irfft3d", "spectral.irfft3d"])(irfft3d)
def _fft_size_for_grad(grad, rank):
return _math_ops.reduce_prod(_array_ops.shape(grad)[-rank:])
@_ops.RegisterGradient("FFT")
def _fft_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype)
return ifft(grad) * size
@_ops.RegisterGradient("IFFT")
def _ifft_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype.real_dtype),
grad.dtype)
return fft(grad) * rsize
@_ops.RegisterGradient("FFT2D")
def _fft2d_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype)
return ifft2d(grad) * size
@_ops.RegisterGradient("IFFT2D")
def _ifft2d_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype.real_dtype),
grad.dtype)
return fft2d(grad) * rsize
@_ops.RegisterGradient("FFT3D")
def _fft3d_grad(_, grad):
size = _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype)
return ifft3d(grad) * size
@_ops.RegisterGradient("IFFT3D")
def _ifft3d_grad(_, grad):
rsize = _math_ops.cast(
1. / _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype.real_dtype),
grad.dtype)
return fft3d(grad) * rsize
def _rfft_grad_helper(rank, irfft_fn):
"""Returns a gradient function for an RFFT of the provided rank."""
# Can't happen because we don't register a gradient for RFFT3D.
assert rank in (1, 2), "Gradient for RFFT3D is not implemented."
def _grad(op, grad):
"""A gradient function for RFFT with the provided `rank` and `irfft_fn`."""
fft_length = op.inputs[1]
input_shape = _array_ops.shape(op.inputs[0])
is_even = _math_ops.cast(1 - (fft_length[-1] % 2), _dtypes.complex64)
def _tile_for_broadcasting(matrix, t):
expanded = _array_ops.reshape(
matrix,
_array_ops.concat([
_array_ops.ones([_array_ops.rank(t) - 2], _dtypes.int32),
_array_ops.shape(matrix)
], 0))
return _array_ops.tile(
expanded, _array_ops.concat([_array_ops.shape(t)[:-2], [1, 1]], 0))
def _mask_matrix(length):
"""Computes t_n = exp(sqrt(-1) * pi * n^2 / line_len)."""
# TODO(rjryan): Speed up computation of twiddle factors using the
# following recurrence relation and cache them across invocations of RFFT.
#
# t_n = exp(sqrt(-1) * pi * n^2 / line_len)
# for n = 0, 1,..., line_len-1.
# For n > 2, use t_n = t_{n-1}^2 / t_{n-2} * t_1^2
a = _array_ops.tile(
_array_ops.expand_dims(_math_ops.range(length), 0), (length, 1))
b = _array_ops.transpose(a, [1, 0])
return _math_ops.exp(
-2j * np.pi * _math_ops.cast(a * b, _dtypes.complex64) /
_math_ops.cast(length, _dtypes.complex64))
def _ymask(length):
"""A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length `length`."""
return _math_ops.cast(1 - 2 * (_math_ops.range(length) % 2),
_dtypes.complex64)
y0 = grad[..., 0:1]
if rank == 1:
ym = grad[..., -1:]
extra_terms = y0 + is_even * ym * _ymask(input_shape[-1])
elif rank == 2:
# Create a mask matrix for y0 and ym.
base_mask = _mask_matrix(input_shape[-2])
# Tile base_mask to match y0 in shape so that we can batch-matmul the
# inner 2 dimensions.
tiled_mask = _tile_for_broadcasting(base_mask, y0)
y0_term = _math_ops.matmul(tiled_mask, _math_ops.conj(y0))
extra_terms = y0_term
ym = grad[..., -1:]
ym_term = _math_ops.matmul(tiled_mask, _math_ops.conj(ym))
inner_dim = input_shape[-1]
ym_term = _array_ops.tile(
ym_term,
_array_ops.concat([
_array_ops.ones([_array_ops.rank(grad) - 1], _dtypes.int32),
[inner_dim]
], 0)) * _ymask(inner_dim)
extra_terms += is_even * ym_term
# The gradient of RFFT is the IRFFT of the incoming gradient times a scaling
# factor, plus some additional terms to make up for the components dropped
# due to Hermitian symmetry.
input_size = _math_ops.cast(
_fft_size_for_grad(op.inputs[0], rank), _dtypes.float32)
the_irfft = irfft_fn(grad, fft_length)
return 0.5 * (the_irfft * input_size + _math_ops.real(extra_terms)), None
return _grad
def _irfft_grad_helper(rank, rfft_fn):
"""Returns a gradient function for an IRFFT of the provided rank."""
# Can't happen because we don't register a gradient for IRFFT3D.
assert rank in (1, 2), "Gradient for IRFFT3D is not implemented."
def _grad(op, grad):
"""A gradient function for IRFFT with the provided `rank` and `rfft_fn`."""
# Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs
# and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the
# graph we special-case the situation where the FFT length and last
# dimension of the input are known at graph construction time.
fft_length = op.inputs[1]
is_odd = _math_ops.mod(fft_length[-1], 2)
input_last_dimension = _array_ops.shape(op.inputs[0])[-1]
mask = _array_ops.concat(
[[1.0], 2.0 * _array_ops.ones([input_last_dimension - 2 + is_odd]),
_array_ops.ones([1 - is_odd])], 0)
rsize = _math_ops.reciprocal(_math_ops.cast(
_fft_size_for_grad(grad, rank), _dtypes.float32))
# The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
# factor and a mask. The mask scales the gradient for the Hermitian
# symmetric components of the RFFT by a factor of two, since these
# components are de-duplicated in the RFFT.
the_rfft = rfft_fn(grad, fft_length)
return the_rfft * _math_ops.cast(rsize * mask, _dtypes.complex64), None
return _grad
@tf_export("signal.fftshift")
def fftshift(x, axes=None, name=None):
"""Shift the zero-frequency component to the center of the spectrum.
This function swaps half-spaces for all axes listed (defaults to all).
Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
@compatibility(numpy)
Equivalent to numpy.fft.fftshift.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftshift.html
@end_compatibility
For example:
```python
x = tf.signal.fftshift([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])
x.numpy() # array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
```
Args:
x: `Tensor`, input tensor.
axes: `int` or shape `tuple`, optional Axes over which to shift. Default is
None, which shifts all axes.
name: An optional name for the operation.
Returns:
A `Tensor`, The shifted tensor.
"""
with _ops.name_scope(name, "fftshift") as name:
x = _ops.convert_to_tensor(x)
if axes is None:
axes = tuple(range(x.shape.ndims))
shift = [int(dim // 2) for dim in x.shape]
elif isinstance(axes, int):
shift = int(x.shape[axes] // 2)
else:
shift = [int((x.shape[ax]) // 2) for ax in axes]
return manip_ops.roll(x, shift, axes, name)
@tf_export("signal.ifftshift")
def ifftshift(x, axes=None, name=None):
"""The inverse of fftshift.
Although identical for even-length x,
the functions differ by one sample for odd-length x.
@compatibility(numpy)
Equivalent to numpy.fft.ifftshift.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftshift.html
@end_compatibility
For example:
```python
x = tf.signal.ifftshift([[ 0., 1., 2.],[ 3., 4., -4.],[-3., -2., -1.]])
x.numpy() # array([[ 4., -4., 3.],[-2., -1., -3.],[ 1., 2., 0.]])
```
Args:
x: `Tensor`, input tensor.
axes: `int` or shape `tuple` Axes over which to calculate. Defaults to None,
which shifts all axes.
name: An optional name for the operation.
Returns:
A `Tensor`, The shifted tensor.
"""
with _ops.name_scope(name, "ifftshift") as name:
x = _ops.convert_to_tensor(x)
if axes is None:
axes = tuple(range(x.shape.ndims))
shift = [-int(dim // 2) for dim in x.shape]
elif isinstance(axes, int):
shift = -int(x.shape[axes] // 2)
else:
shift = [-int(x.shape[ax] // 2) for ax in axes]
return manip_ops.roll(x, shift, axes, name)
_ops.RegisterGradient("RFFT")(_rfft_grad_helper(1, irfft))
_ops.RegisterGradient("IRFFT")(_irfft_grad_helper(1, rfft))
_ops.RegisterGradient("RFFT2D")(_rfft_grad_helper(2, irfft2d))
_ops.RegisterGradient("IRFFT2D")(_irfft_grad_helper(2, rfft2d))
| ghchinoy/tensorflow | tensorflow/python/ops/signal/fft_ops.py | Python | apache-2.0 | 16,184 |
# -*- coding: utf-8 -*-
"""
Manual Steps
~~~~~~~~~~~~
Requirements:
* **koji** package
* package with a koji profile (if needed)
Inputs:
* **profile** - koji instance in which the change will be made
* **owner** - package owner
* **tag** - release (a.k.a. main) koji tag name for a release
* **package** - name of package to be created in a release
Steps:
#. ``koji --profile=<profile> add-pkg --owner=<owner> <tag> <package> [package] ...``
"""
from __future__ import print_function
from __future__ import unicode_literals
import sys
import argparse
from .common import Environment, Release, UsageError, Error, CommandBase
class KojiCreatePackageInRelease(CommandBase):
"""
Create packages in a release.
:param env: Environment object to be used to execute the commands.
:type env: Environment
:param release: Release object.
:type release: Release
:param packages: name of package to be created in a release
:type packages: list of str
:param owner: package owner
:type owner: str
"""
def __init__(self, env, release, packages, owner, scl=None):
"""Adding packages for create and owner as an aditional member."""
super(KojiCreatePackageInRelease, self).__init__(env, release)
self.packages = self._handle_scl(release, scl, sorted(packages))
self.owner = owner
def details(self, commit=False):
"""Print details of command execution.
:param commit: Flag to indicate if the command will be actually executed.
Line indicating "test mode" is printed, if this is False.
:type commit: boolean; default False
"""
details = "Creating packages in a release\n"
details += " * env name: %s\n" % self.env.name
details += " * env config: %s\n" % self.env.config_path
details += " * release source %s\n" % self.release.config_path
details += " * koji profile: %s\n" % self.env["koji_profile"]
details += " * release_id: %s\n" % self.release_id
details += " * owner: %s\n" % self.owner
details += " * tag: %s\n" % self.release["koji"]["tag_release"]
details += " * packages:\n"
for i in self.packages:
details += " %s\n" % i
if not commit:
details += "*** TEST MODE ***"
return details
def get_cmd(self, commit=False):
"""Construct the koji command.
:param commit: Flag to indicate if the command will be actually executed.
"echo" is prepended to the command, if this is False.
:type commit: boolean; default False
:returns: Koji command.
:rtype: list of strings
"""
cmd = []
cmd.append("koji")
cmd.append("--profile=%s" % self.env["koji_profile"])
cmd.append("add-pkg")
cmd.append("--owner=%s" % self.owner)
cmd.append(self.release["koji"]["tag_release"])
cmd.extend(self.packages)
if not commit:
cmd = ["echo"] + cmd
return cmd
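    # Example (illustrative): with koji_profile 'koji', owner 'jdoe',
    # tag_release 'f24-candidate' and packages ['bash', 'kernel'],
    # get_cmd(commit=True) returns
    #     ['koji', '--profile=koji', 'add-pkg', '--owner=jdoe',
    #      'f24-candidate', 'bash', 'kernel']
    # while commit=False prefixes the same command with 'echo' (dry run).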
@staticmethod
def _handle_scl(release, scl, packages):
"""Check SCL and update package names accordingly.
:param release: Release object.
:type release: Release
:param scl: Software Collection in which packages belong
:type scl: str
:param packages: name of package to be created in a release
:type packages: list of str
"""
scl_required = 'scls' in release
def scl_correct():
return (scl is not None and (
scl in release['scls'] or scl.lower() == 'none'))
if scl_required and scl is None:
message = "Option --scl required! Valid values as found in '%s' are:\n%s"
raise UsageError(message %
(release.config_path,
'\n'.join(release['scls'] + ['none'])))
if scl_required and not scl_correct():
message = "Incorrect SCL selection. Valid values as found in '%s' are:\n%s"
raise UsageError(message %
(release.config_path,
'\n'.join(release['scls'] + ['none'])))
if not scl_required and scl is not None:
message = "'%s' has no SCL data, --scl option should not be used."
raise UsageError(message % (release.config_path))
if scl_required and scl.lower() != 'none':
packages = ["%s-%s" % (scl, package) for package in packages]
return packages
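    # Example (illustrative): if a release defines scls ['rh-python36'] and the
    # caller passes scl='rh-python36' with packages ['python-requests'], the
    # returned names are ['rh-python36-python-requests']; scl='none' leaves the
    # names unchanged.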
def get_parser():
"""Construct argument parser.
:returns: ArgumentParser object with arguments set up.
"""
parser = argparse.ArgumentParser(
description="Create packages in a koji tag that maps to given release.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"release_id",
metavar="RELEASE_ID",
help="PDC release ID, for example 'fedora-24', 'fedora-24-updates'.",
)
parser.add_argument(
"owner",
metavar="OWNER",
help="Package owner.",
)
parser.add_argument(
"packages",
metavar="PACKAGE",
nargs="+",
help="Koji package, for example 'bash', 'kernel'.",
)
parser.add_argument(
"--commit",
action="store_true",
help="Program performs a dry-run by default. Enable this option to apply the changes.",
)
parser.add_argument(
"--scl",
metavar="SCL",
default=argparse.SUPPRESS,
help="""Software Collection for which packages are created.
Required when release has SCL data.
        'none' can be used when none of the SCLs specified should be used."""
)
parser.add_argument(
"--env",
default="default",
help="Select environment in which the program will make changes.",
)
parser.add_argument(
"-d", "--debug",
action="store_true",
help="Print traceback for exceptions. By default only exception messages are displayed.",
)
return parser
def main():
"""Main function."""
try:
parser = get_parser()
args = parser.parse_args()
# hackish way to suppress 'default' text in help text,
# but keep scl in the namespace
if not hasattr(args, 'scl'):
args.scl = None
env = Environment(args.env)
release = Release(args.release_id)
clone = KojiCreatePackageInRelease(
env, release, args.packages, args.owner, args.scl)
clone.run(commit=args.commit)
except Error:
if not args.debug:
sys.tracebacklimit = 0
raise
if __name__ == "__main__":
main()
| release-engineering/releng-sop | releng_sop/koji_create_package_in_release.py | Python | mit | 6,911 |
"""
This module defines various utilities for dealing with the network.
"""
from asyncio import iscoroutinefunction, iscoroutine
def combine_action_handlers(*handlers):
"""
This function combines the given action handlers into a single function
which will call all of them.
"""
# make sure each of the given handlers is callable
for handler in handlers:
# if the handler is not a function
if not (iscoroutinefunction(handler) or iscoroutine(handler)):
# yell loudly
raise ValueError("Provided handler is not a coroutine: %s" % handler)
# the combined action handler
async def combined_handler(*args, **kwds):
# goes over every given handler
for handler in handlers:
# call the handler
await handler(*args, **kwds)
# return the combined action handler
return combined_handler
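# Illustrative usage sketch (not part of the original module); the handler
# names and the action payload below are assumptions for the example.
if __name__ == '__main__':
    import asyncio
    async def _log_action(action_type, payload=None):
        # first handler: just report what happened
        print('log:', action_type, payload)
    async def _store_action(action_type, payload=None):
        # second handler: pretend to persist the action
        print('store:', action_type, payload)
    async def _demo():
        combined = combine_action_handlers(_log_action, _store_action)
        await combined('user.created', payload={'id': 1})
    asyncio.get_event_loop().run_until_complete(_demo())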
| aaivazis/nautilus | nautilus/network/events/util.py | Python | mit | 910 |
import os
import redis
redis_url = os.getenv("REDIS_URL", "redis://127.0.0.1:6379")
redis_conn = redis.from_url(redis_url)
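# Illustrative usage (assumed import path based on this file's location):
#   from spitter.connection import redis_conn
#   redis_conn.set('greeting', 'hello')
#   redis_conn.get('greeting')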
| tmpapageorgiou/spitter | spitter/connection.py | Python | mit | 125 |
# -*- coding:utf-8 -*-
from flask import Flask
from flask_bootstrap import Bootstrap
from admin import create_admin
from models import db, Post, User
from views import blog
from flaskext.markdown import Markdown
from flask_login import LoginManager
def creatApp():
app = Flask(__name__)
Bootstrap(app)
app.config.from_object('config')
registerDatabase(app)
registerBlueprints(app)
create_admin(app)
registerMarkdown(app)
registerTagsFilter(app)
registerLogin(app)
return app
def registerDatabase(app):
db.init_app(app)
def registerBlueprints(app):
app.register_blueprint(blog)
def registerMarkdown(app):
Markdown(app)
def registerTagsFilter(app):
@app.template_filter('mSeries')
def getSeries(tag):
return []
@app.template_filter('mArchive')
def getArchive(tag):
return []
@app.template_filter('mTagCloud')
def getTags(tag):
tags = reduce(lambda x,y:x+y,[tmp.tags for tmp in Post.objects.only('tags').all()])
return sorted({tmp:tags.count(tmp) for tmp in set(tags)}.iteritems(), key=lambda x : x[1],reverse = True)
def registerLogin(app):
loginManager = LoginManager()
loginManager.init_app(app)
loginManager.login_view = "admin.login"
    loginManager.login_message = u'请先登录'  # "Please log in first"
@loginManager.user_loader
def loadUser(user_id):
return User.objects(id=user_id).first()
if __name__ == '__main__':
    app = creatApp()
    app.run(debug=True)
| DoubleHYH/my_Blog | app/__init__.py | Python | mit | 1,366 |
"""Store functional test data parameters here. (This makes it easier to
ensure your private data does not leak out in your source code.) Rename
this file as params.py so the tests can locate it."""
valid_mdn = '' #For tests requiring a valid MDN
optin_mdn = '' #For account tests requiring an MDN
invalid_mdn = '1234' #For tests with an invalid MDN
coord_inside = (lat_float, lon_float)
coord_outside = (lat_float, lon_float)
| ericem/sprintkit | tests/functional/sample_params.py | Python | mit | 427 |
# -*- coding: utf-8 -*-
"""
Started on mon, apr 23rd, 2018
@author: carlos.arana
"""
# Libraries used
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from VarInt.VarInt import VarInt
from classes.Meta import Meta
from Compilador.Compilador import compilar
"""
The local libraries used above are available at the following locations:
SCRIPT:     | AVAILABLE AT:
----------- | ------------------------------------------------------------------------------------
VarInt | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/VarInt
Meta | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Classes
Compilador | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/Compilador
"""
# Parameter documentation ---------------------------------------------------------------------------------------
# Parameter descriptions
M = Meta
M.ClaveParametro = 'P0406'
M.NombreParametro = 'Viviendas urbanas en PCU U1 y U2'
M.DescParam = 'Numero de viviendas dentro de Perimetros de Contención Urbana tipo U1 o U2, por ciudad'
M.UnidadesParam = 'Numero de viviendas'
M.TituloParametro = 'VPCU'  # Used to name the parameter column
M.PeriodoParam = '2018'
M.TipoInt = 1
# Handlings
M.ParDtype = 'float'
M.TipoVar = 'C'  # (Variable types: [C]ontinuous, [D]iscrete, [O]rdinal, [B]inary or [N]ominal)
M.array = []
M.TipoAgr = 'sum'
# Data-mining process descriptions
M.nomarchivodataset = 'Rep_Viv_Vig'
M.extarchivodataset = 'xlsx'
M.ContenidoHojaDatos = 'Viviendas (Tipo, Segmento, ubicacion en PCU)'
M.ClaveDataset = r'SNIIV'
M.ActDatos = '2017'
M.Agregacion = 'Se sumó el total de viviendas en PCU U1 o U2 para los municipios que integran cada ciudad del SUN'
# Descriptions generated from the parameter key
M.getmetafromds = 1
Meta.fillmeta(M)
# Parameter construction -----------------------------------------------------------------------------------------
# Load the initial dataset
dataset = pd.read_excel(M.DirFuente + '\\' + M.ArchivoDataset,
sheetname='DATOS', dtype={'CVE_MUN': 'str'})
dataset.set_index('CVE_MUN', inplace=True)
dataset = dataset.rename_axis('CVE_MUN')
dataset.head(2)
# Generate the dataset for the parameter and the integrity variable
dataset = dataset[(dataset['Ubicación PCU 2015'] == 'U1') | (dataset['Ubicación PCU 2015'] == 'U2')]
dsvar = 'Viviendas'
par_dataset = dataset[dsvar]
par_dataset = par_dataset.to_frame(name = M.ClaveParametro)
par_dataset, variables_dataset = VarInt(par_dataset, dataset, tipo=M.TipoInt)
# Compilation
compilar(M, dataset, par_dataset, variables_dataset)
| Caranarq/01_Dmine | 04_Edificaciones/P0406/P0406.py | Python | gpl-3.0 | 2,794 |
# Audio backend used by pyo
# http://ajaxsoundstudio.com/pyodoc/api/classes/server.html
BACKEND = 'portaudio' # multiplatform
#BACKEND = 'jack' # Linux and Mac, if you know what you are doing
#BACKEND = 'coreaudio' # Mac only, untested
# OSC adresses
OSC_EYE = ('localhost', 1420)
OSC_EAR = ('localhost', 1422)
# Size of the rendering window
RENDER_SIZE = (800, 800)
| ff-/pineal | config.py | Python | agpl-3.0 | 380 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class FarmFieldConfig(AppConfig):
name = 'farm_field'
| jknaresh/farmer | farm_field/apps.py | Python | mit | 159 |
__version__ = '0.5.0.dev0+git'
| bjodah/pycompilation | pycompilation/_release.py | Python | bsd-2-clause | 31 |
"""Support for Dyson Pure Cool Link devices."""
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_DEVICES, CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['libpurecoollink==0.4.2']
_LOGGER = logging.getLogger(__name__)
CONF_LANGUAGE = 'language'
CONF_RETRY = 'retry'
DEFAULT_TIMEOUT = 5
DEFAULT_RETRY = 10
DYSON_DEVICES = 'dyson_devices'
DOMAIN = 'dyson'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_LANGUAGE): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_RETRY, default=DEFAULT_RETRY): cv.positive_int,
vol.Optional(CONF_DEVICES, default=[]):
vol.All(cv.ensure_list, [dict]),
})
}, extra=vol.ALLOW_EXTRA)
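# Example configuration.yaml entry (illustrative; all values are placeholders,
# the accepted keys follow CONFIG_SCHEMA above):
#
# dyson:
#   username: YOUR_DYSON_ACCOUNT_EMAIL
#   password: YOUR_DYSON_ACCOUNT_PASSWORD
#   language: GB
#   devices:
#     - device_id: XX-XX-XXXXXXXX
#       device_ip: 192.168.1.10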
def setup(hass, config):
"""Set up the Dyson parent component."""
_LOGGER.info("Creating new Dyson component")
if DYSON_DEVICES not in hass.data:
hass.data[DYSON_DEVICES] = []
from libpurecoollink.dyson import DysonAccount
dyson_account = DysonAccount(config[DOMAIN].get(CONF_USERNAME),
config[DOMAIN].get(CONF_PASSWORD),
config[DOMAIN].get(CONF_LANGUAGE))
logged = dyson_account.login()
timeout = config[DOMAIN].get(CONF_TIMEOUT)
retry = config[DOMAIN].get(CONF_RETRY)
if not logged:
_LOGGER.error("Not connected to Dyson account. Unable to add devices")
return False
_LOGGER.info("Connected to Dyson account")
dyson_devices = dyson_account.devices()
if CONF_DEVICES in config[DOMAIN] and config[DOMAIN].get(CONF_DEVICES):
configured_devices = config[DOMAIN].get(CONF_DEVICES)
for device in configured_devices:
dyson_device = next((d for d in dyson_devices if
d.serial == device["device_id"]), None)
if dyson_device:
try:
connected = dyson_device.connect(device["device_ip"])
if connected:
_LOGGER.info("Connected to device %s", dyson_device)
hass.data[DYSON_DEVICES].append(dyson_device)
else:
_LOGGER.warning("Unable to connect to device %s",
dyson_device)
except OSError as ose:
_LOGGER.error("Unable to connect to device %s: %s",
str(dyson_device.network_device), str(ose))
else:
_LOGGER.warning(
"Unable to find device %s in Dyson account",
device["device_id"])
else:
# Not yet reliable
for device in dyson_devices:
_LOGGER.info("Trying to connect to device %s with timeout=%i "
"and retry=%i", device, timeout, retry)
connected = device.auto_connect(timeout, retry)
if connected:
_LOGGER.info("Connected to device %s", device)
hass.data[DYSON_DEVICES].append(device)
else:
_LOGGER.warning("Unable to connect to device %s", device)
# Start fan/sensors components
if hass.data[DYSON_DEVICES]:
_LOGGER.debug("Starting sensor/fan components")
discovery.load_platform(hass, "sensor", DOMAIN, {}, config)
discovery.load_platform(hass, "fan", DOMAIN, {}, config)
discovery.load_platform(hass, "vacuum", DOMAIN, {}, config)
discovery.load_platform(hass, "climate", DOMAIN, {}, config)
return True
| HydrelioxGitHub/home-assistant | homeassistant/components/dyson/__init__.py | Python | apache-2.0 | 3,817 |
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""
Settings Model
A very rudimentary settings implementation which is intended to store our
non-mission-critical options which can be edited via the admin UI.
.. todo:
Rather than fetch one option at a time, load all settings into an object
with attribute-style access.
"""
from sqlalchemy import Table, ForeignKey, Column
from sqlalchemy.exc import IntegrityError, ProgrammingError
from sqlalchemy.types import Unicode, UnicodeText, Integer, Boolean, Float
from sqlalchemy.orm import mapper, relation, backref, synonym, interfaces, validates
from urlparse import urlparse
from mediadrop.model.meta import DBSession, metadata
from mediadrop.plugin import events
settings = Table('settings', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('key', Unicode(255), nullable=False, unique=True),
Column('value', UnicodeText),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
multisettings = Table('settings_multi', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('key', Unicode(255), nullable=False),
Column('value', UnicodeText, nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
class Setting(object):
"""
A Single Setting
"""
query = DBSession.query_property()
def __init__(self, key=None, value=None):
self.key = key or None
self.value = value or None
def __repr__(self):
return '<Setting: %s = %r>' % (self.key, self.value)
def __unicode__(self):
return self.value
class MultiSetting(object):
"""
A MultiSetting
"""
query = DBSession.query_property()
def __init__(self, key=None, value=None):
self.key = key or None
self.value = value or None
def __repr__(self):
return '<MultiSetting: %s = %r>' % (self.key, self.value)
def __unicode__(self):
return self.value
mapper(Setting, settings, extension=events.MapperObserver(events.Setting))
mapper(MultiSetting, multisettings, extension=events.MapperObserver(events.MultiSetting))
def insert_settings(defaults):
"""Insert the given setting if they don't exist yet.
XXX: Does not include any support for MultiSetting. This approach
won't work for that. We'll need to use a migration script.
:type defaults: list
:param defaults: Key and value pairs
:rtype: list
:returns: Any settings that have just been created.
"""
inserted = []
try:
settings_query = DBSession.query(Setting.key)\
.filter(Setting.key.in_([key for key, value in defaults]))
existing_settings = set(x[0] for x in settings_query)
except ProgrammingError:
# If we are running paster setup-app on a fresh database with a
# plugin which tries to use this function every time the
# Environment.loaded event fires, the settings table will not
# exist and this exception will be thrown, but its safe to ignore.
# The settings will be created the next time the event fires,
# which will likely be the first time the app server starts up.
return inserted
for key, value in defaults:
if key in existing_settings:
continue
transaction = DBSession.begin_nested()
try:
s = Setting(key, value)
DBSession.add(s)
transaction.commit()
inserted.append(s)
except IntegrityError:
transaction.rollback()
if inserted:
DBSession.commit()
return inserted
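# Illustrative usage sketch (not part of the original module): a plugin could
# seed its own defaults at startup; the keys and values below are assumptions.
#   insert_settings([
#       (u'myplugin_enabled', u'true'),
#       (u'myplugin_api_key', u''),
#   ])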
def fetch_and_create_multi_setting(key, value):
multisettings = MultiSetting.query\
.filter(MultiSetting.key==key)\
.all()
for ms in multisettings:
if ms.value == value:
return ms
ms = MultiSetting(key, value)
DBSession.add(ms)
return ms
| jobsafran/mediadrop | mediadrop/model/settings.py | Python | gpl-3.0 | 4,208 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned ${tf.estimator.Estimator}s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `indicator_column`. `indicator_column` is recommended
for features with only a few possible values. For features with many
possible values, to reduce the size of your model, `embedding_column` is
recommended.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosphy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building model using FeatureColumns, this can be used in a
`model_fn` which is given to the {tf.estimator.Estimator}:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.util import nest
def _internal_input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
scope=None):
"""See input_layer. `scope` is a name or variable scope to use."""
feature_columns = _clean_feature_columns(feature_columns)
for column in feature_columns:
if not isinstance(column, _DenseColumn):
raise ValueError(
'Items of feature_columns must be a _DenseColumn. '
'You can wrap a categorical column with an '
'embedding_column or indicator_column. Given: {}'.format(column))
weight_collections = list(weight_collections or [])
if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
# a non-None `scope` can allow for variable reuse, when, e.g., this function
# is wrapped by a `make_template`.
with variable_scope.variable_scope(
scope, default_name='input_layer', values=features.values()):
builder = _LazyBuilder(features)
output_tensors = []
ordered_columns = []
for column in sorted(feature_columns, key=lambda x: x.name):
ordered_columns.append(column)
with variable_scope.variable_scope(
None, default_name=column._var_scope_name): # pylint: disable=protected-access
tensor = column._get_dense_tensor( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access
batch_size = array_ops.shape(tensor)[0]
output_tensors.append(
array_ops.reshape(tensor, shape=(batch_size, num_elements)))
if cols_to_vars is not None:
# Retrieve any variables created (some _DenseColumn's don't create
# variables, in which case an empty list is returned).
cols_to_vars[column] = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope=variable_scope.get_variable_scope().name)
_verify_static_batch_size_equality(output_tensors, ordered_columns)
return array_ops.concat(output_tensors, 1)
def input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None):
"""Returns a dense `Tensor` as input layer based on given `feature_columns`.
Generally a single example in training data is described with FeatureColumns.
At the first layer of the model, this column oriented data should be converted
to a single `Tensor`.
Example:
```python
price = numeric_column('price')
keywords_embedded = embedding_column(
categorical_column_with_hash_bucket("keywords", 10K), dimensions=16)
columns = [price, keywords_embedded, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
for units in [128, 64, 32]:
dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu)
prediction = tf.layers.dense(dense_tensor, 1)
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values can be a `SparseTensor` or a `Tensor` depends on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_DenseColumn` such as `numeric_column`, `embedding_column`,
`bucketized_column`, `indicator_column`. If you have categorical features,
you can wrap them with an `embedding_column` or `indicator_column`.
weight_collections: A list of collection names to which the Variable will be
added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to list of `Variable`s. For example, after
the call, we might have cols_to_vars =
{_EmbeddingColumn(
categorical_column=_HashedCategoricalColumn(
key='sparse_feature', hash_bucket_size=5, dtype=tf.string),
dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10),
<tf.Variable 'some_variable:1' shape=(5, 10)]}
If a column creates no variables, its value will be an empty list.
Returns:
A `Tensor` which represents input layer of a model. Its shape
is (batch_size, first_layer_dimension) and its dtype is `float32`.
first_layer_dimension is determined based on given `feature_columns`.
Raises:
ValueError: if an item in `feature_columns` is not a `_DenseColumn`.
"""
return _internal_input_layer(features, feature_columns, weight_collections,
trainable, cols_to_vars)
# TODO(akshayka): InputLayer should be a subclass of Layer, and it
# should implement the logic in input_layer using Layer's build-and-call
# paradigm; input_layer should create an instance of InputLayer and
# return the result of inovking its apply method, just as functional layers do.
class InputLayer(object):
"""An object-oriented version of `input_layer` that reuses variables."""
def __init__(self,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None):
"""See `input_layer`."""
self._feature_columns = feature_columns
self._weight_collections = weight_collections
self._trainable = trainable
self._cols_to_vars = cols_to_vars
self._input_layer_template = template.make_template(
'feature_column_input_layer',
_internal_input_layer,
create_scope_now_=True)
self._scope = self._input_layer_template.variable_scope
def __call__(self, features):
return self._input_layer_template(
features=features,
feature_columns=self._feature_columns,
weight_collections=self._weight_collections,
trainable=self._trainable,
cols_to_vars=None,
scope=self._scope)
@property
def non_trainable_variables(self):
return self._input_layer_template.non_trainable_variables
@property
def non_trainable_weights(self):
return self._input_layer_template.non_trainable_weights
@property
def trainable_variables(self):
return self._input_layer_template.trainable_variables
@property
def trainable_weights(self):
return self._input_layer_template.trainable_weights
@property
def variables(self):
return self._input_layer_template.variables
@property
def weights(self):
return self._input_layer_template.weights
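# Illustrative sketch (not part of the original module): unlike the functional
# `input_layer`, an `InputLayer` instance reuses its variables across calls.
# The feature name and tensors below are assumptions.
#   price = numeric_column('price')
#   layer = InputLayer(feature_columns=[price])
#   train_x = layer({'price': train_price_tensor})
#   eval_x = layer({'price': eval_price_tensor})  # same underlying variables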
def linear_model(features,
feature_columns,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
cols_to_vars=None):
"""Returns a linear prediction `Tensor` based on given `feature_columns`.
This function generates a weighted sum based on output dimension `units`.
Weighted sum refers to logits in classification problems. It refers to the
prediction itself for linear regression problems.
Note on supported columns: `linear_model` treats categorical columns as
`indicator_column`s while `input_layer` explicitly requires wrapping each
of them with an `embedding_column` or an `indicator_column`.
Example:
```python
price = numeric_column('price')
price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
keywords = categorical_column_with_hash_bucket("keywords", 10K)
keywords_price = crossed_column('keywords', price_buckets, ...)
columns = [price_buckets, keywords, keywords_price ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
prediction = linear_model(features, columns)
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values are `Tensor` or `SparseTensor` depending on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_FeatureColumn`s.
units: An integer, dimensionality of the output space. Default value is 1.
sparse_combiner: A string specifying how to reduce if a sparse column is
multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum"
the default. "sqrtn" often achieves good accuracy, in particular with
bag-of-words columns. It combines each sparse columns independently.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
weight_collections: A list of collection names to which the Variable will be
added. Note that, variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to associated list of `Variable`s. For
example, after the call, we might have cols_to_vars = {
_NumericColumn(
key='numeric_feature1', shape=(1,):
[<tf.Variable 'linear_model/price2/weights:0' shape=(1, 1)>],
'bias': [<tf.Variable 'linear_model/bias_weights:0' shape=(1,)>],
_NumericColumn(
key='numeric_feature2', shape=(2,)):
[<tf.Variable 'linear_model/price1/weights:0' shape=(2, 1)>]}
If a column creates no variables, its value will be an empty list. Note
that cols_to_vars will also contain a string key 'bias' that maps to a
list of Variables.
Returns:
A `Tensor` which represents predictions/logits of a linear model. Its shape
is (batch_size, units) and its dtype is `float32`.
Raises:
ValueError: if an item in `feature_columns` is neither a `_DenseColumn`
nor `_CategoricalColumn`.
"""
feature_columns = _clean_feature_columns(feature_columns)
for column in feature_columns:
if not isinstance(column, (_DenseColumn, _CategoricalColumn)):
raise ValueError('Items of feature_columns must be either a _DenseColumn '
'or _CategoricalColumn. Given: {}'.format(column))
weight_collections = list(weight_collections or [])
if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
with variable_scope.variable_scope(
None, default_name='linear_model', values=features.values()):
weighted_sums = []
ordered_columns = []
builder = _LazyBuilder(features)
for column in sorted(feature_columns, key=lambda x: x.name):
with variable_scope.variable_scope(
None, default_name=column._var_scope_name): # pylint: disable=protected-access
ordered_columns.append(column)
weighted_sum = _create_weighted_sum(
column=column,
builder=builder,
units=units,
sparse_combiner=sparse_combiner,
weight_collections=weight_collections,
trainable=trainable)
weighted_sums.append(weighted_sum)
if cols_to_vars is not None:
# Retrieve the variables created.
cols_to_vars[column] = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope=variable_scope.get_variable_scope().name)
_verify_static_batch_size_equality(weighted_sums, ordered_columns)
predictions_no_bias = math_ops.add_n(
weighted_sums, name='weighted_sum_no_bias')
bias = variable_scope.get_variable(
'bias_weights',
shape=[units],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
predictions = nn_ops.bias_add(
predictions_no_bias, bias, name='weighted_sum')
if cols_to_vars is not None:
# Add the bias to cols_to_vars as well, converting the Variable or
# PartitionedVariable to a list of Variable's.
if isinstance(bias, variables.Variable):
cols_to_vars['bias'] = [bias]
else: # Must be a PartitionedVariable.
cols_to_vars['bias'] = list(bias)
return predictions
def _transform_features(features, feature_columns):
"""Returns transformed features based on features columns passed in.
Please note that most probably you would not need to use this function. Please
check `input_layer` and `linear_model` to see whether they will
satisfy your use case or not.
Example:
```python
# Define features and transformations
crosses_a_x_b = crossed_column(
columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
price_buckets = bucketized_column(
source_column=numeric_column("price"), boundaries=[...])
columns = [crosses_a_x_b, price_buckets]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
transformed = transform_features(features=features, feature_columns=columns)
assertCountEqual(columns, transformed.keys())
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values can be a `SparseTensor` or a `Tensor` depends on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing all the `_FeatureColumn`s.
Returns:
A `dict` mapping `_FeatureColumn` to `Tensor` and `SparseTensor` values.
"""
feature_columns = _clean_feature_columns(feature_columns)
outputs = {}
with ops.name_scope(
None, default_name='transform_features', values=features.values()):
builder = _LazyBuilder(features)
for column in sorted(feature_columns, key=lambda x: x.name):
with ops.name_scope(None, default_name=column.name):
outputs[column] = builder.get(column)
return outputs
def make_parse_example_spec(feature_columns):
"""Creates parsing spec dictionary from input feature_columns.
The returned dictionary can be used as arg 'features' in `tf.parse_example`.
Typical usage example:
```python
# Define features and transformations
feature_b = numeric_column(...)
feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=["feature_a", feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
features = tf.parse_example(
serialized=serialized_examples,
features=make_parse_example_spec(feature_columns))
```
For the above example, make_parse_example_spec would return the dict:
```python
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
```
Args:
feature_columns: An iterable containing all feature columns. All items
should be instances of classes derived from `_FeatureColumn`.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
Raises:
ValueError: If any of the given `feature_columns` is not a `_FeatureColumn`
instance.
"""
result = {}
for column in feature_columns:
if not isinstance(column, _FeatureColumn):
raise ValueError(
'All feature_columns must be _FeatureColumn instances. '
'Given: {}'.format(column))
config = column._parse_example_spec # pylint: disable=protected-access
for key, value in six.iteritems(config):
if key in result and value != result[key]:
raise ValueError(
'feature_columns contain different parse_spec for key '
'{}. Given {} and {}'.format(key, value, result[key]))
result.update(config)
return result
def embedding_column(
categorical_column, dimension, combiner='mean', initializer=None,
ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None,
trainable=True):
"""`_DenseColumn` that converts from sparse, categorical input.
Use this when your inputs are sparse, but you want to convert them to a dense
representation (e.g., to feed to a DNN).
Inputs must be a `_CategoricalColumn` created by any of the
  `categorical_column_*` functions. Here is an example of using
`embedding_column` with `DNNClassifier`:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `embedding_column` with model_fn:
```python
def model_fn(features, ...):
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
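  A minimal sketch that only illustrates the output shape (hypothetical values;
  the embedding weights are randomly initialized, so the actual numbers vary):
  ```python
  colors = categorical_column_with_vocabulary_list(
      'colors', vocabulary_list=('R', 'G', 'B'))
  colors_embedded = embedding_column(colors, dimension=2)
  features = {'colors': [['R'], ['B']]}
  dense_tensor = input_layer(features, [colors_embedded])
  # dense_tensor has shape [2, 2]: one 2-dimensional embedding per example,
  # looked up from a [3, 2] embedding variable.
  ```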
Args:
categorical_column: A `_CategoricalColumn` created by a
`categorical_column_with_*` function. This column produces the sparse IDs
that are inputs to the embedding lookup.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, embedding values are l2-normalized to this value.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
`_DenseColumn` that converts from sparse input.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: If eager execution is enabled.
"""
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
return _EmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
def _shared_embedding_columns(
categorical_columns, dimension, combiner='mean', initializer=None,
shared_embedding_collection_name=None, ckpt_to_load_from=None,
tensor_name_in_ckpt=None, max_norm=None, trainable=True):
"""List of `_DenseColumn`s that convert from sparse, categorical input.
  This is similar to `embedding_column`, except that it produces a list of
embedding columns that share the same embedding weights.
Use this when your inputs are sparse and of the same type (e.g. watched and
impression video IDs that share the same vocabulary), and you want to convert
them to a dense representation (e.g., to feed to a DNN).
  Inputs must be a list of `_CategoricalColumn`s created by any of the
  `categorical_column_*` functions. They must all be of the same type and have
the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
all columns could also be weighted_categorical_column.
Here is an example embedding of two features for a DNNClassifier model:
```python
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `shared_embedding_columns` with model_fn:
```python
def model_fn(features, ...):
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
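  A minimal sketch of the weight sharing (hypothetical values; both columns look
  up rows of the same randomly initialized embedding variable):
  ```python
  watched = categorical_column_with_vocabulary_list(
      'watched', vocabulary_list=('a', 'b', 'c'))
  impression = categorical_column_with_vocabulary_list(
      'impression', vocabulary_list=('a', 'b', 'c'))
  shared = _shared_embedding_columns([watched, impression], dimension=2)
  features = {'watched': [['a']], 'impression': [['c']]}
  dense_tensor = input_layer(features, shared)
  # dense_tensor has shape [1, 4]: the two 2-dimensional embeddings are
  # concatenated, and both come from a single shared [3, 2] variable.
  ```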
Args:
categorical_columns: List of `_CategoricalColumn`s created by a
`categorical_column_with_*` function. These columns produce the sparse IDs
that are inputs to the embedding lookup. All columns must be of the same
type and have the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file.
Some or all columns could also be weighted_categorical_column.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.truncated_normal_initializer` with mean `0.0` and standard deviation
`1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, embedding values are l2-normalized to this value.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
A list of `_DenseColumn`s that converts from sparse input. The order of
results follows the ordering of `categorical_columns`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if any of the given `categorical_columns` is of different type
or has different arguments than the others.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
"""
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1. / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
c0 = sorted_columns[0]
if not isinstance(c0, _CategoricalColumn):
raise ValueError(
'All categorical_columns must be subclasses of _CategoricalColumn. '
'Given: {}, of type: {}'.format(c0, type(c0)))
if isinstance(c0, _WeightedCategoricalColumn):
c0 = c0.categorical_column
for c in sorted_columns[1:]:
if isinstance(c, _WeightedCategoricalColumn):
c = c.categorical_column
if not isinstance(c, type(c0)):
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same type, or be weighted_categorical_column of the same type. '
'Given column: {} of type: {} does not match given column: {} of '
'type: {}'.format(c0, type(c0), c, type(c)))
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
result = []
for column in categorical_columns:
result.append(_SharedEmbeddingColumn(
categorical_column=column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
shared_embedding_collection_name=shared_embedding_collection_name,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable))
return result
def numeric_column(key,
shape=(1,),
default_value=None,
dtype=dtypes.float32,
normalizer_fn=None):
"""Represents real valued or numerical features.
Example:
```python
price = numeric_column('price')
columns = [price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
# or
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
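  A minimal sketch with a concrete normalizer (hypothetical values, assuming a
  graph/session to evaluate the tensor):
  ```python
  price = numeric_column('price', normalizer_fn=lambda x: (x - 10.0) / 5.0)
  features = {'price': [[8.0], [12.0]]}
  dense_tensor = input_layer(features, [price])
  # Evaluates to [[-0.4], [0.4]]
  ```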
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
    shape: An iterable of integers specifying the shape of the `Tensor`. A
      single integer may be given, which means a one-dimensional `Tensor` with
      the given width. The `Tensor` representing the column will have the shape
      of [batch_size] + `shape`.
default_value: A single value compatible with `dtype` or an iterable of
values compatible with `dtype` which the column takes on during
`tf.Example` parsing if data is missing. A default value of `None` will
cause `tf.parse_example` to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every item. If an iterable of values is provided,
the shape of the `default_value` should be equal to the given `shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
      the output `Tensor` (e.g. lambda x: (x - 3.0) / 4.2). Note that even
      though the most common use case of this function is normalization, it can
      be used for any kind of TensorFlow transformation.
Returns:
A `_NumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int
ValueError: if any dimension in shape is not a positive integer
TypeError: if `default_value` is an iterable but not compatible with `shape`
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = _check_shape(shape, key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
default_value = _check_default_value(shape, default_value, dtype, key)
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
return _NumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def bucketized_column(source_column, boundaries):
"""Represents discretized dense input.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are
```python
boundaries = [0, 10, 100]
input tensor = [[-5, 10000]
[150, 10]
[5, 100]]
```
then the output will be
```python
output = [[0, 3]
[3, 2]
[1, 3]]
```
Example:
```python
price = numeric_column('price')
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
columns = [bucketized_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
`bucketized_column` can also be crossed with another categorical column using
`crossed_column`:
```python
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
# 'keywords' is a string feature.
price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)
columns = [price_x_keywords, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
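  A minimal end-to-end sketch with concrete boundaries (hypothetical values):
  ```python
  price = numeric_column('price')
  bucketized_price = bucketized_column(price, boundaries=[0., 10., 100.])
  features = {'price': [[-5.], [50.], [150.]]}
  dense_tensor = input_layer(features, [bucketized_price])
  # Evaluates to the one-hot bucket encoding:
  # [[1., 0., 0., 0.],
  #  [0., 0., 1., 0.],
  #  [0., 0., 0., 1.]]
  ```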
Args:
source_column: A one-dimensional dense column which is generated with
`numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `_BucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple.
"""
if not isinstance(source_column, _NumericColumn):
raise ValueError(
'source_column must be a column generated with numeric_column(). '
'Given: {}'.format(source_column))
if len(source_column.shape) > 1:
raise ValueError(
'source_column must be one-dimensional column. '
'Given: {}'.format(source_column))
if (not boundaries or
not (isinstance(boundaries, list) or isinstance(boundaries, tuple))):
raise ValueError('boundaries must be a sorted list.')
for i in range(len(boundaries) - 1):
if boundaries[i] >= boundaries[i + 1]:
raise ValueError('boundaries must be a sorted list.')
return _BucketizedColumn(source_column, tuple(boundaries))
def _assert_string_or_int(dtype, prefix):
if (dtype != dtypes.string) and (not dtype.is_integer):
raise ValueError(
'{} dtype must be string or integer. dtype: {}.'.format(prefix, dtype))
def categorical_column_with_hash_bucket(key,
hash_bucket_size,
dtype=dtypes.string):
"""Represents sparse feature where ids are set by hashing.
Use this when your sparse features are in string or integer format, and you
want to distribute your inputs into a finite number of buckets by hashing.
  output_id = Hash(input_feature_string) % hash_bucket_size
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
  and `''` for string, both of which are treated as missing and dropped by
  this feature column.
Example:
```python
keywords = categorical_column_with_hash_bucket("keywords", 10K)
columns = [keywords, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
keywords_embedded = embedding_column(keywords, 16)
columns = [keywords_embedded, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
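  A minimal sketch with in-memory inputs (hypothetical values; the hot indices
  depend on the hash function, so only the shape is stated):
  ```python
  keywords = categorical_column_with_hash_bucket('keywords', hash_bucket_size=100)
  features = {'keywords': [['sports'], ['news']]}
  dense_tensor = input_layer(features, [indicator_column(keywords)])
  # dense_tensor has shape [2, 100]; each row is one-hot at
  # Hash(value) % 100 for that row's keyword.
  ```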
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_HashedCategoricalColumn`.
Raises:
    ValueError: `hash_bucket_size` is less than 1.
ValueError: `dtype` is neither string nor integer.
"""
if hash_bucket_size is None:
raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))
if hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}, key: {}'.format(
hash_bucket_size, key))
_assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return _HashedCategoricalColumn(key, hash_bucket_size, dtype)
def categorical_column_with_vocabulary_file(key,
vocabulary_file,
vocabulary_size=None,
num_oov_buckets=0,
default_value=None,
dtype=dtypes.string):
"""A `_CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string. Note that these values are independent of the
`default_value` argument.
Example with `num_oov_buckets`:
File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state
abbreviation. All inputs with values in that file are assigned an ID 0-49,
  corresponding to their line numbers. All other values are hashed and assigned an
ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File '/us/states.txt' contains 51 lines - the first line is 'XX', and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'
in input, and other values missing from the file, will be assigned ID 0. All
others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
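  A minimal sketch, assuming a hypothetical vocabulary file '/tmp/colors.txt'
  containing the three lines 'R', 'G' and 'B':
  ```python
  colors = categorical_column_with_vocabulary_file(
      key='colors', vocabulary_file='/tmp/colors.txt', vocabulary_size=3,
      num_oov_buckets=1)
  features = {'colors': [['G'], ['purple']]}
  dense_tensor = input_layer(features, [indicator_column(colors)])
  # 'G' maps to its line number 1; the out-of-vocabulary 'purple' is hashed
  # into the single OOV bucket (ID 3), so dense_tensor has shape [2, 4].
  ```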
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than the number of lines in `vocabulary_file`; if it is less,
      later values are ignored. If `None`, it is set to the length of
      `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
if not vocabulary_file:
raise ValueError('Missing vocabulary_file in {}.'.format(key))
if vocabulary_size is None:
if not gfile.Exists(vocabulary_file):
raise ValueError('vocabulary_file in {} does not exist.'.format(key))
with gfile.GFile(vocabulary_file) as f:
vocabulary_size = sum(1 for _ in f)
logging.info(
'vocabulary_size = %d in %s is inferred from the number of elements '
'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
# `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
if vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size in {}.'.format(key))
if num_oov_buckets:
if default_value is not None:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
_assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return _VocabularyFileCategoricalColumn(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
default_value=-1 if default_value is None else default_value,
dtype=dtype)
def categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A `_CategoricalColumn` with in-memory vocabulary.
Use this when your inputs are in string or integer format, and you have an
in-memory vocabulary mapping each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string. Note that these values are independent of the
`default_value` argument.
Example with `num_oov_buckets`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-3 corresponding to its index (e.g., input 'B' produces output 2). All other
inputs are hashed and assigned an ID 4-5.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
columns = [colors, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Example with `default_value`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-4 corresponding to its index (e.g., input 'B' produces output 3). All other
inputs are assigned `default_value` 0.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
columns = [colors, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(colors, 3),...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
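  A minimal sketch with in-memory inputs (hypothetical values):
  ```python
  colors = categorical_column_with_vocabulary_list(
      key='colors', vocabulary_list=('R', 'G', 'B'), default_value=0)
  features = {'colors': [['G'], ['Z']]}
  dense_tensor = input_layer(features, [indicator_column(colors)])
  # 'G' maps to ID 1; the out-of-vocabulary 'Z' maps to default_value 0:
  # [[0., 1., 0.],
  #  [1., 0., 0.]]
  ```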
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `_CategoricalColumn` with in-memory vocabulary.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
if (vocabulary_list is None) or (len(vocabulary_list) < 1):
raise ValueError(
'vocabulary_list {} must be non-empty, column_name: {}'.format(
vocabulary_list, key))
if len(set(vocabulary_list)) != len(vocabulary_list):
raise ValueError(
'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
vocabulary_list, key))
vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
if num_oov_buckets:
if default_value != -1:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
_assert_string_or_int(
vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
if dtype is None:
dtype = vocabulary_dtype
elif dtype.is_integer != vocabulary_dtype.is_integer:
raise ValueError(
'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
dtype, vocabulary_dtype, key))
_assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return _VocabularyListCategoricalColumn(
key=key, vocabulary_list=tuple(vocabulary_list), dtype=dtype,
default_value=default_value, num_oov_buckets=num_oov_buckets)
def categorical_column_with_identity(key, num_buckets, default_value=None):
"""A `_CategoricalColumn` that returns identity values.
Use this when your inputs are integers in the range `[0, num_buckets)`, and
you want to use the input value itself as the categorical ID. Values outside
this range will result in `default_value` if specified, otherwise it will
fail.
Typically, this is used for contiguous ranges of integer indexes, but
  it doesn't have to be. This might be inefficient, however, if many IDs
are unused. Consider `categorical_column_with_hash_bucket` in that case.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string. Note that these values are independent of the
`default_value` argument.
  In the following examples, each input in the range `[0, 1000000)` is assigned
  the input value itself as its categorical ID. All other inputs are assigned
  `default_value` 0. Note that a literal 0 in the inputs will therefore map to
  the same ID as out-of-range values.
Linear model:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [video_id, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Embedding for a DNN model:
```python
columns = [embedding_column(video_id, 9),...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
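  A minimal sketch with in-memory inputs (hypothetical values):
  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=5, default_value=0)
  features = {'video_id': [[2], [7]]}
  dense_tensor = input_layer(features, [indicator_column(video_id)])
  # 2 keeps its own ID; 7 is out of range and falls back to default_value 0:
  # [[0., 0., 1., 0., 0.],
  #  [1., 0., 0., 0., 0.]]
  ```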
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
      `[0, num_buckets)`, and will replace out-of-range inputs.
Returns:
A `_CategoricalColumn` that returns identity values.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
if num_buckets < 1:
raise ValueError(
'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
if (default_value is not None) and (
(default_value < 0) or (default_value >= num_buckets)):
raise ValueError(
'default_value {} not in range [0, {}), column_name {}'.format(
default_value, num_buckets, key))
return _IdentityCategoricalColumn(
key=key, num_buckets=num_buckets, default_value=default_value)
def indicator_column(categorical_column):
"""Represents multi-hot representation of given categorical column.
Used to wrap any `categorical_column_*` (e.g., to feed to DNN). Use
`embedding_column` if the inputs are sparse.
```python
name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda']))
columns = [name, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
dense_tensor == [[1, 0, 1]] # If "name" bytes_list is ["bob", "wanda"]
dense_tensor == [[2, 0, 0]] # If "name" bytes_list is ["bob", "bob"]
```
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` or `crossed_column` functions.
Returns:
An `_IndicatorColumn`.
"""
return _IndicatorColumn(categorical_column)
def weighted_categorical_column(
categorical_column, weight_feature_key, dtype=dtypes.float32):
"""Applies weight values to a `_CategoricalColumn`.
Use this when each of your sparse inputs has both an ID and a value. For
example, if you're representing text documents as a collection of word
frequencies, you can provide 2 parallel sparse input features ('terms' and
'frequencies' below).
Example:
Input `tf.Example` objects:
```proto
[
features {
feature {
key: "terms"
value {bytes_list {value: "very" value: "model"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.3 value: 0.1}}
}
},
features {
feature {
key: "terms"
value {bytes_list {value: "when" value: "course" value: "human"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.4 value: 0.1 value: 0.2}}
}
}
]
```
```python
categorical_column = categorical_column_with_hash_bucket(
column_name='terms', hash_bucket_size=1000)
weighted_column = weighted_categorical_column(
categorical_column=categorical_column, weight_feature_key='frequencies')
columns = [weighted_column, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
This assumes the input dictionary contains a `SparseTensor` for key
'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have
the same indices and dense shape.
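  A minimal sketch with in-memory inputs instead of `tf.Example` parsing
  (hypothetical values):
  ```python
  colors = categorical_column_with_vocabulary_list(
      'colors', vocabulary_list=('R', 'G', 'B'))
  weighted = weighted_categorical_column(
      categorical_column=colors, weight_feature_key='color_weights')
  features = {'colors': [['R', 'G']], 'color_weights': [[0.3, 0.7]]}
  dense_tensor = input_layer(features, [indicator_column(weighted)])
  # The multi-hot entries carry the given weights instead of 0/1 counts,
  # i.e. [[0.3, 0.7, 0.]]
  ```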
Args:
categorical_column: A `_CategoricalColumn` created by
`categorical_column_with_*` functions.
weight_feature_key: String key for weight values.
dtype: Type of weights, such as `tf.float32`. Only float and integer weights
are supported.
Returns:
A `_CategoricalColumn` composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if `dtype` is not convertible to float.
"""
if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype {} is not convertible to float.'.format(dtype))
return _WeightedCategoricalColumn(
categorical_column=categorical_column,
weight_feature_key=weight_feature_key,
dtype=dtype)
def crossed_column(keys, hash_bucket_size, hash_key=None):
"""Returns a column for performing crosses of categorical features.
Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
the transformation can be thought of as:
Hash(cartesian product of features) % `hash_bucket_size`
For example, if the input features are:
* SparseTensor referred by first key:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
* SparseTensor referred by second key:
```python
shape = [2, 1]
{
[0, 0]: "d"
[1, 0]: "e"
}
```
  then the crossed feature will look like:
```python
shape = [2, 2]
{
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
}
```
Here is an example to create a linear model with crosses of string features:
```python
keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
You could also use vocabulary lookup before crossing:
```python
keywords = categorical_column_with_vocabulary_file(
'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
columns = [keywords_x_doc_terms, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
If an input feature is of numeric type, you can use
`categorical_column_with_identity`, or `bucketized_column`, as in the example:
```python
# vertical_id is an integer categorical feature.
vertical_id = categorical_column_with_identity('vertical_id', 10K)
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
columns = [vertical_id_x_price, ...]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
  To use a crossed column in a DNN model, you need to wrap it in an embedding
  column, as in this example:
```python
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
```
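  A minimal sketch crossing two in-memory string features (hypothetical values;
  the crossed bucket IDs depend on the hash, so only the shape is stated):
  ```python
  colors_x_sizes = crossed_column(['colors', 'sizes'], hash_bucket_size=10)
  features = {'colors': [['R'], ['G']], 'sizes': [['S'], ['L']]}
  dense_tensor = input_layer(features, [indicator_column(colors_x_sizes)])
  # dense_tensor has shape [2, 10]; row 0 is hot at the hashed bucket of the
  # ('R', 'S') cross and row 1 at the bucket of the ('G', 'L') cross.
  ```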
Args:
keys: An iterable identifying the features to be crossed. Each element can
be either:
* string: Will use the corresponding feature which must be of string type.
* `_CategoricalColumn`: Will use the transformed tensor produced by this
column. Does not support hashed categorical column.
hash_bucket_size: An int > 1. The number of buckets.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseCrossOp (optional).
Returns:
A `_CrossedColumn`.
Raises:
ValueError: If `len(keys) < 2`.
ValueError: If any of the keys is neither a string nor `_CategoricalColumn`.
ValueError: If any of the keys is `_HashedCategoricalColumn`.
ValueError: If `hash_bucket_size < 1`.
"""
if not hash_bucket_size or hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be > 1. '
'hash_bucket_size: {}'.format(hash_bucket_size))
if not keys or len(keys) < 2:
raise ValueError(
'keys must be a list with length > 1. Given: {}'.format(keys))
for key in keys:
if (not isinstance(key, six.string_types) and
not isinstance(key, _CategoricalColumn)):
raise ValueError(
          'Unsupported key type. All keys must be either a string or a '
          'categorical column other than _HashedCategoricalColumn. '
'Given: {}'.format(key))
if isinstance(key, _HashedCategoricalColumn):
raise ValueError(
'categorical_column_with_hash_bucket is not supported for crossing. '
'Hashing before crossing will increase probability of collision. '
'Instead, use the feature name as a string. Given: {}'.format(key))
return _CrossedColumn(
keys=tuple(keys), hash_bucket_size=hash_bucket_size,
hash_key=hash_key)
class _FeatureColumn(object):
"""Represents a feature column abstraction.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
To distinguish the concept of a feature family and a specific binary feature
within a family, we refer to a feature family like "country" as a feature
column. Following is an example feature in a `tf.Example` format:
{key: "country", value: [ "US" ]}
  In this example the value of the feature is "US" and "country" refers to the
  column of the feature.
  This class is an abstract class. Users should not create instances of it.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def name(self):
"""Returns string. Used for naming and for name_scope."""
pass
@property
def _var_scope_name(self):
"""Returns string. Used for variable_scope. Defaults to self.name."""
return self.name
@abc.abstractmethod
def _transform_feature(self, inputs):
"""Returns intermediate representation (usually a `Tensor`).
Uses `inputs` to create an intermediate representation (usually a `Tensor`)
that other feature columns can use.
Example usage of `inputs`:
    Let's say a feature column depends on a raw feature ('raw') and another
    `_FeatureColumn` (input_fc). To access the corresponding `Tensor`s, `inputs`
    will be used as follows:
```python
raw_tensor = inputs.get('raw')
fc_tensor = inputs.get(input_fc)
```
Args:
inputs: A `_LazyBuilder` object to access inputs.
Returns:
Transformed feature `Tensor`.
"""
pass
@abc.abstractproperty
def _parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict.
    It is used to build the parsing spec for `tf.parse_example`. The returned
    spec is a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`,
    and other supported objects. Please check the documentation of
    ${tf.parse_example} for all supported spec objects.
    Let's say a feature column depends on a raw feature ('raw') and another
`_FeatureColumn` (input_fc). One possible implementation of
_parse_example_spec is as follows:
```python
spec = {'raw': tf.FixedLenFeature(...)}
spec.update(input_fc._parse_example_spec)
return spec
```
"""
pass
class _DenseColumn(_FeatureColumn):
"""Represents a column which can be represented as `Tensor`.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
Some examples of this type are: numeric_column, embedding_column,
indicator_column.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def _variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
pass
@abc.abstractmethod
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns a `Tensor`.
    The output of this function will be used by model-builder functions. For
    example, the pseudo code of `input_layer` will be like:
```python
def input_layer(features, feature_columns, ...):
outputs = [fc._get_dense_tensor(...) for fc in feature_columns]
return tf.concat(outputs)
```
Args:
inputs: A `_LazyBuilder` object to access inputs.
      weight_collections: List of graph collections to which Variables (if any
        are created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see ${tf.Variable}).
Returns:
`Tensor` of shape [batch_size] + `_variable_shape`.
"""
pass
def _create_weighted_sum(
column,
builder,
units,
sparse_combiner,
weight_collections,
trainable):
"""Creates a weighted sum for a dense or sparse column for linear_model."""
if isinstance(column, _CategoricalColumn):
return _create_categorical_column_weighted_sum(
column=column,
builder=builder,
units=units,
sparse_combiner=sparse_combiner,
weight_collections=weight_collections,
trainable=trainable)
else:
return _create_dense_column_weighted_sum(
column=column,
builder=builder,
units=units,
weight_collections=weight_collections,
trainable=trainable)
def _create_dense_column_weighted_sum(
column, builder, units, weight_collections, trainable):
"""Create a weighted sum of a dense column for linear_model."""
tensor = column._get_dense_tensor( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access
batch_size = array_ops.shape(tensor)[0]
tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
weight = variable_scope.get_variable(
name='weights',
shape=[num_elements, units],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
return math_ops.matmul(tensor, weight, name='weighted_sum')
class _CategoricalColumn(_FeatureColumn):
"""Represents a categorical feature.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
  A categorical feature is typically handled with a ${tf.SparseTensor} of IDs.
"""
__metaclass__ = abc.ABCMeta
IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name
'IdWeightPair', ['id_tensor', 'weight_tensor'])
@abc.abstractproperty
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
pass
@abc.abstractmethod
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `id_tensor`. The expected `SparseTensor` is the same as
    the parsing output of a `VarLenFeature`, which is a ragged matrix.
Args:
inputs: A `LazyBuilder` as a cache to get input tensors required to
create `IdWeightPair`.
      weight_collections: List of graph collections to which variables (if any
        are created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see ${tf.get_variable}).
"""
pass
def _create_categorical_column_weighted_sum(
column, builder, units, sparse_combiner, weight_collections, trainable):
"""Create a weighted sum of a categorical column for linear_model."""
sparse_tensors = column._get_sparse_tensors( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
array_ops.shape(sparse_tensors.id_tensor)[0], -1
])
weight_tensor = sparse_tensors.weight_tensor
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(
weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
weight = variable_scope.get_variable(
name='weights',
shape=(column._num_buckets, units), # pylint: disable=protected-access
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
return _safe_embedding_lookup_sparse(
weight,
id_tensor,
sparse_weights=weight_tensor,
combiner=sparse_combiner,
name='weighted_sum')
class _LazyBuilder(object):
"""Handles caching of transformations while building the model.
`_FeatureColumn` specifies how to digest an input column to the network. Some
feature columns require data transformations. This class caches those
transformations.
Some features may be used in more than one place. For example, one can use a
  bucketized feature both by itself and in a cross with it. In that case we
should create only one bucketization op instead of creating ops for each
feature column separately. To handle re-use of transformed columns,
`_LazyBuilder` caches all previously transformed columns.
Example:
We're trying to use the following `_FeatureColumn`s:
```python
bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_bucket("keywords", ...)
  age_X_keywords = fc.crossed_column([bucketized_age, "keywords"], ...)
  ... = linear_model(features,
                     [bucketized_age, keywords, age_X_keywords])
```
If we transform each column independently, then we'll get duplication of
bucketization (one for cross, one for bucketization itself).
The `_LazyBuilder` eliminates this duplication.
"""
def __init__(self, features):
"""Creates a `_LazyBuilder`.
Args:
features: A mapping from feature column to objects that are `Tensor` or
`SparseTensor`, or can be converted to same via
`sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
signifies a base feature (not-transformed). A `_FeatureColumn` key
means that this `Tensor` is the output of an existing `_FeatureColumn`
which can be reused.
"""
self._features = features.copy()
self._feature_tensors = {}
def get(self, key):
"""Returns a `Tensor` for the given key.
A `str` key is used to access a base feature (not-transformed). When a
`_FeatureColumn` is passed, the transformed feature is returned if it
already exists, otherwise the given `_FeatureColumn` is asked to provide its
transformed output, which is then cached.
Args:
key: a `str` or a `_FeatureColumn`.
Returns:
The transformed `Tensor` corresponding to the `key`.
Raises:
ValueError: if key is not found or a transformed `Tensor` cannot be
computed.
"""
if key in self._feature_tensors:
# FeatureColumn is already transformed or converted.
return self._feature_tensors[key]
if key in self._features:
feature_tensor = self._get_raw_feature_as_tensor(key)
self._feature_tensors[key] = feature_tensor
return feature_tensor
if not isinstance(key, (str, _FeatureColumn)):
raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
'Provided: {}'.format(key))
if not isinstance(key, _FeatureColumn):
raise ValueError('Feature {} is not in features dictionary.'.format(key))
column = key
logging.debug('Transforming feature_column %s.', column)
transformed = column._transform_feature(self) # pylint: disable=protected-access
if transformed is None:
raise ValueError('Column {} is not supported.'.format(column.name))
self._feature_tensors[column] = transformed
return transformed
def _get_raw_feature_as_tensor(self, key):
"""Gets the raw_feature (keyed by `key`) as `tensor`.
The raw feature is converted to (sparse) tensor and maybe expand dim.
For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if
the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will
error out as it is not supported.
Args:
key: A `str` key to access the raw feature.
Returns:
A `Tensor` or `SparseTensor`.
Raises:
ValueError: if the raw feature has rank 0.
"""
raw_feature = self._features[key]
feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
raw_feature)
def expand_dims(input_tensor):
# Input_tensor must have rank 1.
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return sparse_ops.sparse_reshape(
input_tensor, [array_ops.shape(input_tensor)[0], -1])
else:
return array_ops.expand_dims(input_tensor, -1)
rank = feature_tensor.get_shape().ndims
if rank is not None:
if rank == 0:
raise ValueError(
            'Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))
return feature_tensor if rank != 1 else expand_dims(feature_tensor)
# Handle dynamic rank.
with ops.control_dependencies([
check_ops.assert_positive(
array_ops.rank(feature_tensor),
message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))]):
return control_flow_ops.cond(
math_ops.equal(1, array_ops.rank(feature_tensor)),
lambda: expand_dims(feature_tensor),
lambda: feature_tensor)
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _shape_offsets(shape):
"""Returns moving offset for each dimension given shape."""
offsets = []
for dim in reversed(shape):
if offsets:
offsets.append(dim * offsets[-1])
else:
offsets.append(dim)
offsets.reverse()
return offsets
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input(input_tensor, ignore_value=None):
"""Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
If `input_tensor` is already a `SparseTensor`, just return it.
Args:
input_tensor: A string or integer `Tensor`.
    ignore_value: Entries in `input_tensor` equal to this value will be
      absent from the resulting `SparseTensor`. If `None`, the default value of
      `input_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
Returns:
A `SparseTensor` with the same shape as `input_tensor`.
Raises:
ValueError: when `input_tensor`'s rank is `None`.
"""
input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
input_tensor)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return input_tensor
with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
if ignore_value is None:
if input_tensor.dtype == dtypes.string:
        # Special-case strings: `as_numpy_dtype` would yield a numpy object
        # here, so use the empty string as the missing-value marker instead.
ignore_value = ''
elif input_tensor.dtype.is_integer:
ignore_value = -1 # -1 has a special meaning of missing feature
else:
# NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
# constructing a new numpy object of the given type, which yields the
# default value for that type.
ignore_value = input_tensor.dtype.as_numpy_dtype()
ignore_value = math_ops.cast(
ignore_value, input_tensor.dtype, name='ignore_value')
indices = array_ops.where(
math_ops.not_equal(input_tensor, ignore_value), name='indices')
return sparse_tensor_lib.SparseTensor(
indices=indices,
values=array_ops.gather_nd(input_tensor, indices, name='values'),
dense_shape=array_ops.shape(
input_tensor, out_type=dtypes.int64, name='dense_shape'))
def _clean_feature_columns(feature_columns):
"""Verifies and normalizes `feature_columns` input."""
if isinstance(feature_columns, _FeatureColumn):
feature_columns = [feature_columns]
if isinstance(feature_columns, collections.Iterator):
feature_columns = list(feature_columns)
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
for column in feature_columns:
if not isinstance(column, _FeatureColumn):
raise ValueError('Items of feature_columns must be a _FeatureColumn. '
'Given (type {}): {}.'.format(type(column), column))
if not feature_columns:
raise ValueError('feature_columns must not be empty.')
name_to_column = dict()
for column in feature_columns:
if column.name in name_to_column:
raise ValueError('Duplicate feature column name found for columns: {} '
'and {}. This usually means that these columns refer to '
'same base feature. Either one must be discarded or a '
'duplicated but renamed item must be inserted in '
'features dict.'.format(column,
name_to_column[column.name]))
name_to_column[column.name] = column
return feature_columns
class _NumericColumn(_DenseColumn,
collections.namedtuple('_NumericColumn', [
'key', 'shape', 'default_value', 'dtype',
'normalizer_fn'
])):
"""see `numeric_column`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {
self.key:
parsing_ops.FixedLenFeature(self.shape, self.dtype,
self.default_value)
}
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError(
'The corresponding Tensor of numerical column must be a Tensor. '
'SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.to_float(input_tensor)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(self.shape)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing numeric feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in
this function.
Returns:
Dense `Tensor` created within `_transform_feature`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
class _BucketizedColumn(_DenseColumn, _CategoricalColumn,
collections.namedtuple('_BucketizedColumn', [
'source_column', 'boundaries'])):
"""See `bucketized_column`."""
@property
def name(self):
return '{}_bucketized'.format(self.source_column.name)
@property
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
source_tensor = inputs.get(self.source_column)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return array_ops.one_hot(
indices=math_ops.to_int64(input_tensor),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
@property
def _num_buckets(self):
# By construction, source_column is always one-dimensional.
return (len(self.boundaries) + 1) * self.source_column.shape[0]
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
input_tensor = inputs.get(self)
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]),
(-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = (
array_ops.reshape(input_tensor, (-1,)) +
(len(self.boundaries) + 1) * i2)
indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
dense_shape = math_ops.to_int64(array_ops.stack(
[batch_size, source_dimension]))
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices,
values=bucket_indices,
dense_shape=dense_shape)
return _CategoricalColumn.IdWeightPair(sparse_tensor, None)
class _EmbeddingColumn(
_DenseColumn,
collections.namedtuple('_EmbeddingColumn', (
'categorical_column', 'dimension', 'combiner', 'initializer',
'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'
))):
"""See `embedding_column`."""
@property
def name(self):
if not hasattr(self, '_name'):
self._name = '{}_embedding'.format(self.categorical_column.name)
return self._name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def _variable_shape(self):
if not hasattr(self, '_shape'):
self._shape = tensor_shape.vector(self.dimension)
return self._shape
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections=weight_collections, trainable=trainable)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return _safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
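# Shape sketch (hypothetical sizes): an _EmbeddingColumn(dimension=8) wrapping
# a categorical column with _num_buckets=1000 creates an 'embedding_weights'
# variable of shape (1000, 8); for a batch of B examples the lookup above
# returns a dense Tensor of shape (B, 8), consistent with _variable_shape.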
class _SharedEmbeddingColumn(
_DenseColumn,
collections.namedtuple('_SharedEmbeddingColumn', (
'categorical_column', 'dimension', 'combiner', 'initializer',
'shared_embedding_collection_name', 'ckpt_to_load_from',
'tensor_name_in_ckpt', 'max_norm', 'trainable'
))):
"""See `embedding_column`."""
@property
def name(self):
if not hasattr(self, '_name'):
self._name = '{}_shared_embedding'.format(self.categorical_column.name)
return self._name
@property
def _var_scope_name(self):
return self.shared_embedding_collection_name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def _variable_shape(self):
if not hasattr(self, '_shape'):
self._shape = tensor_shape.vector(self.dimension)
return self._shape
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
# This method is called from a variable_scope with name _var_scope_name,
# which is shared among all shared embeddings. Open a name_scope here, so
# that the ops for different columns have distinct names.
with ops.name_scope(None, default_name=self.name):
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections=weight_collections, trainable=trainable)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
shared_embedding_collection = ops.get_collection(
self.shared_embedding_collection_name)
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
'Collection {} can only contain one variable. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
'hood.'.format(shared_embedding_collection))
embedding_weights = shared_embedding_collection[0]
if embedding_weights.get_shape() != embedding_shape:
raise ValueError(
'Shared embedding collection {} contains variable {} of '
'unexpected shape {}. Expected shape is {}. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
'hood.'.format(
self.shared_embedding_collection_name, embedding_weights.name,
embedding_weights.get_shape(), embedding_shape))
else:
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
ops.add_to_collection(
self.shared_embedding_collection_name, embedding_weights)
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return _safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
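# Sharing sketch (hypothetical columns): the first _SharedEmbeddingColumn built
# under a given shared_embedding_collection_name creates 'embedding_weights'
# and registers it in that collection; later columns with the same name find
# the collection non-empty and reuse that variable, so two categorical columns
# with _num_buckets=1000 embedded with dimension=8 share a single (1000, 8)
# weight matrix.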
def _create_tuple(shape, value):
"""Returns a tuple with given shape and filled with value."""
if shape:
return tuple([_create_tuple(shape[1:], value) for _ in range(shape[0])])
return value
def _as_tuple(value):
if not nest.is_sequence(value):
return value
return tuple([_as_tuple(v) for v in value])
def _check_shape(shape, key):
"""Returns shape if it's valid, raises error otherwise."""
assert shape is not None
if not nest.is_sequence(shape):
shape = [shape]
shape = tuple(shape)
for dimension in shape:
if not isinstance(dimension, int):
raise TypeError('shape dimensions must be integer. '
'shape: {}, key: {}'.format(shape, key))
if dimension < 1:
raise ValueError('shape dimensions must be greater than 0. '
'shape: {}, key: {}'.format(shape, key))
return shape
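# For example (hypothetical calls): _check_shape(10, 'price') returns (10,) and
# _check_shape((2, 3), 'price') returns (2, 3), while _check_shape((2, 0), 'price')
# raises ValueError because every dimension must be a positive integer.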
def _is_shape_and_default_value_compatible(default_value, shape):
"""Verifies compatibility of shape and default_value."""
  # Invalid conditions:
  #  * default_value is an iterable (non-scalar) but shape is empty
  #  * default_value is a scalar but shape is not empty
if nest.is_sequence(default_value) != bool(shape):
return False
if not shape:
return True
if len(default_value) != shape[0]:
return False
for i in range(shape[0]):
if not _is_shape_and_default_value_compatible(default_value[i], shape[1:]):
return False
return True
def _check_default_value(shape, default_value, dtype, key):
"""Returns default value as tuple if it's valid, otherwise raises errors.
This function verifies that `default_value` is compatible with both `shape`
and `dtype`. If it is not compatible, it raises an error. If it is compatible,
it casts default_value to a tuple and returns it. `key` is used only
for error message.
Args:
shape: An iterable of integers specifies the shape of the `Tensor`.
default_value: If a single value is provided, the same value will be applied
as the default value for every item. If an iterable of values is
provided, the shape of the `default_value` should be equal to the given
`shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
key: Column name, used only for error messages.
Returns:
A tuple which will be used as default value.
  Raises:
    ValueError: if `default_value` is an iterable but not compatible with
      `shape`.
    TypeError: if `default_value` is not compatible with `dtype`.
"""
if default_value is None:
return None
if isinstance(default_value, int):
return _create_tuple(shape, default_value)
if isinstance(default_value, float) and dtype.is_floating:
return _create_tuple(shape, default_value)
if callable(getattr(default_value, 'tolist', None)): # Handles numpy arrays
default_value = default_value.tolist()
if nest.is_sequence(default_value):
if not _is_shape_and_default_value_compatible(default_value, shape):
raise ValueError(
'The shape of default_value must be equal to given shape. '
'default_value: {}, shape: {}, key: {}'.format(
default_value, shape, key))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = all(
isinstance(v, int) for v in nest.flatten(default_value))
is_list_has_float = any(
isinstance(v, float) for v in nest.flatten(default_value))
if is_list_all_int:
return _as_tuple(default_value)
if is_list_has_float and dtype.is_floating:
return _as_tuple(default_value)
raise TypeError('default_value must be compatible with dtype. '
'default_value: {}, dtype: {}, key: {}'.format(
default_value, dtype, key))
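# Illustrative calls (hypothetical arguments):
#   _check_default_value((2,), 3, dtypes.int64, 'age')          -> (3, 3)
#   _check_default_value((2,), [1, 2], dtypes.int64, 'age')     -> (1, 2)
#   _check_default_value((2,), [1., 2.], dtypes.int64, 'age')   -> TypeError
#   _check_default_value((2,), [1, 2, 3], dtypes.int64, 'age')  -> ValueError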
class _HashedCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_HashedCategoricalColumn',
['key', 'hash_bucket_size', 'dtype'])):
"""see `categorical_column_with_hash_bucket`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input(inputs.get(self.key))
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
_assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.hash_bucket_size, name='lookup')
return sparse_tensor_lib.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _VocabularyFileCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_VocabularyFileCategoricalColumn', (
'key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'dtype',
'default_value'
))):
"""See `categorical_column_with_vocabulary_file`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input(inputs.get(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
_assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_file` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.to_int64(input_tensor)
return lookup_ops.index_table_from_file(
vocabulary_file=self.vocabulary_file,
num_oov_buckets=self.num_oov_buckets,
vocab_size=self.vocabulary_size,
default_value=self.default_value,
key_dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.vocabulary_size + self.num_oov_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _VocabularyListCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_VocabularyListCategoricalColumn', (
'key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'
))):
"""See `categorical_column_with_vocabulary_list`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input(inputs.get(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
_assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_tensor` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.to_int64(input_tensor)
return lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self.vocabulary_list),
default_value=self.default_value,
num_oov_buckets=self.num_oov_buckets,
dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return len(self.vocabulary_list) + self.num_oov_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _IdentityCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_IdentityCategoricalColumn', (
'key', 'num_buckets', 'default_value'
))):
"""See `categorical_column_with_identity`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input(inputs.get(self.key))
if not input_tensor.dtype.is_integer:
raise ValueError(
'Invalid input, not integer. key: {} dtype: {}'.format(
self.key, input_tensor.dtype))
values = math_ops.to_int64(input_tensor.values, name='values')
num_buckets = math_ops.to_int64(self.num_buckets, name='num_buckets')
zero = math_ops.to_int64(0, name='zero')
if self.default_value is None:
# Fail if values are out-of-range.
assert_less = check_ops.assert_less(
values, num_buckets, data=(values, num_buckets),
name='assert_less_than_num_buckets')
assert_greater = check_ops.assert_greater_equal(
values, zero, data=(values,),
name='assert_greater_or_equal_0')
with ops.control_dependencies((assert_less, assert_greater)):
values = array_ops.identity(values)
else:
# Assign default for out-of-range values.
values = array_ops.where(
math_ops.logical_or(
values < zero, values >= num_buckets, name='out_of_range'),
array_ops.fill(
dims=array_ops.shape(values),
value=math_ops.to_int64(self.default_value),
name='default_values'),
values)
return sparse_tensor_lib.SparseTensor(
indices=input_tensor.indices,
values=values,
dense_shape=input_tensor.dense_shape)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.num_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
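# Worked example (hypothetical values): with num_buckets=4 and default_value=1,
# input ids [5, 2, -1] are transformed to [1, 2, 1] (out-of-range ids receive
# the default); with default_value=None the same input would instead fail the
# range asserts at run time.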
class _WeightedCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_WeightedCategoricalColumn', (
'categorical_column', 'weight_feature_key', 'dtype'
))):
"""See `weighted_categorical_column`."""
@property
def name(self):
return '{}_weighted_by_{}'.format(
self.categorical_column.name, self.weight_feature_key)
@property
def _parse_example_spec(self):
config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _transform_feature(self, inputs):
weight_tensor = inputs.get(self.weight_feature_key)
if weight_tensor is None:
raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
weight_tensor)
if self.dtype != weight_tensor.dtype.base_dtype:
raise ValueError('Bad dtype, expected {}, but got {}.'.format(
self.dtype, weight_tensor.dtype))
if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
# The weight tensor can be a regular Tensor. In this case, sparsify it.
weight_tensor = _to_sparse_input(weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
weight_tensor = math_ops.to_float(weight_tensor)
return (inputs.get(self.categorical_column), weight_tensor)
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
tensors = inputs.get(self)
return _CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
class _CrossedColumn(
_CategoricalColumn,
collections.namedtuple('_CrossedColumn',
['keys', 'hash_bucket_size', 'hash_key'])):
"""See `crossed_column`."""
@property
def name(self):
feature_names = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, _FeatureColumn):
feature_names.append(key.name)
else: # key must be a string
feature_names.append(key)
return '_X_'.join(sorted(feature_names))
@property
def _parse_example_spec(self):
config = {}
for key in self.keys:
if isinstance(key, _FeatureColumn):
config.update(key._parse_example_spec) # pylint: disable=protected-access
else: # key must be a string
config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
return config
def _transform_feature(self, inputs):
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(inputs.get(key))
elif isinstance(key, _CategoricalColumn):
ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops._sparse_cross_hashed( # pylint: disable=protected-access
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
def _collect_leaf_level_keys(cross):
"""Collects base keys by expanding all nested crosses.
Args:
cross: A `_CrossedColumn`.
Returns:
A list of strings or `_CategoricalColumn` instances.
"""
leaf_level_keys = []
for k in cross.keys:
if isinstance(k, _CrossedColumn):
leaf_level_keys.extend(_collect_leaf_level_keys(k))
else:
leaf_level_keys.append(k)
return leaf_level_keys
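# Expansion example (hypothetical columns): if inner = _CrossedColumn(keys=('a', 'b'), ...)
# and outer = _CrossedColumn(keys=(inner, 'c'), ...), then
# _collect_leaf_level_keys(outer) == ['a', 'b', 'c']; nested crosses contribute
# their base keys rather than their hashed ids.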
# TODO(zakaria): Move this to embedding_ops and make it public.
def _safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner='mean',
default_id=None,
name=None,
partition_strategy='div',
max_norm=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float `Tensor`s or values representing
partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
created by partitioning along dimension 0. The total unpartitioned
shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
vocab size and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights
      are assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy.
Currently `"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if embedding_weights is None:
raise ValueError('Missing embedding_weights %s.' % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError('Missing embedding_weights %s.' % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
embedding_weights = [
ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
]
with ops.name_scope(name, 'embedding_lookup',
embedding_weights + [sparse_ids,
sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
original_rank = (
array_ops.size(original_shape)
if original_rank_dim.value is None
else original_rank_dim.value)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)])
if sparse_weights is not None:
sparse_weights = sparse_tensor_lib.SparseTensor(
sparse_ids.indices,
sparse_weights.values, sparse_ids.dense_shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
default_id or
0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = embedding_ops.embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(is_row_empty,
array_ops.zeros_like(result),
result,
name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(tensor_shape.unknown_shape(
(original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
return final_result
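# Shape-flow sketch (hypothetical sizes): given embedding_weights of total shape
# [vocab_size, dim] and sparse_ids with dense_shape [batch, num_ids], higher-rank
# ids are flattened to rank 2, ids < 0 (and non-positive weights) are pruned,
# empty rows are filled with default_id (or zeroed out afterwards when
# default_id is None), and the final reshape returns a dense Tensor of shape
# [batch, dim].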
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid, math_ops.greater(sparse_weights.values, 0))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
class _IndicatorColumn(_DenseColumn,
collections.namedtuple('_IndicatorColumn',
['categorical_column'])):
"""Represents a one-hot column for use in deep networks.
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` function.
"""
@property
def name(self):
return '{}_indicator'.format(self.categorical_column.name)
def _transform_feature(self, inputs):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
Returns:
Transformed feature `Tensor`.
Raises:
ValueError: if input rank is not known at graph building time.
"""
id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
id_tensor = id_weight_pair.id_tensor
weight_tensor = id_weight_pair.weight_tensor
# If the underlying column is weighted, return the input as a dense tensor.
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=id_tensor,
sp_values=weight_tensor,
vocab_size=int(self._variable_shape[-1]))
# Remove (?, -1) index
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
return sparse_ops.sparse_tensor_to_dense(weighted_column)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor,
depth=self._variable_shape[-1],
on_value=1.0,
off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
@property
def _variable_shape(self):
"""Returns a `TensorShape` representing the shape of the dense `Tensor`."""
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in
this function.
Returns:
Dense `Tensor` created within `_transform_feature`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
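# Worked example (hypothetical ids): for a categorical column with
# _num_buckets=4 and batch sparse ids [[0, 2], [3]], the one-hot vectors are
# summed over the id dimension, so _get_dense_tensor returns
# [[1., 0., 1., 0.], [0., 0., 0., 1.]] (one multi-hot row per example).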
def _verify_static_batch_size_equality(tensors, columns):
  # batch_size is a tf.Dimension object.
  expected_batch_size = None
  for i in range(0, len(tensors)):
    if tensors[i].shape[0].value is not None:
      if expected_batch_size is None:
        batch_size_column_index = i
        expected_batch_size = tensors[i].shape[0]
      elif not expected_batch_size.is_compatible_with(tensors[i].shape[0]):
        raise ValueError(
            'Batch size (first dimension) of each feature must be the same. '
            'Batch size of columns ({}, {}): ({}, {})'.format(
                columns[batch_size_column_index].name, columns[i].name,
                expected_batch_size, tensors[i].shape[0]))
| jwlawson/tensorflow | tensorflow/python/feature_column/feature_column.py | Python | apache-2.0 | 116,403 |
#! /usr/bin/env python
#
# Load a FITS cube , extract the spectrum at a (or reference) pixel
# and operate and plot some and then more....
#
#
# 22-jun-2017 PJT summer project - cloned off cubespectrum.py
# july-2017 Thomas/Peter various improvements
#
# @todo
# - have optional RESTFRQ or RESTFREQ as 3rd argument [done]
# - output the spectrum in a table, much like testCubeSpectrum.tab [done]
# - resample the gauss finer (not 5 points but may be 10x more?)
import os, sys, math
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.units import Quantity
c = 299792.458     # speed of light [km/s]  (astropy.constants.c.to('km/s').value would also work)
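# Usage sketch (argument forms handled below; the file name and numbers are illustrative):
#   cubespectrum2.py cube.fits                         spectrum at the reference pixel, vs frequency
#   cubespectrum2.py cube.fits 64 64                   spectrum at pixel (64,64), vs frequency
#   cubespectrum2.py cube.fits 64 64 115.27            restfreq in GHz -> plot vs velocity
#   cubespectrum2.py cube.fits 64 64 115.27 -100 100   also restrict velocity to [vmin,vmax] km/s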
na = len(sys.argv)
if na == 7:
    # fitsfile xpos ypos restfreq[GHz] vmin vmax, with vmin/vmax in km/s
fitsfile = sys.argv[1]
pos = [int(sys.argv[2]),int(sys.argv[3])]
restfreq = float(sys.argv[4])* 1e9
vmin = float(sys.argv[5])
vmax = float(sys.argv[6])
use_vel = True
elif na == 5:
    # fitsfile xpos ypos restfreq, with restfreq in GHz
fitsfile = sys.argv[1]
pos = [int(sys.argv[2]),int(sys.argv[3])]
vmin = vmax = None
restfreq = float(sys.argv[4])* 1e9
use_vel = True
elif na == 4:
    # fitsfile xpos ypos (pixel position; plot stays in frequency)
fitsfile = sys.argv[1]
pos = [int(sys.argv[2]),int(sys.argv[3])]
restfreq = None
vmin = vmax = None
use_vel = False
elif na == 2:
    # fitsfile only; the reference pixel from the header is used
fitsfile = sys.argv[1]
pos = None
restfreq = None
vmin = vmax = None
use_vel = False
else:
    print("Usage: %s fitsfile [xpos ypos [restfreq_GHz [vmin vmax]]]" % sys.argv[0])
    sys.exit(1)
# open the fits file
hdu = fits.open(fitsfile)
print(len(hdu))
# get a reference to the header and data. Data should be 3dim numpy array now
h = hdu[0].header
d = hdu[0].data.squeeze()
print(d.shape)
# grab the restfreq, there are at least two ways how this is done
if restfreq is None:
if 'RESTFRQ' in h:
restfreq=h['RESTFRQ']
elif 'RESTFREQ' in h:
restfreq=h['RESTFREQ']
else:
restfreq= h['CRVAL3']
print("RESTFREQ",restfreq)
if pos is None:
# the FITS reference pixel is always a good backup
xpos = int(h['CRPIX1'])
ypos = int(h['CRPIX2'])
print("No position given, using reference pixel %g %g" % (xpos,ypos))
else:
xpos = pos[0]
ypos = pos[1]
flux = d[:,ypos,xpos]
nchan = d.shape[0]
channeln = np.arange(nchan)
zero = np.zeros(nchan)
cdelt3 = h['CDELT3']
crval3 = h['CRVAL3']
crpix3 = h['CRPIX3']
# to convert the channel to frequency
channelf = (channeln-crpix3+1)*cdelt3 + crval3
# to convert the Frequency to velocity
#channelv = (1.0-channelf/restfreq) * c
#print (channelf)
#print (channelv)
# what we plot
#channel = channelv
#channel = channelf
#channel = channeln
if use_vel:
# to convert the Frequency to velocity
channelv = (1.0-channelf/restfreq) * c
channel = channelv
print (channelv.min())
print (channelv.max())
else:
channel = channelf
print (channelf.min())
print (channelf.max())
ipeak = flux.argmax()
xpeak = channel[ipeak]
ypeak = flux[ipeak]
# moments around the peak
if na == 7:
m = 5
x = channel[ipeak-m:ipeak+m]
y = flux[ipeak-m:ipeak+m]
xmean = (x*y).sum() / y.sum()
xdisp = (x*x*y).sum() / y.sum() - xmean*xmean
if xdisp > 0:
xdisp = math.sqrt(xdisp)
fwhm = 2.355 * xdisp
print("MEAN/DISP/FWHM:",xmean,xdisp,fwhm)
ymodel = ypeak * np.exp(-0.5*(x-xmean)**2/(xdisp*xdisp))
if use_vel:
plt.figure()
    if vmin is not None:
channelv = ma.masked_outside(channelv,vmin,vmax)
plt.xlim([vmin,vmax])
plt.plot(channelv,flux,'o-',markersize=2,label='data')
plt.plot(channelv,zero)
# plt.plot(x,ymodel,label='gauss')
plt.xlabel("Velocity (km/s)")
plt.ylabel("Flux")
    plt.title(fitsfile + " @ %g %g" % (xpos,ypos) + " %g GHz" % (restfreq/1e9))
plt.legend()
plt.show()
else:
plt.figure()
plt.plot(channelf/1e9,flux,'o-',markersize=2,label='data')
plt.plot(channelf/1e9,zero)
plt.xlabel("Frequency (GHz)")
plt.ylabel("Flux")
plt.title(fitsfile + " @ %g %g" % (xpos,ypos))
plt.legend()
plt.show()
# write a table of the frequency and flux
xtab = channelf / 1e9    # convert the frequency axis to GHz
ytab = flux
np.savetxt('Frequency_Flux.tab', np.c_[xtab, ytab], delimiter=' ', header='Frequency Flux', comments='#', fmt='%.8f')
| astroumd/n253lines | cubespectrum2.py | Python | mit | 4,302 |
#!/usr/bin/env python3
import json
import os
import unittest
import requests
AGNOS_DIR = os.path.dirname(os.path.abspath(__file__))
MANIFEST = os.path.join(AGNOS_DIR, "agnos.json")
class TestAgnosUpdater(unittest.TestCase):
def test_manifest(self):
with open(MANIFEST) as f:
m = json.load(f)
for img in m:
r = requests.head(img['url'])
r.raise_for_status()
self.assertEqual(r.headers['Content-Type'], "application/x-xz")
if not img['sparse']:
assert img['hash'] == img['hash_raw']
if __name__ == "__main__":
unittest.main()
| commaai/openpilot | selfdrive/hardware/tici/test_agnos_updater.py | Python | mit | 595 |
"""
This is used to make star field backgrounds.
"""
import sys, random, pygame, glob, fnmatch
from pygame.locals import *
# star colors
# spectral type R G B
SPECTRA = { 'O': (225,225,255), \
'B': (225,255,255), \
'A': (255,255,255), \
'F': (255,255,225), \
'G': (255,255,200), \
'K': (255,225,200), \
'M': (255,200,200)}
STARS = { 'g' : {} , 'd' : {} }
def replace_color(color1, color2, img):
""" replace color1 with color2 in img """
img = img.copy()
pixObj = pygame.PixelArray(img)
img_size = img.get_size()
for x in range(img_size[0]):
for y in range(img_size[1]):
if pixObj[x][y] == img.map_rgb(color1):
pixObj[x][y] = color2
del pixObj
return img
def load_stars():
"""Load stars and create colored star images in the global STARS dict"""
img = pygame.image.load('./images/dGrey.png')
    for spectral_type in SPECTRA:
        new_img = replace_color((150,150,150), SPECTRA[spectral_type], img)
        STARS['d'][spectral_type] = new_img
img = pygame.image.load('./images/gGrey.png')
    for spectral_type in SPECTRA:
        new_img = replace_color((150,150,150), SPECTRA[spectral_type], img)
        STARS['g'][spectral_type] = new_img
def main():
    sizes = list(STARS.keys())      # list() so random.choice() also works on Python 3
    colors = list(SPECTRA.keys())
side = 6400
load_stars()
bg = pygame.Surface((side,side),SRCALPHA)
for i in range(10000):
size = random.choice(sizes)
color = random.choice(colors)
x = random.randint(0,side)
y = random.randint(0,side)
star = STARS[size][color]
bg.blit(star, [x,y])
pygame.image.save(bg, './images/test2.png')
if __name__ == '__main__':
main() | bobgeis/LookOutSpacePirates | starMaker.py | Python | bsd-3-clause | 1,541 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import pyauto_functional # Must be imported before pyauto
import pyauto
import pyauto_errors
class PyAutoTest(pyauto.PyUITest):
"""Test functionality of the PyAuto framework."""
_EXTRA_CHROME_FLAGS = [
'--scooby-doo=123',
'--donald-duck=cool',
'--super-mario',
'--marvin-the-martian',
]
def ExtraChromeFlags(self):
"""Ensures Chrome is launched with some custom flags.
Overrides the default list of extra flags passed to Chrome. See
ExtraChromeFlags() in pyauto.py.
"""
return pyauto.PyUITest.ExtraChromeFlags(self) + self._EXTRA_CHROME_FLAGS
def testSetCustomChromeFlags(self):
"""Ensures that Chrome can be launched with custom flags."""
self.NavigateToURL('about://version')
for flag in self._EXTRA_CHROME_FLAGS:
self.assertEqual(self.FindInPage(flag)['match_count'], 1,
msg='Missing expected Chrome flag "%s"' % flag)
def testCallOnInvalidWindow(self):
"""Verify that exception is raised when a browser is missing/invalid."""
self.assertEqual(1, self.GetBrowserWindowCount())
self.assertRaises(
pyauto_errors.JSONInterfaceError,
lambda: self.FindInPage('some text', windex=1)) # invalid window
def testJSONInterfaceTimeout(self):
"""Verify that an exception is raised when the JSON interface times out."""
self.ClearEventQueue()
self.AddDomEventObserver('foo')
self.assertRaises(
pyauto_errors.JSONInterfaceError,
lambda: self.GetNextEvent(timeout=2000)) # event queue is empty
if __name__ == '__main__':
pyauto_functional.Main()
| keishi/chromium | chrome/test/functional/test_pyauto.py | Python | bsd-3-clause | 1,808 |