import urllib.request
import urllib.parse
import getpass
class CosignPasswordMgr(object):
"""A password manager for CosignHandler objects.
"""
def newcred(self):
"""Default callback.
Ask user for username and password."""
return {'login': input('username: '),
'password': getpass.getpass()}
def __init__(self, cred=None, max_tries=5, callback=newcred):
"""Create a new CosignPasswordMgr.
Args:
cred: Initial credentials. Will be returned by the first
call to get_cred(). Should be a dictionary of the form:
{'login': username, 'password': password}
max_tries: Maximum number of times get_cred() may be called
before IndexError is raised.
callback: A function to be called to get new
credentials. The current object instance (self) will be
passed as the first argument.
"""
self.set_cred(cred)
self.try_count = 1
self.max_tries = max_tries
self.callback = callback
def set_cred(self, cred):
"""Set stored credentials to cred.
cred should be of the form:
{'login': username, 'password': password}
"""
self.cred = cred
self.dirty = False
def get_cred(self):
"""Get new credentials.
        Return a credentials dictionary (see set_cred()). Raise an
        IndexError exception if more than self.max_tries attempts have already been made.
"""
if not self.dirty and self.cred is not None:
self.try_count = self.try_count + 1
self.dirty = True
return self.cred
if self.try_count > self.max_tries:
raise IndexError("Exceeded max_tries ({})".format(self.max_tries))
self.cred = self.callback(self)
self.try_count = self.try_count + 1
self.dirty = True
return self.cred
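# Illustrative use of CosignPasswordMgr on its own (a sketch, not part of the
# original recipe): credentials can be pre-seeded so the first get_cred() call
# does not prompt; the interactive callback only runs on later attempts.
#
#     pw_mgr = CosignPasswordMgr(cred={'login': 'alice', 'password': 's3cret'})
#     cred = pw_mgr.get_cred()   # returns the seeded dict without prompting
#     pw_mgr.get_cred()          # a second call falls back to the callback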
class CosignHandler(urllib.request.BaseHandler):
"""urllib.request style handler for Cosign protected URLs.
See http://weblogin.org
SYNOPSIS:
# Cosign relies on cookies.
cj = http.cookiejar.MozillaCookieJar('cookies.txt')
# We need an opener that handles cookies and any cosign redirects and
# logins.
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(cj),
# Here's the CosignHandler.
CosignHandler('https://cosign.login/page',
cj,
            CosignPasswordMgr(),
# If you've got one big program you'll probably
# want to keep the cookies in memory, but for
# lots of little programs we get single sign on
# behaviour by saving and loading to/from a
# file.
save_cookies=True
)
)
# Construct a request for the page we actually want
req = urllib.request.Request(
url='https://some.cosign.protected/url',
)
# make the request
res = opener.open(req)
# If all went well, res encapsulates the desired result, use res.read()
# to get at the data and so on.
"""
def __init__(self, login_url, cj, pw_mgr, save_cookies=True):
"""Construct new CosignHandler.
Args:
login_url: URL of cosign login page. Used to figure out if we
have been redirected to the login page after a failed
authentication, and as the URL to POST to to log in.
cj: An http.cookiejar.CookieJar or equivalent. You'll need
something that implements the FileCookieJar interface if
you want to load/save cookies.
pw_mgr: A CosignPasswordMgr object or equivalent. This
object will provide (and if necessary prompt for) the
username and password.
save_cookies: Whether or not to save cookies to a file after
each request. Required for single sign on between
different scripts.
"""
super().__init__()
self.login_url = login_url
self.cj = cj
self.pw_mgr = pw_mgr
self.save_cookies = save_cookies
# try to load cookies from file (specified when constructing cj)
try:
self.cj.load(ignore_discard=True)
except IOError:
pass
def https_response(self, req, res):
"""Handle https_response.
If the response is from the cosign login page (starts with
self.login_url) then log in to cosign and retry. Otherwise
continue as normal.
"""
if res.code == 200 and res.geturl().startswith(self.login_url + '?'):
# Been redirected to login page.
# We'll need the cosign cookies later
self.cj.extract_cookies(res, req)
# Grab a username and password.
data = urllib.parse.urlencode(self.pw_mgr.get_cred())
# Construct a login POST request to the login page.
req2 = urllib.request.Request(
self.login_url,
data.encode('iso-8859-1'),
)
# We need a different opener that doesn't have a CosignHandler.
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(self.cj)
)
# Try the login
res2 = opener.open(req2)
# Cookies, cookies, cookies
self.cj.extract_cookies(res2, req2)
# We should be logged in, go back and get what was asked for
res = opener.open(req)
# If we end up back at the login page then login failed
if res.geturl().startswith(self.login_url + '?'):
raise Exception('Login failed.')
if self.save_cookies:
self.cj.extract_cookies(res,req)
self.cj.save(ignore_discard=True)
return res
| {
"content_hash": "369524fa28d399e3e9ad6f78104e64ec",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 78,
"avg_line_length": 33.67796610169491,
"alnum_prop": 0.5796007381311861,
"repo_name": "ActiveState/code",
"id": "7ad242832d5a4ae23fe7659d65725379e82bdf38",
"size": "5961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/578217_Cosign_Handler/recipe-578217.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
import numpy as np
from sklearn.linear_model import LogisticRegression
import torch
import torchvision.datasets
import torchvision.models
import torchvision.transforms
from rbm import RBM
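# Note: rbm is a local module shipped alongside this example, not a library.
# From the way it is used below, the assumed interface is roughly:
#   RBM(num_visible, num_hidden, k, use_cuda=False)
#   rbm.contrastive_divergence(batch) -> reconstruction error for the batch
#   rbm.sample_hidden(batch)          -> hidden-unit activations for the batch
# (a sketch of the assumption, not the actual class definition)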
########## CONFIGURATION ##########
BATCH_SIZE = 64
VISIBLE_UNITS = 784 # 28 x 28 images
HIDDEN_UNITS = 128
CD_K = 2
EPOCHS = 10
DATA_FOLDER = 'data/mnist'
CUDA = torch.cuda.is_available()
CUDA_DEVICE = 0
if CUDA:
torch.cuda.set_device(CUDA_DEVICE)
########## LOADING DATASET ##########
print('Loading dataset...')
train_dataset = torchvision.datasets.MNIST(root=DATA_FOLDER, train=True, transform=torchvision.transforms.ToTensor(), download=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE)
test_dataset = torchvision.datasets.MNIST(root=DATA_FOLDER, train=False, transform=torchvision.transforms.ToTensor(), download=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE)
########## TRAINING RBM ##########
print('Training RBM...')
rbm = RBM(VISIBLE_UNITS, HIDDEN_UNITS, CD_K, use_cuda=CUDA)
for epoch in range(EPOCHS):
epoch_error = 0.0
for batch, _ in train_loader:
batch = batch.view(len(batch), VISIBLE_UNITS) # flatten input data
if CUDA:
batch = batch.cuda()
batch_error = rbm.contrastive_divergence(batch)
epoch_error += batch_error
print('Epoch Error (epoch=%d): %.4f' % (epoch, epoch_error))
########## EXTRACT FEATURES ##########
print('Extracting features...')
train_features = np.zeros((len(train_dataset), HIDDEN_UNITS))
train_labels = np.zeros(len(train_dataset))
test_features = np.zeros((len(test_dataset), HIDDEN_UNITS))
test_labels = np.zeros(len(test_dataset))
for i, (batch, labels) in enumerate(train_loader):
batch = batch.view(len(batch), VISIBLE_UNITS) # flatten input data
if CUDA:
batch = batch.cuda()
train_features[i*BATCH_SIZE:i*BATCH_SIZE+len(batch)] = rbm.sample_hidden(batch).cpu().numpy()
train_labels[i*BATCH_SIZE:i*BATCH_SIZE+len(batch)] = labels.numpy()
for i, (batch, labels) in enumerate(test_loader):
batch = batch.view(len(batch), VISIBLE_UNITS) # flatten input data
if CUDA:
batch = batch.cuda()
test_features[i*BATCH_SIZE:i*BATCH_SIZE+len(batch)] = rbm.sample_hidden(batch).cpu().numpy()
test_labels[i*BATCH_SIZE:i*BATCH_SIZE+len(batch)] = labels.numpy()
########## CLASSIFICATION ##########
print('Classifying...')
clf = LogisticRegression()
clf.fit(train_features, train_labels)
predictions = clf.predict(test_features)
print('Result: %d/%d' % (sum(predictions == test_labels), test_labels.shape[0]))
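# Optional follow-up (a sketch; assumes scikit-learn, already imported above):
# report accuracy as a fraction instead of a raw count.
# from sklearn.metrics import accuracy_score
# print('Accuracy: %.4f' % accuracy_score(test_labels, predictions))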
| {
"content_hash": "b05d85ee920c5b99227af1e7d5b96210",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 132,
"avg_line_length": 28.462365591397848,
"alnum_prop": 0.6819040423120514,
"repo_name": "GabrielBianconi/pytorch-rbm",
"id": "943e3d95b5f3acc58f3c97ca34c37a11971d6f2a",
"size": "2647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnist_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6656"
}
],
"symlink_target": ""
} |
import re
import urlparse
import pprint
import os
import sys
import redis
from beaker.middleware import SessionMiddleware
from MacroExecutor import MacroExecutorPage, MacroExecutorWiki, MacroExecutorPreprocess
from PortalAuthenticatorOSIS import PortalAuthenticatorOSIS
from RequestContext import RequestContext
from PortalRest import PortalRest
from .OsisBeaker import OsisBeaker
from JumpScale import j
from gevent.pywsgi import WSGIServer
import gevent
import time
import mimeparse
import mimetypes
import urllib
import cgi
import JumpScale.grid.agentcontroller
BLOCK_SIZE = 4096
CONTENT_TYPE_JSON = 'application/json'
CONTENT_TYPE_JS = 'application/javascript'
CONTENT_TYPE_YAML = 'application/yaml'
CONTENT_TYPE_PLAIN = 'text/plain'
CONTENT_TYPE_HTML = 'text/html'
CONTENT_TYPE_PNG = 'image/png'
class PortalServer:
##################### INIT
def __init__(self):
self.hrd = j.application.instanceconfig
self.contentdirs = list()
self.libpath = j.html.getHtmllibDir()
self.started = False
self.epoch = time.time()
self.cfgdir="cfg"
j.core.portal.active=self
self.osis = j.core.osis.getClientByInstance()
self.pageKey2doc = {}
self.routes = {}
self.loadConfig()
macroPathsPreprocessor = ["macros/preprocess"]
macroPathsWiki = ["macros/wiki"]
macroPathsPage = ["macros/page"]
self.macroexecutorPreprocessor = MacroExecutorPreprocess(macroPathsPreprocessor)
self.macroexecutorPage = MacroExecutorPage(macroPathsPage)
self.macroexecutorWiki = MacroExecutorWiki(macroPathsWiki)
self.bootstrap()
session_opts = {
'session.cookie_expires': False,
'session.type': 'OsisBeaker',
'session.namespace_class': OsisBeaker,
'session.namespace_args': {'client': self.osis},
'session.data_dir': '%s' % j.system.fs.joinPaths(j.dirs.varDir, "beakercache")
}
self._router = SessionMiddleware(self.router, session_opts)
self._webserver = WSGIServer((self.listenip, self.port), self._router)
# wwwroot = wwwroot.replace("\\", "/")
# while len(wwwroot) > 0 and wwwroot[-1] == "/":
# wwwroot = wwwroot[:-1]
# self.wwwroot = wwwroot
self.confluence2htmlconvertor = j.tools.docgenerator.getConfluence2htmlConvertor()
self.activejobs = list()
self.jobids2greenlets = dict()
self.schedule1min = {}
self.schedule15min = {}
self.schedule60min = {}
self.rediscache=redis.StrictRedis(host='localhost', port=9999, db=0)
self.redisprod=redis.StrictRedis(host='localhost', port=9999, db=0)
self.jslibroot=j.system.fs.joinPaths(j.dirs.baseDir,"apps","portals","jslib")
self.auth=PortalAuthenticatorOSIS(self.osis)
self.loadSpaces()
self.rest=PortalRest(self)
def loadConfig(self):
def replaceVar(txt):
# txt = txt.replace("$base", j.dirs.baseDir).replace("\\", "/")
txt = txt.replace("$appdir", j.system.fs.getcwd()).replace("\\", "/")
txt = txt.replace("$vardir", j.dirs.varDir).replace("\\", "/")
txt = txt.replace("$htmllibdir", j.html.getHtmllibDir()).replace("\\", "/")
txt = txt.replace("\\", "/")
return txt
######INIT FILE
ini = j.tools.inifile.open(self.cfgdir + "/portal.cfg")
if ini.checkParam("main", "appdir"):
self.appdir = replaceVar(ini.getValue("main", "appdir"))
self.appdir=self.appdir.replace("$base",j.dirs.baseDir)
else:
self.appdir = j.system.fs.getcwd()
self.getContentDirs() #contentdirs need to be loaded before we go to other dir of base server
j.system.fs.changeDir(self.appdir)
self.listenip = '0.0.0.0'
if ini.checkSection('main') and ini.checkParam('main', 'listenip'):
self.listenip = ini.getValue('main', 'listenip')
self.port = int(ini.getValue("main", "webserverport"))
self.addr = ini.getValue("main", "pubipaddr")
self.logdir= j.system.fs.joinPaths(j.dirs.logDir,"portal",str(self.port))
j.system.fs.createDir(self.logdir)
self.secret = ini.getValue("main", "secret")
self.admingroups = ini.getValue("main", "admingroups").split(",")
self.filesroot = replaceVar(ini.getValue("main", "filesroot"))
j.system.fs.createDir(self.filesroot)
if ini.checkParam('main', 'defaultspace'):
self.defaultspace = ini.getValue('main', 'defaultspace') or 'system'
else:
self.defaultspace = 'system'
        if ini.checkParam('main', 'defaultpage'):
self.defaultpage = ini.getValue('main', 'defaultpage') or ""
else:
self.defaultpage = ""
self.getContentDirs()
def reset(self):
self.routes={}
self.loadConfig()
self.bootstrap()
j.core.codegenerator.resetMemNonSystem()
j.core.specparser.resetMemNonSystem()
# self.actorsloader.scan(path=self.contentdirs,reset=True) #do we need to load them all
self.bucketsloader = j.core.portalloader.getBucketsLoader()
self.spacesloader = j.core.portalloader.getSpacesLoader()
self.loadSpaces()
def bootstrap(self):
self.actors = {} # key is the applicationName_actorname (lowercase)
self.actorsloader = j.core.portalloader.getActorsLoader()
self.app_actor_dict = {}
self.taskletengines = {}
self.actorsloader.reset()
# self.actorsloader._generateLoadActor("system", "contentmanager", actorpath="%s/apps/portalbase/system/system__contentmanager/"%j.dirs.baseDir)
# self.actorsloader._generateLoadActor("system", "master", actorpath="system/system__master/")
# self.actorsloader._generateLoadActor("system", "usermanager", actorpath="system/system__usermanager/")
self.actorsloader.scan(self.contentdirs)
self.actorsloader.getActor("system", "contentmanager")
self.actorsloader.getActor("system", "usermanager")
def loadSpaces(self):
self.bucketsloader = j.core.portalloader.getBucketsLoader()
self.spacesloader = j.core.portalloader.getSpacesLoader()
self.bucketsloader.scan(self.contentdirs)
self.spacesloader.scan(self.contentdirs)
if "system" not in self.spacesloader.spaces:
raise RuntimeError("could not find system space")
self.spacesloader.spaces["system"].loadDocProcessor() #need to make sure we have content for the systemspace
def getContentDirs(self):
"""
walk over known content dirs & execute loader on it
"""
cfgpath = j.system.fs.joinPaths(self.cfgdir, "contentdirs.cfg")
def append(path):
path=j.system.fs.pathNormalize(path)
if path not in self.contentdirs:
self.contentdirs.append(path)
if j.system.fs.exists(cfgpath):
wikicfg = j.system.fs.fileGetContents(cfgpath)
paths = wikicfg.split("\n")
for path in paths:
path = path.strip()
if path=="" or path[0]=="#":
continue
path=path.replace("\\","/")
if path[0] != "/" and path.find(":") == -1:
path = j.system.fs.joinPaths(j.system.fs.getParent(self.cfgdir), path)
append(path)
#add own base path
self.basepath = j.system.fs.joinPaths(j.system.fs.getParent(self.cfgdir), "base")
j.system.fs.createDir(self.basepath)
append(self.basepath)
#add base path of parent portal
appdir = self.appdir
append(j.system.fs.joinPaths(appdir, "wiki"))
append(j.system.fs.joinPaths(appdir, "system"))
def unloadActorFromRoutes(self, appname, actorname):
for key in self.routes.keys():
appn, actorn, remaining = key.split("_", 2)
# print appn+" "+appname+" "+actorn+" "+actorname
if appn == appname and actorn == actorname:
self.routes.pop(key)
##################### USER RIGHTS
def getUserRight(self, ctx, space):
if space == "" or space not in self.spacesloader.spaces:
space = "system"
spaceobject = self.spacesloader.spaces[space]
# print "spaceobject"
# print spaceobject.model
if "user" in ctx.env['beaker.session']:
username = ctx.env['beaker.session']["user"]
else:
username = ""
if username == "":
right = ""
else:
if username=="guest":
groupsusers=["guest","guests"]
else:
groupsusers=self.auth.getGroups(username)
right = ""
if "admin" in groupsusers:
right = "*"
# print "groupsusers:%s"%groupsusers
if right == "":
for groupuser in groupsusers:
if groupuser in spaceobject.model.acl:
# found match
right = spaceobject.model.acl[groupuser]
break
# if right == "":
# #check bitbucket
# for key,acl in spaceobject.model.acl.iteritems():
# if key.find("bitbucket")==0:
# from IPython import embed
# print "DEBUG NOW ooooooo"
# embed()
if right == "*":
right = "rwa"
# print "right:%s" % right
return username, right
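    # Illustrative shape of the acl consulted above (an assumption inferred from
    # the lookups here, not taken from the actual model definition):
    #   spaceobject.model.acl == {"admin": "*", "editors": "rw", "guests": "r"}
    # where "*" is expanded to the full right string "rwa".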
def getUserFromCTX(self,ctx):
return str(ctx.env["beaker.session"]["user"])
def getGroupsFromCTX(self,ctx):
groups=self.auth.getGroups(ctx.env["beaker.session"]["user"])
return [str(item.lower()) for item in groups]
def isAdminFromCTX(self,ctx):
groups=self.getGroupsFromCTX(ctx)
for gr in groups:
if gr in self.admingroups:
return True
return False
def isLoggedInFromCTX(self,ctx):
user=self.getUserFromCTX(ctx)
        if user != "" and user != "guest":
return True
return False
##################### process pages, get docs
def getpage(self):
page = j.tools.docgenerator.pageNewHTML("index.html", htmllibPath="/jslib")
return page
def sendpage(self, page, start_response):
contenttype = "text/html"
start_response('200 OK', [('Content-Type', contenttype), ])
return [page.getContent()]
def getDoc(self, space, name, ctx, params={}):
print "GETDOC:%s" % space
space = space.lower()
name = name.lower()
if name in ["login", "error", "accessdenied", "pagenotfound"]:
right = "r"
if space == "" and name == "":
space = self.defaultspace
name = self.defaultpage
username, right = self.getUserRight(ctx, space)
print "# space:%s name:%s user:%s right:%s" % (space, name, username, right)
if "r" not in right:
space = 'system'
name = "accessdenied"
if name != "accessdenied" and name != "pagenotfound":
# check security
if right == "":
params["space"] = space
params["page"] = name
doc, params = self.getDoc(space, "accessdenied", ctx, params=params)
return doc, params
else:
right = "r"
if space not in self.spacesloader.spaces:
if space == "system":
raise RuntimeError("wiki has not loaded system space, cannot continue")
print "could not find space %s" % space
doc, params = self.getDoc("system", "pagenotfound", ctx, params)
if "space" not in params:
params["space"] = space
if "page" not in params:
params["page"] = name
print "could not find space %s" % space
ctx.params["error"] = "Could not find space %s\n" % space
else:
spaceObject = self.spacesloader.getLoaderFromId(space)
if spaceObject.docprocessor == None:
spaceObject.loadDocProcessor(force=True) # dynamic load of space
spacedocgen = spaceObject.docprocessor
if name != "" and name in spacedocgen.name2doc:
doc = spacedocgen.name2doc[name]
else:
if name == "accessdenied":
# means the accessdenied page does not exist
doc, params = self.getDoc("system", "accessdenied", ctx, params)
return doc, params
if name == "pagenotfound":
# means the nofound page does not exist
doc, params = self.getDoc("system", "pagenotfound", ctx, params)
ctx.start_response("404 Not found", [])
return doc, params
if name == "":
if space in spacedocgen.name2doc:
doc = spacedocgen.name2doc[space]
elif "home" in spacedocgen.name2doc:
doc = spacedocgen.name2doc["home"]
else:
ctx.params["path"] = "space:%s pagename:%s" % (space, name)
# print ctx.params["path"]
if "space" not in params:
params["space"] = space
if "page" not in params:
params["page"] = name
doc, params = self.getDoc(space, "pagenotfound", ctx, params)
else:
ctx.params["path"] = "space:%s pagename:%s" % (space, name)
doc, params = self.getDoc(space, "pagenotfound", ctx, params)
ctx.params["rights"] = right
doc.loadFromDisk()
if name == "pagenotfound":
ctx.start_response("404 Not found", [])
return doc, params
def returnDoc(self, ctx, start_response, space, docname, extraParams={}):
doc, params = self.getDoc(space, docname, ctx, params=ctx.params)
if doc.dirty or "reload" in ctx.params:
doc.loadFromDisk()
doc.preprocess()
ctx.params.update(extraParams)
# doc.applyParams(ctx.params)
content,doc = doc.executeMacrosDynamicWiki(paramsExtra=extraParams, ctx=ctx)
page = self.confluence2htmlconvertor.convert(content, doc=doc, requestContext=ctx, page=self.getpage(), paramsExtra=ctx.params)
if not 'postprocess' in page.processparameters or page.processparameters['postprocess']:
page.body = page.body.replace("$$space", space)
page.body = page.body.replace("$$page", doc.original_name)
page.body = page.body.replace("$$path", doc.path)
page.body = page.body.replace("$$querystr", ctx.env['QUERY_STRING'])
page.body = page.body.replace("$$$menuright", "")
if "todestruct" in doc.__dict__:
doc.destructed = True
ctx.start_response('200 OK', [('Content-Type', "text/html"), ])
return page
def processor_page(self, environ, start_response, wwwroot, path, prefix="", webprefix="", index=False,includedocs=False,ctx=None,space=None):
def indexignore(item):
ext = item.split(".")[-1].lower()
if ext in ["pyc", "pyo", "bak"]:
return True
if item[0] == "_":
return True
if item[0] == ".":
return True
return False
def formatContent(contenttype, path, template, start_response):
content = j.system.fs.fileGetContents(path)
page = self.getpage()
page.addCodeBlock(content, template, edit=True)
start_response('200 OK', [('Content-Type', contenttype), ])
return [str(page)]
def processHtml(contenttype, path, start_response,ctx,space):
content = j.system.fs.fileGetContents(path)
r = r"\[\[.*\]\]" #@todo does not seem right to me
for match in j.codetools.regex.yieldRegexMatches(r, content):
docname = match.founditem.replace("[", "").replace("]", "")
doc, params = self.getDoc(space, docname, ctx, params=ctx.params)
if doc.name=='pagenotfound':
content=content.replace(match.founditem,"*****CONTENT '%s' NOT FOUND******"%docname)
else:
content2,doc = doc.executeMacrosDynamicWiki(paramsExtra={}, ctx=ctx)
page = self.confluence2htmlconvertor.convert(content2, doc=doc, requestContext=ctx, page=self.getpage(), paramsExtra=ctx.params)
page.body = page.body.replace("$$space", space)
page.body = page.body.replace("$$page", doc.original_name)
page.body = page.body.replace("$$path", doc.path)
page.body = page.body.replace("$$querystr", ctx.env['QUERY_STRING'])
page.body = page.body.replace("$$$menuright", "")
content=content.replace(match.founditem,page.body)
start_response('200 OK', [('Content-Type', "text/html"), ])
return [content]
def removePrefixes(path):
path = path.replace("\\", "/")
path = path.replace("//", "/")
path = path.replace("//", "/")
while len(path) > 0 and path[0] == "/":
path = path[1:]
while path.find(webprefix + "/") == 0:
path = path[len(webprefix) + 1:]
while path.find(prefix + "/") == 0:
path = path[len(prefix) + 1:]
return path
def send_file(file_path, size):
# print "sendfile:%s" % path
f = open(file_path, "rb")
block = f.read(BLOCK_SIZE * 10)
BLOCK_SIZE2 = 0
# print "%s %s" % (file_path,size)
while BLOCK_SIZE2 < size:
BLOCK_SIZE2 += len(block)
# print BLOCK_SIZE2
# print len(block)
yield block
block = f.read(BLOCK_SIZE)
# print "endfile"
wwwroot = wwwroot.replace("\\", "/").strip()
path = removePrefixes(path)
# print "wwwroot:%s" % wwwroot
if not wwwroot.replace("/", "") == "":
pathfull = wwwroot + "/" + path
else:
pathfull = path
contenttype = "text/html"
content = ""
headers = list()
ext = path.split(".")[-1].lower()
contenttype = mimetypes.guess_type(pathfull)[0]
if path == "favicon.ico":
pathfull = "wiki/System/favicon.ico"
if not j.system.fs.exists(pathfull):
if j.system.fs.exists(pathfull + '.gz') and 'gzip' in environ.get('HTTP_ACCEPT_ENCODING'):
pathfull += ".gz"
headers.append(('Vary', 'Accept-Encoding'))
headers.append(('Content-Encoding', 'gzip'))
else:
print "error"
headers = [('Content-Type', contenttype), ]
start_response("404 Not found", headers)
return ["path %s not found" % path]
size = os.path.getsize(pathfull)
if ext == "html":
return processHtml(contenttype, pathfull, start_response,ctx,space)
elif ext == "wiki":
contenttype = "text/html"
# return formatWikiContent(pathfull,start_response)
return formatContent(contenttype, pathfull, "python", start_response)
elif ext == "py":
contenttype = "text/html"
return formatContent(contenttype, pathfull, "python", start_response)
elif ext == "spec":
contenttype = "text/html"
return formatContent(contenttype, pathfull, "python", start_response)
# print contenttype
status = '200 OK'
headers.append(('Content-Type', contenttype))
headers.append(("Content-length", str(size)))
headers.append(("Cache-Control", 'public,max-age=3600'))
start_response(status, headers)
if content != "":
return [content]
else:
return send_file(pathfull, size)
def process_elfinder(self, path, ctx):
from JumpScale.portal.html import elFinder
db = j.db.keyvaluestore.getMemoryStore('elfinder')
rootpath = db.cacheGet(path)
options = {'root': rootpath, 'dotFiles': True}
con = elFinder.connector(options)
params = ctx.params.copy()
if 'rawdata' in params:
from JumpScale.portal.html import multipart
from cStringIO import StringIO
ctx.env.pop('wsgi.input', None)
stream = StringIO(ctx.params.pop('rawdata'))
forms, files = multipart.parse_form_data(ctx.env, stream=stream)
params.update(forms)
for key, value in files.iteritems():
if key == 'upload[]':
params['upload[]'] = dict()
params['upload[]'][value.filename] = value.file
if params.get('init') == '1':
params.pop('target', None)
status, header, response = con.run(params)
status = '%s' % status
headers = [ (k, v) for k,v in header.iteritems() ]
ctx.start_response(status, headers)
if 'download' not in params:
response = j.db.serializers.getSerializerType('j').dumps(response)
else:
response = response['content']
return [response]
def path2spacePagename(self, path):
pagename = ""
if path.find("?") != -1:
path = path.split("?")[0]
while len(path) > 0 and path[-1] == "/":
path = path[:-1]
if path.find("/") == -1:
space = path.strip()
else:
splitted = path.split("/")
space = splitted[0].lower()
pagename = splitted[-1].lower()
return space, pagename
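    # For example, following the logic above:
    #   path2spacePagename("system/overview/home?x=1") -> ("system", "home")
    #   path2spacePagename("system")                   -> ("system", "")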
##################### FORMATTING + logs/raiseerror
def log(self, ctx, user, path, space="", pagename=""):
path2 = j.system.fs.joinPaths(self.logdir, "user_%s.log" % user)
epoch = j.base.time.getTimeEpoch() + 3600 * 6
hrtime = j.base.time.epoch2HRDateTime(epoch)
if False and self.geoIP != None: # @todo fix geoip, also make sure nginx forwards the right info
ee = self.geoIP.record_by_addr(ctx.env["REMOTE_ADDR"])
loc = "%s_%s_%s" % (ee["area_code"], ee["city"], ee["region_name"])
else:
loc = ""
msg = "%s|%s|%s|%s|%s|%s|%s\n" % (hrtime, ctx.env["REMOTE_ADDR"], epoch, space, pagename, path, loc)
j.system.fs.writeFile(path2, msg, True)
if space != "":
msg = "%s|%s|%s|%s|%s|%s|%s\n" % (hrtime, ctx.env["REMOTE_ADDR"], epoch, user, pagename, path, loc)
pathSpace = j.system.fs.joinPaths(self.logdir, "space_%s.log" % space)
j.system.fs.writeFile(pathSpace, msg, True)
def raiseError(self, ctx, msg="", msginfo="", errorObject=None, httpcode="500 Internal Server Error"):
"""
"""
if not ctx.checkFormat():
# error in format
eco = j.errorconditionhandler.getErrorConditionObject()
eco.errormessage = "only format supported = human or json, format is put with param &format=..."
eco.type = "INPUT"
print "WRONG FORMAT"
else:
if errorObject != None:
eco = errorObject
else:
eco = j.errorconditionhandler.getErrorConditionObject()
method = ctx.env["PATH_INFO"]
remoteAddress = ctx.env["REMOTE_ADDR"]
queryString = ctx.env["QUERY_STRING"]
eco.caller = remoteAddress
if msg != "":
eco.errormessage = msg
else:
eco.errormessage = ""
if msginfo != "":
                eco.errormessage += "\nmsginfo was:\n%s" % msginfo
if queryString != "":
eco.errormessage += "\nquerystr was:%s" % queryString
if method != "":
eco.errormessage += "\nmethod was:%s" % method
eco.process()
if ctx.fformat == "human" or ctx.fformat == "text":
if msginfo != None and msginfo != "":
msg += "\n<br>%s" % msginfo
else:
msg += "\n%s" % eco
msg = self._text2html(msg)
else:
# is json
# result=[]
# result["error"]=eco.obj2dict()
def todict(obj):
data = {}
for key, value in obj.__dict__.iteritems():
try:
data[key] = todict(value)
except AttributeError:
data[key] = value
return data
eco.tb=""
eco.frames=[]
msg = j.db.serializers.getSerializerType('j').dumps(todict(eco))
ctx.start_response(httpcode, [('Content-Type', 'text/html')])
j.console.echo("***ERROR***:%s : method %s from ip %s with params %s" % (
eco, method, remoteAddress, queryString), 2)
if j.application.debug:
return msg
else:
return "An unexpected error has occurred, please try again later."
def _text2html(self, text):
text = text.replace("\n", "<br>")
# text=text.replace(" "," ")
return text
def _text2htmlSerializer(self, content):
return self._text2html(pprint.pformat(content))
def _resultjsonSerializer(self, content):
return j.db.serializers.getSerializerType('j').dumps({"result": content})
def _resultyamlSerializer(self, content):
return j.code.object2yaml({"result": content})
def getMimeType(self, contenttype, format_types):
supported_types = ["text/plain", "text/html", "application/yaml", "application/json"]
CONTENT_TYPES = {
"text/plain": str,
"text/html": self._text2htmlSerializer,
"application/yaml": self._resultyamlSerializer,
"application/json": j.db.serializers.getSerializerType('j').dumps
}
if not contenttype:
serializer = format_types["text"]["serializer"]
return CONTENT_TYPE_HTML, serializer
else:
mimeType = mimeparse.best_match(supported_types, contenttype)
serializer = CONTENT_TYPES[mimeType]
return mimeType, serializer
def reformatOutput(self, ctx, result, restreturn=False):
FFORMAT_TYPES = {
"text": {"content_type": CONTENT_TYPE_HTML, "serializer": self._text2htmlSerializer},
"html": {"content_type": CONTENT_TYPE_HTML, "serializer": self._text2htmlSerializer},
"raw": {"content_type": CONTENT_TYPE_PLAIN, "serializer": str},
"jsonraw": {"content_type": CONTENT_TYPE_JSON, "serializer": j.db.serializers.getSerializerType('j').dumps},
"json": {"content_type": CONTENT_TYPE_JSON, "serializer": self._resultjsonSerializer},
"yaml": {"content_type": CONTENT_TYPE_YAML, "serializer": self._resultyamlSerializer}
}
if '_jsonp' in ctx.params:
result = {'httpStatus': ctx.httpStatus, 'httpMessage': ctx.httpMessage, 'body': result}
return CONTENT_TYPE_JS, "%s(%s);" % (ctx.params['_jsonp'], j.db.serializers.getSerializerType('j').dumps(result))
if ctx._response_started:
return None, result
fformat = ctx.fformat
if '_png' in ctx.params:
return CONTENT_TYPE_PNG, result
if "CONTENT_TYPE" not in ctx.env:
ctx.env['CONTENT_TYPE'] = CONTENT_TYPE_PLAIN
if ctx.env['CONTENT_TYPE'].find("form-") != -1:
ctx.env['CONTENT_TYPE'] = CONTENT_TYPE_PLAIN
# normally HTTP_ACCEPT defines the return type we should rewrite this
if fformat:
            # extra format parameter overrides http_accept header
return FFORMAT_TYPES[fformat]['content_type'], FFORMAT_TYPES[fformat]['serializer'](result)
else:
if 'HTTP_ACCEPT' in ctx.env:
returntype = ctx.env['HTTP_ACCEPT']
else:
returntype = ctx.env['CONTENT_TYPE']
content_type, serializer = self.getMimeType(returntype, FFORMAT_TYPES)
return content_type, serializer(result)
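    # Illustrative outcomes of the negotiation above (hypothetical requests):
    #   ctx.fformat == "json"     -> ('application/json', '{"result": ...}')
    #   ctx.fformat == "raw"      -> ('text/plain', str(result))
    #   Accept: application/yaml  -> ('application/yaml', yaml-serialized {"result": ...})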
##################### router
def startSession(self, ctx, path):
session = ctx.env['beaker.session']
if "authkey" in ctx.params:
# user is authenticated by a special key
key = ctx.params["authkey"]
if self.auth.existsKey(key):
username = self.auth.getUserFromKey(key)
session['user'] = username
session.save()
elif key == self.secret:
session['user'] = 'admin'
session.save()
else:
# check if authkey is a session
newsession = session.get_by_id(key)
if newsession:
session = newsession
ctx.env['beaker.session'] = session
else:
ctx.start_response('419 Authentication Timeout', [])
return False, [str(self.returnDoc(ctx, ctx.start_response, "system", "accessdenied", extraParams={"path": path}))]
if "user_logoff_" in ctx.params and not "user_login_" in ctx.params:
session.delete()
return False, [str(self.returnDoc(ctx, ctx.start_response, "system", "login", extraParams={"path": path}))]
if "user_login_" in ctx.params:
            # user has filled in their login details; this is the response to the posted form
name = ctx.params['user_login_']
if not ctx.params.has_key('passwd'):
secret=""
else:
secret = ctx.params['passwd']
if self.auth.authenticate(name, secret):
session['user'] = name
if "querystr" in session:
ctx.env['QUERY_STRING'] = session['querystr']
else:
ctx.env['QUERY_STRING'] = ""
session.save()
                # user is logging in from the login page; redirect him to home
if path.endswith('system/login'):
status = '302'
headers = [
('Location', "/"),
]
ctx.start_response(status, headers)
return False, [""]
else:
session['user'] = ""
session["querystr"] = ""
session.save()
return False, [str(self.returnDoc(ctx, ctx.start_response, "system", "accessdenied", extraParams={"path": path}))]
if "user" not in session or session["user"] == "":
session['user'] = "guest"
session.save()
if "querystr" in session:
session["querystr"] = ""
session.save()
return True, session
def _getParamsFromEnv(self, env, ctx):
params = urlparse.parse_qs(env["QUERY_STRING"])
# HTTP parameters can be repeated multiple times, i.e. in case of using <select multiple>
# Example: a=1&b=2&a=3
#
# urlparse.parse_qs returns a dictionary of names & list of values. Then it's simplified
# for lists with only a single element, e.g.
#
# {'a': ['1', '3'], 'b': ['2']}
#
# simplified to be
#
# {'a': ['1', '3'], 'b': '2'}
params = dict(((k, v) if len(v) > 1 else (k, v[0])) for k, v in params.items())
if env["REQUEST_METHOD"] in ("POST", "PUT"):
postData = env["wsgi.input"].read()
if postData.strip() == "":
return params
msg = "postdata cannot be empty"
self.raiseError(ctx, msg)
if env['CONTENT_TYPE'].find("application/json") != -1:
postParams = j.db.serializers.getSerializerType('j').loads(postData)
if postParams:
params.update(postParams)
return params
elif env['CONTENT_TYPE'].find("www-form-urlencoded") != -1:
params.update(dict(urlparse.parse_qsl(postData)))
return params
else:
params['rawdata'] = postData
return params
def router(self, environ, start_response):
path = environ["PATH_INFO"].lstrip("/")
print "path:%s" % path
pathparts = path.split('/')
if pathparts[0] == 'wiki':
pathparts = pathparts[1:]
if path.find("favicon.ico") != -1:
return self.processor_page(environ, start_response, self.filesroot, "favicon.ico", prefix="")
ctx = RequestContext(application="", actor="", method="", env=environ,
start_response=start_response, path=path, params=None)
ctx.params = self._getParamsFromEnv(environ, ctx)
if path.find("jslib/") == 0:
path = path[6:]
user = "None"
# self.log(ctx, user, path)
return self.processor_page(environ, start_response, self.jslibroot, path, prefix="jslib/")
if path.find("images/") == 0:
space, image = pathparts[1:3]
spaceObject = self.getSpace(space)
image = image.lower()
if image in spaceObject.docprocessor.images:
path2 = spaceObject.docprocessor.images[image]
return self.processor_page(environ, start_response, j.system.fs.getDirName(path2), j.system.fs.getBaseName(path2), prefix="images")
ctx.start_response('404', [])
if path.find("files/specs/") == 0:
path = path[6:]
user = "None"
self.log(ctx, user, path)
return self.processor_page(environ, start_response, self.filesroot, path, prefix="files/")
if path.find(".files") != -1:
user = "None"
self.log(ctx, user, path)
space = pathparts[0].lower()
path = "/".join(pathparts[2:])
sploader = self.spacesloader.getSpaceFromId(space)
filesroot = j.system.fs.joinPaths(sploader.model.path, ".files")
return self.processor_page(environ, start_response, filesroot, path, prefix="")
if path.find(".static") != -1:
user = "None"
self.log(ctx, user, path)
space, pagename = self.path2spacePagename(path)
space = pathparts[0].lower()
path = "/".join(pathparts[2:])
sploader = self.spacesloader.getSpaceFromId(space)
filesroot = j.system.fs.joinPaths(sploader.model.path, ".static")
return self.processor_page(environ, start_response, filesroot, path, prefix="",includedocs=True,ctx=ctx,space=space)
# user is logged in now
is_session, session = self.startSession(ctx, path)
if not is_session:
return session
user = session['user']
match = pathparts[0]
path = ""
if len(pathparts) > 1:
path = "/".join(pathparts[1:])
if match == "restmachine":
return self.rest.processor_rest(environ, start_response, path, human=False, ctx=ctx)
elif match == "elfinder":
return self.process_elfinder(path, ctx)
elif match == "restextmachine":
return self.rest.processor_restext(environ, start_response, path, human=False, ctx=ctx)
elif match == "rest":
space, pagename = self.path2spacePagename(path.strip("/"))
self.log(ctx, user, path, space, pagename)
return self.rest.processor_rest(environ, start_response, path, ctx=ctx)
elif match == "restext":
space, pagename = self.path2spacePagename(path.strip("/"))
self.log(ctx, user, path, space, pagename)
return self.rest.processor_restext(environ, start_response, path,
ctx=ctx)
elif match == "ping":
status = '200 OK'
headers = [
('Content-Type', "text/html"),
]
start_response(status, headers)
return ["pong"]
elif match == "files":
self.log(ctx, user, path)
return self.processor_page(environ, start_response, self.filesroot, path, prefix="files")
elif match == "specs":
return self.processor_page(environ, start_response, "specs", path, prefix="specs")
elif match == "appservercode":
return self.processor_page(environ, start_response, "code", path, prefix="code", webprefix="appservercode")
elif match == "lib":
# print self.libpath
return self.processor_page(environ, start_response, self.libpath, path, prefix="lib")
elif match == 'render':
return self.render(environ, start_response)
else:
path = '/'.join(pathparts)
ctx.params["path"] = '/'.join(pathparts)
space, pagename = self.path2spacePagename(path)
self.log(ctx, user, path, space, pagename)
return [str(self.returnDoc(ctx, start_response, space, pagename, {}))]
def render(self, environ, start_response):
path = environ["PATH_INFO"].lstrip("/")
query_string = environ["QUERY_STRING"].lstrip("/")
params = cgi.parse_qs(query_string)
content = params.get('content', [''])[0]
space = params.get('render_space', None)
if space:
space = space[0]
else:
start_response('200 OK', [('Content-Type', "text/html")])
return 'Parameter "space" not supplied'
doc = params.get('render_doc', None)
if doc:
doc = doc[0]
else:
start_response('200 OK', [('Content-Type', "text/html")])
return 'Parameter "doc" not supplied'
ctx = RequestContext(application="", actor="", method="", env=environ,
start_response=start_response, path=path, params=None)
ctx.params = self._getParamsFromEnv(environ, ctx)
doc, _ = self.getDoc(space, doc, ctx)
doc = doc.copy()
doc.source = content
doc.loadFromSource()
doc.preprocess()
content, doc = doc.executeMacrosDynamicWiki(ctx=ctx)
page = self.confluence2htmlconvertor.convert(content, doc=doc, requestContext=ctx, page=self.getpage(), paramsExtra=ctx.params)
if not 'postprocess' in page.processparameters or page.processparameters['postprocess']:
page.body = page.body.replace("$$space", space)
page.body = page.body.replace("$$page", doc.original_name)
page.body = page.body.replace("$$path", doc.path)
page.body = page.body.replace("$$querystr", ctx.env['QUERY_STRING'])
page.body = page.body.replace("$$$menuright", "")
if "todestruct" in doc.__dict__:
doc.destructed = True
start_response('200 OK', [('Content-Type', "text/html")])
return str(page)
def addRoute(self, function, appname, actor, method, paramvalidation={}, paramdescription={}, \
paramoptional={}, description="", auth=True, returnformat=None):
"""
@param function is the function which will be called as follows: function(webserver,path,params):
function can also be a string, then only the string will be returned
if str=='taskletengine' will directly call the taskletengine e.g. for std method calls from actors
        @appname e.g. system is the 1st part of the url which is routed http://localhost/appname/actor/method/
        @actor e.g. system is the 2nd part of the url which is routed http://localhost/appname/actor/method/
        @method e.g. "test" is the 3rd part of the url which is routed e.g. http://localhost/appname/actor/method/
@paramvalidation e.g. {"name":"\w+","color":""} the values are regexes
@paramdescription is optional e.g. {"name":"this is the description for name"}
@auth is for authentication if false then there will be no auth key checked
example function called
def test(self,webserver,path,params):
return 'hello world!!'
or without the self in the functioncall (when no class method)
what you return is being send to the browser
example call: http://localhost:9999/test?key=1234&color=dd&name=dd
"""
appname = appname.replace("_", ".")
actor = actor.replace("_", ".")
method = method.replace("_", ".")
self.app_actor_dict["%s_%s" % (appname, actor)] = 1
methoddict = {'get': 'GET', 'set': 'PUT', 'new': 'POST', 'delete': 'DELETE',
'find': 'GET', 'list': 'GET', 'datatables': 'GET', 'create': 'POST'}
self.routes["%s_%s_%s_%s" % ('GET', appname, actor, method)] = [function, paramvalidation, paramdescription, paramoptional, \
description, auth, returnformat]
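    # Illustrative registration (hypothetical actor/method names), matching the
    # docstring above:
    #   def test(self, webserver, path, params):
    #       return 'hello world!!'
    #   server.addRoute(test, appname="system", actor="test", method="test",
    #                   paramvalidation={"name": "\w+", "color": ""}, auth=False)
    # which the URL scheme described above maps to
    # http://localhost:<port>/system/test/test?name=dd&color=dd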
##################### SCHEDULING
def _timer(self):
"""
will remember time every 0.5 sec
"""
lfmid = 0
while True:
self.epoch = int(time.time())
if lfmid < self.epoch - 200:
lfmid = self.epoch
self.fiveMinuteId = j.base.time.get5MinuteId(self.epoch)
self.hourId = j.base.time.getHourId(self.epoch)
self.dayId = j.base.time.getDayId(self.epoch)
gevent.sleep(0.5)
def _minRepeat(self):
while True:
gevent.sleep(5)
for key in self.schedule1min.keys():
item, args, kwargs = self.schedule1min[key]
item(*args, **kwargs)
def _15minRepeat(self):
while True:
gevent.sleep(60 * 15)
for key in self.schedule15min.keys():
item, args, kwargs = self.schedule15min[key]
item(*args, **kwargs)
def _60minRepeat(self):
while True:
gevent.sleep(60 * 60)
for key in self.schedule60min.keys():
item, args, kwargs = self.schedule60min[key]
item(*args, **kwargs)
def getNow(self):
return self.epoch
def addSchedule1MinPeriod(self, name, method, *args, **kwargs):
self.schedule1min[name] = (method, args, kwargs)
def addSchedule15MinPeriod(self, name, method, *args, **kwargs):
self.schedule15min[name] = (method, args, kwargs)
def addSchedule60MinPeriod(self, name, method, *args, **kwargs):
self.schedule60min[name] = (method, args, kwargs)
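    # Illustrative scheduling (hypothetical task name and callable):
    #   server.addSchedule15MinPeriod("cleanup", some_cleanup_func, arg1, kw=2)
    # _15minRepeat() then calls some_cleanup_func(arg1, kw=2) on each pass of its loop.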
##################### START-STOP / get spaces/actors/buckets / addgreenlet
def start(self):
"""
        Start the web server and the background scheduling greenlets, serving
        the routes registered via addRoute().
        This method will block until an exception stops the server.
"""
TIMER = gevent.greenlet.Greenlet(self._timer)
TIMER.start()
S1 = gevent.greenlet.Greenlet(self._minRepeat)
S1.start()
S2 = gevent.greenlet.Greenlet(self._15minRepeat)
S2.start()
S3 = gevent.greenlet.Greenlet(self._60minRepeat)
S3.start()
j.console.echo("webserver started on port %s" % self.port)
self._webserver.serve_forever()
def stop(self):
self._webserver.stop()
def getSpaces(self):
return self.spacesloader.id2object.keys()
def getBuckets(self):
return self.bucketsloader.id2object.keys()
def getActors(self):
return self.actorsloader.id2object.keys()
def getSpace(self, name, ignore_doc_processor=False):
name = name.lower()
if name not in self.spacesloader.spaces:
raise RuntimeError("Could not find space %s" % name)
space = self.spacesloader.spaces[name]
if space.docprocessor == None and not ignore_doc_processor:
space.loadDocProcessor()
return space
def loadSpace(self, name):
space = self.getSpace(name)
space.loadDocProcessor()
return space
def getBucket(self, name):
if name not in self.bucketsloader.buckets:
raise RuntimeError("Could not find bucket %s" % name)
        bucket = self.bucketsloader.buckets[name]
return bucket
def addGreenlet(self, appName, greenlet):
"""
"""
greenletObject = greenlet()
if greenletObject.method == "":
raise RuntimeError("greenlet class needs to have a method")
if greenletObject.actor == "":
            raise RuntimeError("greenlet class needs to have an actor")
greenletObject.server = self
self.addRoute(function=greenletObject.wscall,
appname=appName,
actor=greenletObject.actor,
method=greenletObject.method,
paramvalidation=greenletObject.paramvalidation,
paramdescription=greenletObject.paramdescription,
paramoptional=greenletObject.paramoptional,
description=greenletObject.description, auth=greenletObject.auth)
def restartInProcess(self, app):
import fcntl
args = sys.argv[:]
args.insert(0, sys.executable)
apppath = j.system.fs.joinPaths(j.dirs.appDir, app)
if apppath == '.':
apppath = os.getcwd()
max_fd = 1024
for fd in range(3, max_fd):
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
os.chdir(apppath)
os.execv(sys.executable, args)
def __str__(self):
out=""
for key,val in self.__dict__.iteritems():
            if key[0] != "_" and key not in ["routes"]:
out+="%-35s : %s\n"%(key,val)
routes=",".join(self.routes.keys())
out+="%-35s : %s\n"%("routes",routes)
items=out.split("\n")
items.sort()
out="portalserver:"+"\n".join(items)
return out
__repr__ = __str__
| {
"content_hash": "3ca149524c2a6e3f579c1e6f77fc8d42",
"timestamp": "",
"source": "github",
"line_count": 1220,
"max_line_length": 152,
"avg_line_length": 38.722131147540985,
"alnum_prop": 0.5554920513960331,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "30b2836bbdf7e39b899bf213c246b41353b0e526",
"size": "47241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/portal/portal/PortalServer.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
} |
from django import template
def person(user):
"""
Renders a single user object.
"""
return {'user': user}
register = template.Library()
register.inclusion_tag('profile/person.html')(person)
| {
"content_hash": "f49ae99ac5e0aab4a7360033be6280dc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 53,
"avg_line_length": 20.8,
"alnum_prop": 0.6778846153846154,
"repo_name": "mvayngrib/startthedark",
"id": "c443d31667f01a77e9402edfc8cc88ad8ae1face",
"size": "208",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "profile/templatetags/profile_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "87686"
},
{
"name": "Python",
"bytes": "33295"
},
{
"name": "Shell",
"bytes": "99"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import inspect
import os
import pkgutil
import sys
from glob import glob
from importlib import import_module
import airflow
from airflow.hooks.base import BaseHook
from airflow.models.baseoperator import BaseOperator
from airflow.secrets import BaseSecretsBackend
from airflow.sensors.base import BaseSensorOperator
if __name__ != "__main__":
    raise Exception(
        "This file is intended to be executed as an executable program. You cannot use it as a module. "
        "To run this script, run the './list-integrations.py' command"
    )
AIRFLOW_ROOT = os.path.abspath(os.path.join(os.path.dirname(airflow.__file__), os.pardir))
def _find_clazzes(directory, base_class):
found_classes = set()
for module_finder, name, ispkg in pkgutil.iter_modules([directory]):
if ispkg:
continue
relative_path = os.path.relpath(module_finder.path, AIRFLOW_ROOT)
package_name = relative_path.replace("/", ".")
full_module_name = package_name + "." + name
try:
mod = import_module(full_module_name)
except ModuleNotFoundError:
print(f"Module {full_module_name} can not be loaded.", file=sys.stderr)
continue
clazzes = inspect.getmembers(mod, inspect.isclass)
integration_clazzes = [
clazz
for name, clazz in clazzes
if issubclass(clazz, base_class) and clazz.__module__.startswith(package_name)
]
for found_clazz in integration_clazzes:
found_classes.add(f"{found_clazz.__module__}.{found_clazz.__name__}")
return found_classes
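# Illustrative call (hypothetical output; the result depends on what is installed):
#   _find_clazzes(f"{AIRFLOW_ROOT}/airflow/hooks", BaseHook)
#   -> {"airflow.hooks.filesystem.FSHook", ...}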
program = "./" + os.path.basename(sys.argv[0])
HELP = """\
List operators, hooks, sensors, secrets backend in the installed Airflow.
You can combine this script with other tools e.g. awk, grep, cut, uniq, sort.
"""
EPILOG = f"""
Examples:
If you want to display only sensors, you can execute the following command.
{program} | grep sensors
If you want to display only secrets backend, you can execute the following command.
{program} | grep secrets
If you want to count the operators/sensors in each providers package, you can use the following command.
{program} | \\
grep providers | \\
grep 'sensors\\|operators' | \\
cut -d "." -f 3 | \\
uniq -c | \\
sort -n -r
"""
parser = argparse.ArgumentParser( # noqa
description=HELP, formatter_class=argparse.RawTextHelpFormatter, epilog=EPILOG
)
# argparse handle `-h/--help/` internally
parser.parse_args()
RESOURCE_TYPES = {
"secrets": BaseSecretsBackend,
"operators": BaseOperator,
"sensors": BaseSensorOperator,
"hooks": BaseHook,
}
for integration_base_directory, integration_class in RESOURCE_TYPES.items():
for integration_directory in glob(
f"{AIRFLOW_ROOT}/airflow/**/{integration_base_directory}", recursive=True
):
if "contrib" in integration_directory:
continue
for clazz_to_print in sorted(_find_clazzes(integration_base_directory, integration_class)):
print(clazz_to_print)
| {
"content_hash": "3e5df5ebe984308b2363f3bfdbf8c53c",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 104,
"avg_line_length": 32.90756302521008,
"alnum_prop": 0.6892236976506639,
"repo_name": "airbnb/airflow",
"id": "8a6863e9296f4c45259a93f71e29d83ee47a75da",
"size": "3916",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/tools/list-integrations.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36374"
},
{
"name": "HTML",
"bytes": "99535"
},
{
"name": "JavaScript",
"bytes": "891618"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "796220"
},
{
"name": "Shell",
"bytes": "9040"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from core.model import Model
from utils.init_weights import init_weights, normalized_columns_initializer
class A3CMlpMinisimModel(Model):
def __init__(self, args):
super(A3CMlpMinisimModel, self).__init__(args)
self.num_robots = args.num_robots
self.hist_len = args.hist_len
self.hidden_vb_dim = args.hidden_vb_dim
# build model
# 0. feature layers
self.fc1 = nn.Linear(self.input_dims[0] * self.input_dims[1], self.hidden_dim)
self.rl1 = nn.ReLU()
self.fc2 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl2 = nn.ReLU()
self.fc3 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl3 = nn.ReLU()
self.fc4 = nn.Linear(self.hidden_dim, self.hidden_dim)
self.rl4 = nn.ReLU()
# lstm
if self.enable_lstm:
self.lstm = nn.LSTMCell(self.hidden_dim, self.hidden_vb_dim, 1)
next_dim = self.hidden_vb_dim
else:
next_dim = self.hidden_dim
# 1. policy output
self.policy_5 = nn.Linear(next_dim + 2 * self.hist_len, self.output_dims)
self.policy_6 = nn.Softmax()
# 2. value output
self.value_5 = nn.Linear(next_dim + 2 * self.hist_len, 1)
self._reset()
def _init_weights(self):
self.apply(init_weights)
self.fc1.weight.data = normalized_columns_initializer(self.fc1.weight.data, 0.01)
self.fc1.bias.data.fill_(0)
self.fc2.weight.data = normalized_columns_initializer(self.fc2.weight.data, 0.01)
self.fc2.bias.data.fill_(0)
self.fc3.weight.data = normalized_columns_initializer(self.fc3.weight.data, 0.01)
self.fc3.bias.data.fill_(0)
self.fc4.weight.data = normalized_columns_initializer(self.fc4.weight.data, 0.01)
self.fc4.bias.data.fill_(0)
self.policy_5.weight.data = normalized_columns_initializer(self.policy_5.weight.data, 0.01)
self.policy_5.bias.data.fill_(0)
self.value_5.weight.data = normalized_columns_initializer(self.value_5.weight.data, 1.0)
self.value_5.bias.data.fill_(0)
self.lstm.bias_ih.data.fill_(0)
self.lstm.bias_hh.data.fill_(0)
def forward(self, x, lstm_hidden_vb=None):
if self.hist_len > 1:
target_data = x[:, :, self.input_dims[1]:self.input_dims[1] + 2 * self.num_robots * self.hist_len]
target_data = target_data.contiguous().view(target_data.size(0), 2 * self.num_robots * self.hist_len)
laser_scans = x[:, :, :self.input_dims[1]]
else:
target_data = x[:, self.input_dims[1]:self.input_dims[1] + 2 * self.num_robots]
target_data = target_data.contiguous().view(target_data.size(0), 2 * self.num_robots)
laser_scans = x[:, :self.input_dims[1]]
# TODO: contiguous here will slow everything down a lot?
x = laser_scans.contiguous().view(laser_scans.size(0), self.input_dims[0] * self.input_dims[1])
x = self.rl1(self.fc1(x))
x = self.rl2(self.fc2(x))
x = self.rl3(self.fc3(x))
x = self.rl4(self.fc4(x))
x = x.view(-1, self.hidden_dim)
if self.enable_lstm:
x, c = self.lstm(x, lstm_hidden_vb)
x_aug = torch.cat((x, target_data), 1)
p = self.policy_5(x_aug)
p = self.policy_6(p)
v = self.value_5(x_aug)
if self.enable_lstm:
return p, v, (x, c)
else:
return p, v
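# Rough shape expectation implied by the slicing in forward() (an assumption,
# since the args object is defined elsewhere): with hist_len > 1 the input is
# (batch, input_dims[0], input_dims[1] + 2 * num_robots * hist_len), i.e. the
# laser scans followed by per-robot target data, and lstm_hidden_vb carries the
# (h, c) state when enable_lstm is set.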
| {
"content_hash": "b72727941cc1cd8feb5ee449437c0164",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 113,
"avg_line_length": 40.35164835164835,
"alnum_prop": 0.6043028322440087,
"repo_name": "AlekseyZhelo/pytorch-rl",
"id": "b5748ee201ffde0582187fbc127a652613e9b4d7",
"size": "3672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/minisim/models/mini_wide.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "343662"
},
{
"name": "Shell",
"bytes": "5413"
}
],
"symlink_target": ""
} |
from fobi.base import FormElementPluginWidget
from . import UID
__title__ = 'fobi.contrib.plugins.form_elements.test.dummy.widgets'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('BaseDummyPluginWidget',)
class BaseDummyPluginWidget(FormElementPluginWidget):
"""Base dummy form element plugin widget."""
plugin_uid = UID
| {
"content_hash": "e4d61c67c9674ecbadc1b8b8de095fa0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 29.133333333333333,
"alnum_prop": 0.7231121281464531,
"repo_name": "mansonul/events",
"id": "6439786fc02f24136f6cabfe3aec78180d38f26f",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/contrib/plugins/form_elements/test/dummy/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "90251"
},
{
"name": "HTML",
"bytes": "186225"
},
{
"name": "JavaScript",
"bytes": "43221"
},
{
"name": "Python",
"bytes": "804726"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from run_manager import views
urlpatterns = [
url(r'^create_run/$', views.create_run),
url(r'^view_runs/$', views.view_runs),
url(r'^delete_run/$', views.delete_run),
url(r'^start_run/$', views.start_run),
url(r'^stop_run/$', views.stop_run),
url(r'^run_status/$', views.run_status),
url(r'^create_script/$', views.create_script),
url(r'^get_scripts/$', views.get_scripts),
url(r'^update_script/$', views.update_script),
url(r'^read_script/$', views.read_script),
url(r'^read_output_script/$', views.read_output_script),
url(r'^delete_script/$', views.delete_script),
url(r'^get_templates/$', views.get_templates),
url(r'^copy_template/$', views.copy_template),
url(r'^get_user/$', views.get_user),
url(r'^get_output_zip/$', views.get_output_zip),
url(r'^save_diagnostic_config/$', views.save_diagnostic_config),
url(r'^delete_diagnostic_config/$', views.delete_diagnostic_config),
url(r'^get_diagnostic_configs/$', views.get_diagnostic_configs),
url(r'^get_diagnostic_by_name/$', views.get_diagnostic_by_name),
url(r'^get_all_configs/$', views.get_all_configs),
url(r'^get_run_output/$', views.get_run_output),
]
| {
"content_hash": "b5cca6fff747e8e49bef1067369c0c73",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 72,
"avg_line_length": 44.17857142857143,
"alnum_prop": 0.6491511721907841,
"repo_name": "ACME-OUI/acme-web-fe",
"id": "7d287b4d582f1888d0f85d9ed2f492e1358811ff",
"size": "1237",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/run_manager/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "198516"
},
{
"name": "HTML",
"bytes": "80359"
},
{
"name": "JavaScript",
"bytes": "1133371"
},
{
"name": "Python",
"bytes": "235717"
},
{
"name": "Shell",
"bytes": "139530"
}
],
"symlink_target": ""
} |
"""Support for Fronius devices."""
import copy
from datetime import timedelta
import logging
import voluptuous as vol
from pyfronius import Fronius
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_RESOURCE,
CONF_SENSOR_TYPE,
CONF_DEVICE,
CONF_MONITORED_CONDITIONS,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
CONF_SCOPE = "scope"
TYPE_INVERTER = "inverter"
TYPE_STORAGE = "storage"
TYPE_METER = "meter"
TYPE_POWER_FLOW = "power_flow"
SCOPE_DEVICE = "device"
SCOPE_SYSTEM = "system"
DEFAULT_SCOPE = SCOPE_DEVICE
DEFAULT_DEVICE = 0
DEFAULT_INVERTER = 1
DEFAULT_SCAN_INTERVAL = timedelta(seconds=60)
SENSOR_TYPES = [TYPE_INVERTER, TYPE_STORAGE, TYPE_METER, TYPE_POWER_FLOW]
SCOPE_TYPES = [SCOPE_DEVICE, SCOPE_SYSTEM]
def _device_id_validator(config):
"""Ensure that inverters have default id 1 and other devices 0."""
config = copy.deepcopy(config)
for cond in config[CONF_MONITORED_CONDITIONS]:
if CONF_DEVICE not in cond:
if cond[CONF_SENSOR_TYPE] == TYPE_INVERTER:
cond[CONF_DEVICE] = DEFAULT_INVERTER
else:
cond[CONF_DEVICE] = DEFAULT_DEVICE
return config
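# Editor's illustrative note (not part of the original component): given parsed
# monitored conditions such as
#
#     [{CONF_SENSOR_TYPE: TYPE_INVERTER},
#      {CONF_SENSOR_TYPE: TYPE_METER},
#      {CONF_SENSOR_TYPE: TYPE_METER, CONF_DEVICE: 2}]
#
# _device_id_validator fills in CONF_DEVICE = DEFAULT_INVERTER (1) for the
# inverter entry and CONF_DEVICE = DEFAULT_DEVICE (0) for the bare meter entry,
# while leaving the explicitly configured device id untouched.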
PLATFORM_SCHEMA = vol.Schema(
vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Required(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Optional(CONF_SCOPE, default=DEFAULT_SCOPE): vol.In(
SCOPE_TYPES
),
vol.Optional(CONF_DEVICE): vol.All(
vol.Coerce(int), vol.Range(min=0)
),
}
],
),
}
),
_device_id_validator,
)
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up of Fronius platform."""
session = async_get_clientsession(hass)
fronius = Fronius(session, config[CONF_RESOURCE])
scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
adapters = []
# Creates all adapters for monitored conditions
for condition in config[CONF_MONITORED_CONDITIONS]:
device = condition[CONF_DEVICE]
sensor_type = condition[CONF_SENSOR_TYPE]
scope = condition[CONF_SCOPE]
name = "Fronius {} {} {}".format(
condition[CONF_SENSOR_TYPE].replace("_", " ").capitalize(),
device if scope == SCOPE_DEVICE else SCOPE_SYSTEM,
config[CONF_RESOURCE],
)
if sensor_type == TYPE_INVERTER:
if scope == SCOPE_SYSTEM:
adapter_cls = FroniusInverterSystem
else:
adapter_cls = FroniusInverterDevice
elif sensor_type == TYPE_METER:
if scope == SCOPE_SYSTEM:
adapter_cls = FroniusMeterSystem
else:
adapter_cls = FroniusMeterDevice
elif sensor_type == TYPE_POWER_FLOW:
adapter_cls = FroniusPowerFlow
else:
adapter_cls = FroniusStorage
adapters.append(adapter_cls(fronius, name, device, async_add_entities))
    # Creates a closure that fetches an update when called
def adapter_data_fetcher(data_adapter):
async def fetch_data(*_):
await data_adapter.async_update()
return fetch_data
# Set up the fetching in a fixed interval for each adapter
for adapter in adapters:
fetch = adapter_data_fetcher(adapter)
# fetch data once at set-up
await fetch()
async_track_time_interval(hass, fetch, scan_interval)
class FroniusAdapter:
"""The Fronius sensor fetching component."""
def __init__(self, bridge, name, device, add_entities):
"""Initialize the sensor."""
self.bridge = bridge
self._name = name
self._device = device
self._fetched = {}
self.sensors = set()
self._registered_sensors = set()
self._add_entities = add_entities
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def data(self):
"""Return the state attributes."""
return self._fetched
async def async_update(self):
"""Retrieve and update latest state."""
values = {}
try:
values = await self._update()
except ConnectionError:
_LOGGER.error("Failed to update: connection error")
except ValueError:
_LOGGER.error(
"Failed to update: invalid response returned."
"Maybe the configured device is not supported"
)
if not values:
return
attributes = self._fetched
# Copy data of current fronius device
for key, entry in values.items():
# If the data is directly a sensor
if "value" in entry:
attributes[key] = entry
self._fetched = attributes
# Add discovered value fields as sensors
# because some fields are only sent temporarily
new_sensors = []
for key in attributes:
if key not in self.sensors:
self.sensors.add(key)
_LOGGER.info("Discovered %s, adding as sensor", key)
new_sensors.append(FroniusTemplateSensor(self, key))
self._add_entities(new_sensors, True)
# Schedule an update for all included sensors
for sensor in self._registered_sensors:
sensor.async_schedule_update_ha_state(True)
async def _update(self):
"""Return values of interest."""
pass
async def register(self, sensor):
"""Register child sensor for update subscriptions."""
self._registered_sensors.add(sensor)
class FroniusInverterSystem(FroniusAdapter):
"""Adapter for the fronius inverter with system scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_system_inverter_data()
class FroniusInverterDevice(FroniusAdapter):
"""Adapter for the fronius inverter with device scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_inverter_data(self._device)
class FroniusStorage(FroniusAdapter):
"""Adapter for the fronius battery storage."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_storage_data(self._device)
class FroniusMeterSystem(FroniusAdapter):
"""Adapter for the fronius meter with system scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_system_meter_data()
class FroniusMeterDevice(FroniusAdapter):
"""Adapter for the fronius meter with device scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_meter_data(self._device)
class FroniusPowerFlow(FroniusAdapter):
"""Adapter for the fronius power flow."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_power_flow()
class FroniusTemplateSensor(Entity):
"""Sensor for the single values (e.g. pv power, ac power)."""
def __init__(self, parent: FroniusAdapter, name):
"""Initialize a singular value sensor."""
self._name = name
self.parent = parent
self._state = None
self._unit = None
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format(
self._name.replace("_", " ").capitalize(), self.parent.name
)
@property
def state(self):
"""Return the current state."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def should_poll(self):
"""Device should not be polled, returns False."""
return False
async def async_update(self):
"""Update the internal state."""
state = self.parent.data.get(self._name)
self._state = state.get("value")
self._unit = state.get("unit")
async def async_added_to_hass(self):
"""Register at parent component for updates."""
await self.parent.register(self)
def __hash__(self):
"""Hash sensor by hashing its name."""
return hash(self.name)
| {
"content_hash": "cc6ad782d71d62dc1ccf5edd4cc7dbf5",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 86,
"avg_line_length": 31.187713310580204,
"alnum_prop": 0.6052746771722478,
"repo_name": "fbradyirl/home-assistant",
"id": "ff0694afaab38c963052354ad2dcd1efb0fba3ed",
"size": "9138",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/fronius/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
} |
import random
import sys
import re
# Class containing the whole problem set.
# We can set the maximum number of elements and their maximum values.
class MemProblem():
    # The constructor takes the datastructs, membanks and conflicts lists plus
    # the penalty; the corresponding counts are derived from them.
def __init__(self, datastructs, membanks, conflicts, penalty):
self.datastructs_n = len(datastructs)
self.membanks_n = len(membanks)
self.conflicts_n = len(conflicts)
self.conflicts = conflicts
self.membanks = membanks
self.datastructs = datastructs
self.penalty = penalty
self.membanks.append({'capacity': sys.maxint})
self.X = []
for i in range(0, len(self.datastructs)):
self.X.append([False] * len(self.membanks))
        # Capacity used per membank, including external memory. Kept as an
        # instance attribute so separate problems do not share state.
        self.cap_used = [0] * (self.membanks_n + 1)
# Print solution
def print_problem(self):
print self.datastructs
print self.membanks
print self.conflicts
print self.penalty
def print_solution(self):
for row in self.X:
print row
def results(self):
return "{cost}, {usage}".format(cost=self.calculate_cost(), usage=self.print_usage())
    def print_usage(self):
        used_capacity_acc = 0
        capacity_acc = 0
        for j in range(0, len(self.membanks)-1):
            used_capacity = 0
            for ai in range(0, len(self.X)):
                if self.X[ai][j] == True:
                    used_capacity += self.datastructs[ai]['size']
            used_capacity_acc += used_capacity
            capacity_acc += self.membanks[j]['capacity']
        return float(used_capacity_acc)/float(capacity_acc)
def calculate_cost(self):
cost = 0
# Data structs cost
for i in range(0, len(self.datastructs)):
for j in range(0, len(self.membanks)-1):
if self.X[i][j] == True:
cost += self.datastructs[i]['cost']
# Conflicts cost
for conf in self.conflicts:
cost = cost + conf['cost'] * conf['status']
# External storage cost
for i in range(0, len(self.X)):
if self.X[i][-1] == True:
cost += self.penalty * self.datastructs[i]['cost']
return cost
# Check correctness of solution.
# Returns false if incorrect.
def is_correct(self):
#Check feasability of the solution.
for i in self.X:
trues = 0
for j in i:
if j == True:
trues += 1
if trues != 1:
return False
# We'll need to set the correct cap_used.
for i in range(len(self.cap_used)):
self.cap_used[i] = 0
# Ensure that each datastructure fits into its membank.
# Ensure that each membank is not overflowed.
for i in range(len(self.X)):
for j in range(len(self.X[i])):
if self.X[i][j] == True:
ds = self.datastructs[i]['size']
mem = self.membanks[j]['capacity']
self.cap_used[j] += ds
if self.cap_used[j] > mem:
return False
return True
def cost(self, i, j):
cost = self.datastructs[i]['cost'] #Access cost of i
if j == len(self.membanks)-1:
cost = cost*self.penalty
for conflict in self.conflicts:
if conflict['a'] == i or conflict['b'] == i:
if self.X[i][j] == True:
cost += conflict['cost'] * self.conflict_status(conflict) * conflict['cost']
else:
self.X[i][j] = True
cost += conflict['cost'] * self.conflict_status(conflict) * conflict['cost']
self.X[i][j] = False
return cost
def whereis(self, i):
for j in range(0, len(self.X[i])):
if self.X[i][j] == True:
return j
return None
def update_conflicts(self):
for conflict in self.conflicts:
conflict['status'] = self.conflict_status(conflict)
def conflict_status(self, conflict):
cost = 0
a = conflict['a']
b = conflict['b']
j1 = self.whereis(a)
j2 = self.whereis(b)
if j1 == None or j2 == None:
return 0
elif j1 == j2:
if j1 == len(self.membanks)-1 and j2 == len(self.membanks)-1:
return self.penalty*2
else:
return 1
elif j1 == len(self.membanks)-1 or j2 == len(self.membanks)-1:
return self.penalty
return 0
def write_file(self, filename):
f = open(filename, 'w')
f.write('num_data_structures = {datastructs};\n'.format(datastructs=self.datastructs_n))
f.write('num_memory_banks = {membanks};\n'.format(membanks=self.membanks_n))
f.write('p = {penalty};\n'.format(penalty=self.penalty))
f.write('conflicts = {conflicts};\n\n'.format(conflicts=self.conflicts_n))
#Datastruct sizes
f.write('s = [')
for datastruct in self.datastructs[:-1]:
f.write('{size}, '.format(size=datastruct['size']))
f.write('{size}];\n'.format(size=self.datastructs[-1]['size']))
#Membank sizes
f.write('c = [')
for membank in self.membanks[:-2]:
f.write('{capacity}, '.format(capacity=membank['capacity']))
f.write('{capacity}];\n'.format(capacity=self.membanks[-2]['capacity']))
#Datastruct costs
f.write('e = [')
for datastruct in self.datastructs[:-1]:
f.write('{cost}, '.format(cost=datastruct['cost']))
f.write('{cost}];\n'.format(cost=self.datastructs[-1]['cost']))
#Conflict costs
f.write('d = [')
for conflict in self.conflicts[:-1]:
f.write('{cost}, '.format(cost=conflict['cost']))
f.write('{cost}];\n\n'.format(cost=self.conflicts[-1]['cost']))
#A
f.write('A = [')
for conflict in self.conflicts[:-1]:
f.write('{a}, '.format(a=conflict['a']))
f.write('{a}];\n'.format(a=self.conflicts[-1]['a']))
#B
f.write('B = [')
for conflict in self.conflicts[:-1]:
f.write('{b}, '.format(b=conflict['b']))
f.write('{b}];\n'.format(b=self.conflicts[-1]['b']))
def copy(self):
datastructs = []
membanks = []
conflicts = []
for datastruct in self.datastructs:
datastructs.append({'size': datastruct['size'] ,'cost': datastruct['cost']})
for membank in self.membanks:
membanks.append({'capacity': membank['capacity']})
for conflict in self.conflicts:
conflicts.append({'a': conflict['a'] ,'b': conflict['b'], 'cost': conflict['cost'], 'status': conflict['status']})
membanks.pop()
problem = MemProblem(datastructs=datastructs, membanks=membanks, conflicts=conflicts, penalty=self.penalty)
for row in range(0, len(self.X)):
problem.X[row] = list(self.X[row])
problem.update_conflicts()
return problem
# Read a problem instance from a data file.
def read_problem(filename):
data = open(filename, 'r').read()
s = [int(numeric_string) for numeric_string in re.search('s = \[((?:\d+,\s*)*\d+)\];', data).group(1).replace(' ', '').split(',')]
d = [int(numeric_string) for numeric_string in re.search('d = \[((?:\d+,\s*)*\d+)\];', data).group(1).replace(' ', '').split(',')]
c = [int(numeric_string) for numeric_string in re.search('c = \[((?:\d+,\s*)*\d+)\];', data).group(1).replace(' ', '').split(',')]
e = [int(numeric_string) for numeric_string in re.search('e = \[((?:\d+,\s*)*\d+)\];', data).group(1).replace(' ', '').split(',')]
A = [int(numeric_string) for numeric_string in re.search('A = \[((?:\d+,\s*)*\d+)\];', data).group(1).replace(' ', '').split(',')]
B = [int(numeric_string) for numeric_string in re.search('B = \[((?:\d+,\s*)*\d+)\];', data).group(1).replace(' ', '').split(',')]
penalty = int(re.search('p = (\d+);', data).group(1))
conflicts = []
datastructs = []
membanks = []
for (a, b, cost) in zip(A, B, d):
conflicts.append({'a': a, 'b':b, 'cost': cost, 'status': 0})
for (size, cost) in zip(s, e):
datastructs.append({'size': size, 'cost': cost})
for capacity in c:
membanks.append({'capacity': capacity})
return MemProblem(datastructs=datastructs, membanks=membanks, conflicts=conflicts, penalty=penalty)
def random_problem(seed, dss_min, dss_max, dsc_min, dsc_max, ds_n, mem_min, mem_max, mem_n, c_min, c_max, c_n, p_min, p_max):
#Penalty
penalty = random.randint(p_min, p_max)
# Create random membanks.
membanks = [0] * mem_n
for i in range(0, mem_n):
membanks[i] = { 'capacity': random.randint(mem_min, mem_max) }
# Create random datastructs.
datastructs = [0] * ds_n
for i in range(0, ds_n):
datastructs[i] = { 'size': random.randint(dss_min, dss_max), 'cost': random.randint(dsc_min, dsc_max) }
# Create random conflicts
conflicts = [0] * c_n
for i in range(0, c_n):
conflicts[i] = {
'a': random.randint(0, ds_n-1),
'b': random.randint(0, ds_n-1),
'cost': random.randint(c_min, c_max),
'status': 0
}
return MemProblem(datastructs=datastructs, membanks=membanks, conflicts=conflicts, penalty=penalty)
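# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal usage example under assumed parameters: build a small random
# instance, place every data structure in external memory (the extra last
# membank, which always fits), and print the resulting cost and usage.
if __name__ == '__main__':
    demo = random_problem(seed=0, dss_min=1, dss_max=10, dsc_min=1, dsc_max=5,
                          ds_n=4, mem_min=10, mem_max=20, mem_n=2,
                          c_min=1, c_max=3, c_n=3, p_min=2, p_max=4)
    for i in range(0, len(demo.X)):
        demo.X[i][-1] = True
    demo.update_conflicts()
    print demo.results()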
| {
"content_hash": "734f03a2285bbd1b3f172bf17d68b7a2",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 131,
"avg_line_length": 30.6,
"alnum_prop": 0.6336238198983297,
"repo_name": "dplbsd/memexplorerpy",
"id": "5416cb774aaa45d137fdd16fe2e822da897c7fb9",
"size": "8262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/memproblem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "25915"
}
],
"symlink_target": ""
} |
"""
KISSinsights template tags.
"""
from __future__ import absolute_import
import re
from django.template import Library, Node, TemplateSyntaxError
from analytical.utils import get_identity, get_required_setting
ACCOUNT_NUMBER_RE = re.compile(r'^\d+$')
SITE_CODE_RE = re.compile(r'^[\w]+$')
SETUP_CODE = """
<script type="text/javascript">var _kiq = _kiq || []; %(commands)s</script>
<script type="text/javascript" src="//s3.amazonaws.com/ki.js/%(account_number)s/%(site_code)s.js" async="true"></script>
""" # noqa
IDENTIFY_CODE = "_kiq.push(['identify', '%s']);"
SHOW_SURVEY_CODE = "_kiq.push(['showSurvey', %s]);"
SHOW_SURVEY_CONTEXT_KEY = 'kiss_insights_show_survey'
register = Library()
@register.tag
def kiss_insights(parser, token):
"""
KISSinsights set-up template tag.
    Renders JavaScript code to set up surveys. You must supply
your account number and site code in the
``KISS_INSIGHTS_ACCOUNT_NUMBER`` and ``KISS_INSIGHTS_SITE_CODE``
settings.
"""
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return KissInsightsNode()
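# Editor's note -- a hedged configuration example, not part of the original
# module. The values are placeholders: the account number must be numeric and
# the site code alphanumeric, matching ACCOUNT_NUMBER_RE and SITE_CODE_RE:
#
#     KISS_INSIGHTS_ACCOUNT_NUMBER = '12345'
#     KISS_INSIGHTS_SITE_CODE = 'abc'
#
# With those settings in place, the {% kiss_insights %} tag renders the
# asynchronous survey snippet at the top of <body> (see
# contribute_to_analytical below).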
class KissInsightsNode(Node):
def __init__(self):
self.account_number = get_required_setting(
'KISS_INSIGHTS_ACCOUNT_NUMBER', ACCOUNT_NUMBER_RE,
"must be (a string containing) a number")
self.site_code = get_required_setting(
'KISS_INSIGHTS_SITE_CODE', SITE_CODE_RE,
"must be a string containing three characters")
def render(self, context):
commands = []
identity = get_identity(context, 'kiss_insights')
if identity is not None:
commands.append(IDENTIFY_CODE % identity)
try:
commands.append(SHOW_SURVEY_CODE % context[SHOW_SURVEY_CONTEXT_KEY])
except KeyError:
pass
html = SETUP_CODE % {
'account_number': self.account_number,
'site_code': self.site_code,
'commands': " ".join(commands),
}
return html
def contribute_to_analytical(add_node):
KissInsightsNode() # ensure properly configured
add_node('body_top', KissInsightsNode)
| {
"content_hash": "2d182576c415864b54f085358eb903b5",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 124,
"avg_line_length": 30.791666666666668,
"alnum_prop": 0.6364456472710871,
"repo_name": "bittner/django-analytical",
"id": "8381eb3ee69be6f61d6a72a0badb6bc2d48284e7",
"size": "2217",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "analytical/templatetags/kiss_insights.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169721"
}
],
"symlink_target": ""
} |
from .linear_mixed_model import LinearMixedModel
__all__ = [
'LinearMixedModel',
]
| {
"content_hash": "8e16cc680f0594a75a1575542aae66f6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 48,
"avg_line_length": 17.6,
"alnum_prop": 0.6931818181818182,
"repo_name": "cseed/hail",
"id": "9995cbb25f5f70e4cf2f9b2b1d5548c7b8668b40",
"size": "89",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "hail/python/hail/stats/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "289"
},
{
"name": "C++",
"bytes": "170210"
},
{
"name": "CSS",
"bytes": "20423"
},
{
"name": "Dockerfile",
"bytes": "7426"
},
{
"name": "HTML",
"bytes": "43106"
},
{
"name": "Java",
"bytes": "22564"
},
{
"name": "JavaScript",
"bytes": "730"
},
{
"name": "Jupyter Notebook",
"bytes": "162397"
},
{
"name": "Makefile",
"bytes": "58348"
},
{
"name": "PLpgSQL",
"bytes": "23163"
},
{
"name": "Python",
"bytes": "3477764"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "Scala",
"bytes": "3496240"
},
{
"name": "Shell",
"bytes": "41254"
},
{
"name": "TSQL",
"bytes": "10385"
},
{
"name": "TeX",
"bytes": "7125"
},
{
"name": "XSLT",
"bytes": "9787"
}
],
"symlink_target": ""
} |
import multiprocessing
import random
import threading
import time
from concurrent import futures
import grpc
from src.proto.grpc.testing import control_pb2
from src.proto.grpc.testing import services_pb2_grpc
from src.proto.grpc.testing import stats_pb2
from tests.qps import benchmark_client
from tests.qps import benchmark_server
from tests.qps import client_runner
from tests.qps import histogram
from tests.unit import resources
class WorkerServer(services_pb2_grpc.WorkerServiceServicer):
"""Python Worker Server implementation."""
def __init__(self):
self._quit_event = threading.Event()
def RunServer(self, request_iterator, context):
config = next(request_iterator).setup
server, port = self._create_server(config)
cores = multiprocessing.cpu_count()
server.start()
start_time = time.time()
yield self._get_server_status(start_time, start_time, port, cores)
for request in request_iterator:
end_time = time.time()
status = self._get_server_status(start_time, end_time, port, cores)
if request.mark.reset:
start_time = end_time
yield status
server.stop(None)
def _get_server_status(self, start_time, end_time, port, cores):
end_time = time.time()
elapsed_time = end_time - start_time
stats = stats_pb2.ServerStats(
time_elapsed=elapsed_time,
time_user=elapsed_time,
time_system=elapsed_time)
return control_pb2.ServerStatus(stats=stats, port=port, cores=cores)
def _create_server(self, config):
if config.async_server_threads == 0:
# This is the default concurrent.futures thread pool size, but
# None doesn't seem to work
server_threads = multiprocessing.cpu_count() * 5
else:
server_threads = config.async_server_threads
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=server_threads))
if config.server_type == control_pb2.ASYNC_SERVER:
servicer = benchmark_server.BenchmarkServer()
services_pb2_grpc.add_BenchmarkServiceServicer_to_server(servicer,
server)
elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER:
resp_size = config.payload_config.bytebuf_params.resp_size
servicer = benchmark_server.GenericBenchmarkServer(resp_size)
method_implementations = {
'StreamingCall':
grpc.stream_stream_rpc_method_handler(servicer.StreamingCall),
'UnaryCall':
grpc.unary_unary_rpc_method_handler(servicer.UnaryCall),
}
handler = grpc.method_handlers_generic_handler(
'grpc.testing.BenchmarkService', method_implementations)
server.add_generic_rpc_handlers((handler,))
else:
raise Exception(
'Unsupported server type {}'.format(config.server_type))
if config.HasField('security_params'): # Use SSL
server_creds = grpc.ssl_server_credentials((
(resources.private_key(), resources.certificate_chain()),))
port = server.add_secure_port('[::]:{}'.format(config.port),
server_creds)
else:
port = server.add_insecure_port('[::]:{}'.format(config.port))
return (server, port)
def RunClient(self, request_iterator, context):
config = next(request_iterator).setup
client_runners = []
qps_data = histogram.Histogram(config.histogram_params.resolution,
config.histogram_params.max_possible)
start_time = time.time()
# Create a client for each channel
for i in xrange(config.client_channels):
server = config.server_targets[i % len(config.server_targets)]
runner = self._create_client_runner(server, config, qps_data)
client_runners.append(runner)
runner.start()
end_time = time.time()
yield self._get_client_status(start_time, end_time, qps_data)
# Respond to stat requests
for request in request_iterator:
end_time = time.time()
status = self._get_client_status(start_time, end_time, qps_data)
if request.mark.reset:
qps_data.reset()
start_time = time.time()
yield status
# Cleanup the clients
for runner in client_runners:
runner.stop()
def _get_client_status(self, start_time, end_time, qps_data):
latencies = qps_data.get_data()
end_time = time.time()
elapsed_time = end_time - start_time
stats = stats_pb2.ClientStats(
latencies=latencies,
time_elapsed=elapsed_time,
time_user=elapsed_time,
time_system=elapsed_time)
return control_pb2.ClientStatus(stats=stats)
def _create_client_runner(self, server, config, qps_data):
if config.client_type == control_pb2.SYNC_CLIENT:
if config.rpc_type == control_pb2.UNARY:
client = benchmark_client.UnarySyncBenchmarkClient(
server, config, qps_data)
elif config.rpc_type == control_pb2.STREAMING:
client = benchmark_client.StreamingSyncBenchmarkClient(
server, config, qps_data)
elif config.client_type == control_pb2.ASYNC_CLIENT:
if config.rpc_type == control_pb2.UNARY:
client = benchmark_client.UnaryAsyncBenchmarkClient(
server, config, qps_data)
else:
raise Exception('Async streaming client not supported')
else:
raise Exception(
'Unsupported client type {}'.format(config.client_type))
# In multi-channel tests, we split the load across all channels
load_factor = float(config.client_channels)
if config.load_params.WhichOneof('load') == 'closed_loop':
runner = client_runner.ClosedLoopClientRunner(
client, config.outstanding_rpcs_per_channel)
else: # Open loop Poisson
alpha = config.load_params.poisson.offered_load / load_factor
def poisson():
while True:
yield random.expovariate(alpha)
runner = client_runner.OpenLoopClientRunner(client, poisson())
return runner
def CoreCount(self, request, context):
return control_pb2.CoreResponse(cores=multiprocessing.cpu_count())
def QuitWorker(self, request, context):
self._quit_event.set()
return control_pb2.Void()
def wait_for_quit(self):
self._quit_event.wait()
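# --- Editor's illustrative sketch (not part of the original module) ---
# Hosting this servicer standalone might look roughly like the block below.
# The add_WorkerServiceServicer_to_server helper name assumes the standard
# grpc codegen naming in services_pb2_grpc, and the port and worker count are
# arbitrary example values.
if __name__ == '__main__':
    worker = WorkerServer()
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    services_pb2_grpc.add_WorkerServiceServicer_to_server(worker, grpc_server)
    grpc_server.add_insecure_port('[::]:50052')
    grpc_server.start()
    # Block until a QuitWorker RPC sets the quit event, then shut down.
    worker.wait_for_quit()
    grpc_server.stop(0)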
| {
"content_hash": "42681225dc01e250507393849613bdf4",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 79,
"avg_line_length": 40.16860465116279,
"alnum_prop": 0.606310609350123,
"repo_name": "hstefan/grpc",
"id": "de9535f46eb3621c3c54e91fb3ce9869bc37230f",
"size": "8438",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "src/python/grpcio_tests/tests/qps/worker_server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "23280"
},
{
"name": "C",
"bytes": "6943549"
},
{
"name": "C#",
"bytes": "1530367"
},
{
"name": "C++",
"bytes": "2129898"
},
{
"name": "CMake",
"bytes": "429042"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "340408"
},
{
"name": "M4",
"bytes": "40976"
},
{
"name": "Makefile",
"bytes": "862236"
},
{
"name": "Objective-C",
"bytes": "350735"
},
{
"name": "PHP",
"bytes": "301694"
},
{
"name": "Protocol Buffer",
"bytes": "126616"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1448863"
},
{
"name": "Ruby",
"bytes": "687006"
},
{
"name": "Shell",
"bytes": "59102"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
DEBUG = True
USE_TZ = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'test.db'),
}
}
INSTALLED_APPS = [
'taxii_services',
'django.contrib.sites',
]
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'taxii_services.middleware.StatusMessageExceptionMiddleware',
)
ROOT_URLCONF = 'taxii_services.urls'
SECRET_KEY = "kjebl23k4b64.35mg.sd,mfnt.,3m4t1,m3nbr,1235"
| {
"content_hash": "06a9a7916ba550b18c9b48128b8f6697",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 67,
"avg_line_length": 17.586206896551722,
"alnum_prop": 0.6549019607843137,
"repo_name": "TAXIIProject/django-taxii-services",
"id": "114c5c140fbf0ae5fe166ae03f4701205d8d2aa8",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "346135"
}
],
"symlink_target": ""
} |
"""
Build Tasks
~~~~~~~~~~~
"""
import invoke as _invoke
@_invoke.task(default=True)
def sdist(ctx):
""" Build source distribution """
with ctx.root_dir():
ctx.run('python setup.py sdist')
| {
"content_hash": "003b3dde54264b8a64982cac8abe42fe",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 40,
"avg_line_length": 14.928571428571429,
"alnum_prop": 0.5885167464114832,
"repo_name": "ndparker/hod",
"id": "fc9e6a96cec2739dde8e3b2d721bae810092edaa",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9392"
},
{
"name": "Python",
"bytes": "13614"
}
],
"symlink_target": ""
} |
import re
import os
import sys
import time
import unittest
import ConfigParser
from setuptools import setup, Command
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class SQLiteTest(Command):
"""
Run the tests on SQLite
"""
description = "Run tests on SQLite"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.environ['TRYTOND_DATABASE_URI'] = 'sqlite://'
os.environ['DB_NAME'] = ':memory:'
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
class PostgresTest(Command):
"""
Run the tests on Postgres.
"""
description = "Run tests on Postgresql"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.environ['TRYTOND_DATABASE_URI'] = 'postgresql://'
os.environ['DB_NAME'] = 'test_' + str(int(time.time()))
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
config = ConfigParser.ConfigParser()
config.readfp(open('tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
major_version, minor_version, _ = info.get('version', '0.0.1').split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
requires = []
MODULE2PREFIX = {
'sale_channel': 'fio',
'sale_line_warehouse': 'fio',
}
MODULE = "pos"
PREFIX = "fio"
for dep in info.get('depends', []):
if not re.match(r'(ir|res|webdav)(\W|$)', dep):
requires.append(
'%s_%s >= %s.%s, < %s.%s' % (
MODULE2PREFIX.get(dep, 'trytond'), dep,
major_version, minor_version, major_version,
minor_version + 1
)
)
requires.append(
'trytond >= %s.%s, < %s.%s' % (
major_version, minor_version, major_version, minor_version + 1
)
)
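# Editor's illustrative note (not part of the original setup script): with a
# tryton.cfg version of, say, 3.4.x, the loop above pins the 'sale_channel'
# dependency as 'fio_sale_channel >= 3.4, < 3.5', any unprefixed module as
# 'trytond_<name> >= 3.4, < 3.5', and trytond itself as 'trytond >= 3.4, < 3.5'.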
setup(
name='%s_%s' % (PREFIX, MODULE),
version=info.get('version', '0.0.1'),
description="Tryton POS helper module",
author="Fulfil.IO Inc., Openlabs Technologies and Consulting (P) Ltd.",
author_email='info@fulfil.io',
url='http://www.fulfil.io/',
package_dir={'trytond.modules.%s' % MODULE: '.'},
packages=[
'trytond.modules.%s' % MODULE,
'trytond.modules.%s.tests' % MODULE,
],
package_data={
'trytond.modules.%s' % MODULE: info.get('xml', []) +
info.get('translation', []) +
['tryton.cfg', 'locale/*.po', 'tests/*.rst', 'reports/*.odt'] +
['view/*.xml'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Office/Business',
],
license='BSD',
install_requires=requires,
tests_require=['pycountry'],
zip_safe=False,
entry_points="""
[trytond.modules]
%s = trytond.modules.%s
""" % (MODULE, MODULE),
test_suite='tests',
test_loader='trytond.test_loader:Loader',
cmdclass={
'test': SQLiteTest,
'test_on_postgres': PostgresTest,
}
)
| {
"content_hash": "3ac9ceeaffce5f79bf9865edfa990cc2",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 76,
"avg_line_length": 25.853146853146853,
"alnum_prop": 0.5769542872599405,
"repo_name": "fulfilio/trytond-pos",
"id": "ddbb1bb43ac940c4654c9af4feda9c1f37b13007",
"size": "3719",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "348"
},
{
"name": "Python",
"bytes": "105045"
}
],
"symlink_target": ""
} |
failthresh = 0.03 # allow a little more LSB noise between platforms
failpercent = .5
outputs = [ "out.exr", "test_microfacet_dist.exr", "test_texture.exr", "test_spline.exr", "out.txt" ]
command = testrender("-optix -res 320 240 scene.xml out.exr")
command += testrender("-optix -res 320 240 test_microfacet_dist.xml test_microfacet_dist.exr")
command += testrender("-optix -res 1 1 test_print.xml dummy.exr")
command += testrender("-optix -res 1 1 test_compare.xml dummy.exr")
command += testrender("-optix -res 1 1 test_assign.xml dummy.exr")
command += testrender("-optix -res 1 1 test_assign_02.xml dummy.exr")
command += testrender("-optix -res 1 1 test_str_ops.xml dummy.exr")
command += testrender("-optix -res 1 1 test_userdata_string.xml dummy.exr")
command += testshade("-optix -res 256 256 test_spline -o Cout test_spline.exr")
command += testshade("-optix -res 512 512 test_texture -o Cout test_texture.exr")
| {
"content_hash": "55624b33535166d7143b06778e1d7d6a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 102,
"avg_line_length": 66.14285714285714,
"alnum_prop": 0.7192224622030238,
"repo_name": "brechtvl/OpenShadingLanguage",
"id": "833915c27099ecddc9dc415fe066b9d9b3f81f6f",
"size": "1120",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "testsuite/testoptix/run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "330570"
},
{
"name": "C++",
"bytes": "4490734"
},
{
"name": "CMake",
"bytes": "177235"
},
{
"name": "CSS",
"bytes": "4165"
},
{
"name": "Cuda",
"bytes": "77227"
},
{
"name": "GLSL",
"bytes": "939"
},
{
"name": "HTML",
"bytes": "43861"
},
{
"name": "Lex",
"bytes": "27918"
},
{
"name": "Makefile",
"bytes": "17031"
},
{
"name": "Python",
"bytes": "308870"
},
{
"name": "Shell",
"bytes": "34984"
},
{
"name": "TeX",
"bytes": "250273"
},
{
"name": "Yacc",
"bytes": "50836"
}
],
"symlink_target": ""
} |
import io
def source_event(path):
event = {
'source': {
'uri': path,
'data': io.open(path, 'rU', encoding='utf8', newline='').read(),
'mediaType': 'text/x.cucumber.gherkin+plain'
}
}
return event
class SourceEvents:
def __init__(self, paths):
self.paths = paths
def enum(self):
return map(source_event, self.paths)
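# Editor's note -- a hedged usage example, not part of the original module;
# the feature file path is hypothetical:
#
#     for event in SourceEvents(['features/minimal.feature']).enum():
#         print(event['source']['uri'])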
| {
"content_hash": "f4365c7912ce3ff5b24e017780351eb7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 76,
"avg_line_length": 22.555555555555557,
"alnum_prop": 0.5295566502463054,
"repo_name": "cucumber/gherkin-python",
"id": "94f608a564cfd677ba7ebfecd838f69f8c36f20d",
"size": "406",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "gherkin/stream/source_events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6854"
},
{
"name": "Makefile",
"bytes": "4083"
},
{
"name": "Python",
"bytes": "227759"
},
{
"name": "Shell",
"bytes": "1087"
}
],
"symlink_target": ""
} |
import sys, os
parentDir= os.path.dirname(os.path.abspath(__file__))+os.sep+".."
sys.path.insert(0,parentDir)
if __name__ == "__main__":
import sys
print >> sys.stderr, """
This is a support file and is designed to be imported, not run on its own.
This module amends the import path to include the parent directory,
thereby ensuring that dvbcss from this package will be used, instead of any installed
instance.
"""
| {
"content_hash": "b330484e0fed191c980c5eff9204b3e8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 85,
"avg_line_length": 30.785714285714285,
"alnum_prop": 0.703016241299304,
"repo_name": "bbc/pydvbcss",
"id": "81d375db3a8b9215e1b5e23cb59dcba671a7b552",
"size": "1142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/_useDvbCssUninstalled.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "411110"
}
],
"symlink_target": ""
} |
import sys
import struct
MESSAGE_TYPE_SEND_MESSAGE_REQUEST = 0
MESSAGE_TYPE_SEND_MESSAGE_RESPONSE = 1
MESSAGE_TYPE_CONNECT = 2
MESSAGE_TYPE_CONNECT_MESSAGE = 3
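# Editor's note (illustrative, not part of the original script): each frame on
# stdin/stdout is a 4-byte native-endian message type, a 4-byte payload length,
# and then the UTF-8 JSON payload itself. Framing a reply of type
# MESSAGE_TYPE_SEND_MESSAGE_RESPONSE would amount to:
#
#     payload = '{"id": 1, "echo": {}}'.encode('utf-8')
#     frame = struct.pack("II", MESSAGE_TYPE_SEND_MESSAGE_RESPONSE,
#                         len(payload)) + payload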
def Main():
message_number = 0
while 1:
# Read the message type (first 4 bytes).
type_bytes = sys.stdin.read(4)
if len(type_bytes) == 0:
break
message_type = struct.unpack('i', type_bytes)[0]
# Read the message length (4 bytes).
text_length = struct.unpack('i', sys.stdin.read(4))[0]
# Read the text (JSON object) of the message.
text = sys.stdin.read(text_length).decode('utf-8')
message_number += 1
response = '{{"id": {0}, "echo": {1}}}'.format(message_number,
text).encode('utf-8')
# Choose the correct message type for the response.
if message_type == MESSAGE_TYPE_SEND_MESSAGE_REQUEST:
response_type = MESSAGE_TYPE_SEND_MESSAGE_RESPONSE
else:
response_type = MESSAGE_TYPE_CONNECT_MESSAGE
try:
sys.stdout.write(struct.pack("II", response_type, len(response)))
sys.stdout.write(response)
sys.stdout.flush()
except IOError:
break
if __name__ == '__main__':
Main()
| {
"content_hash": "0fd0400be25f7e893f478d3e64a0afb3",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 71,
"avg_line_length": 26.02173913043478,
"alnum_prop": 0.6182121971595655,
"repo_name": "leiferikb/bitpop-private",
"id": "ffb6cefeb94761b5560cb555c9f9a0ab9ad5954f",
"size": "1498",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "chrome/test/data/native_messaging/Native Hosts/echo.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1871"
},
{
"name": "C",
"bytes": "1800028"
},
{
"name": "C++",
"bytes": "76499582"
},
{
"name": "CSS",
"bytes": "803682"
},
{
"name": "Java",
"bytes": "1234788"
},
{
"name": "JavaScript",
"bytes": "21793252"
},
{
"name": "Objective-C",
"bytes": "5358744"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "64410"
},
{
"name": "Python",
"bytes": "3017857"
},
{
"name": "Ruby",
"bytes": "650"
},
{
"name": "Shell",
"bytes": "322362"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "12138"
}
],
"symlink_target": ""
} |
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, HttpResponseRedirect, redirect
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
# Create your views here.
from billing.models import Transaction
from notifications.models import Notification
from .forms import LoginForm, RegisterForm
from .models import MyUser
@login_required
def account_home(request):
notifications = Notification.objects.get_recent_for_user(request.user, 6)
transactions = Transaction.objects.get_recent_for_user(request.user, 3)
context = {
"notifications": notifications,
"transactions": transactions
}
return render(request, "accounts/account_home.html", context)
def auth_logout(request):
logout(request)
return HttpResponseRedirect('/')
def auth_login(request):
form = LoginForm(request.POST or None)
next_url = request.GET.get('next')
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
if next_url is not None:
return HttpResponseRedirect(next_url)
return HttpResponseRedirect("/")
action_url = reverse("login")
title = "Login"
submit_btn = title
submit_btn_class = "btn-success btn-block"
extra_form_link = "Upgrade your account today <a href='%s'>here</a>!" %(reverse("account_upgrade"))
context = {
"form": form,
"action_url": action_url,
"title": title,
"submit_btn": submit_btn,
"submit_btn_class": submit_btn_class,
"extra_form_link":extra_form_link
}
return render(request, "accounts/account_login_register.html", context)
def auth_register(request):
form = RegisterForm(request.POST or None)
if form.is_valid():
username = form.cleaned_data['username']
email = form.cleaned_data['email']
password = form.cleaned_data['password2']
#MyUser.objects.create_user(username=username, email=email, password=password)
new_user = MyUser()
new_user.username = username
new_user.email = email
#new_user.password = password #WRONG
new_user.set_password(password) #RIGHT
new_user.save()
action_url = reverse("register")
title = "Register"
submit_btn = "Create free account"
context = {
"form": form,
"action_url": action_url,
"title": title,
"submit_btn": submit_btn
}
return render(request, "accounts/account_login_register.html", context)
| {
"content_hash": "7ebeb6ad05e1714f70ad3c72d1cc7796",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 100,
"avg_line_length": 27.148936170212767,
"alnum_prop": 0.734717868338558,
"repo_name": "codingforentrepreneurs/srvup-membership",
"id": "d2c85ee23db3f11ed7709d8a2594821e75aa64b8",
"size": "2552",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/accounts/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41725"
},
{
"name": "HTML",
"bytes": "36106"
},
{
"name": "JavaScript",
"bytes": "99771"
},
{
"name": "Python",
"bytes": "101857"
}
],
"symlink_target": ""
} |
class PagarmeApiError(Exception): pass
class PagarmeTransactionError(Exception): pass
class NotPaidException(PagarmeTransactionError): pass
class NotBoundException(PagarmeTransactionError): pass
| {
"content_hash": "0794783ac5742328c9d344118b49a5e0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 54,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.868020304568528,
"repo_name": "aroncds/pagarme-python",
"id": "c061bdfb19f3ae43d25992c2c8e41ed6ad839daf",
"size": "217",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pagarme/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48811"
}
],
"symlink_target": ""
} |
import sys
import getopt
import json
import logging
import time
import requests.exceptions
import importlib
import datetime
import threading
import jenkins
import gadget
import sysutils
__version__ = '1.0.1'
class Configuration(object):
def __init__(self, path_to_config):
logging.info("Reading configuration from path: %s" % path_to_config)
config_fd = open(path_to_config)
config_json = json.load(config_fd)
config_fd.close()
self.view_url = config_json['viewUrl']
logging.info("Using view at: %s", self.view_url)
self.view_refresh_interval = config_json.get('viewRefreshInterval', 30)
logging.info("Using view refresh interval: %d", self.view_refresh_interval)
self.view_refresh_error_interval = config_json.get('viewRefreshErrorInterval', 60)
logging.info("Using view refresh error interval: %d", self.view_refresh_interval)
self.ssl_verify_certificates = config_json.get('sslVerifyCertificates', True)
if not self.ssl_verify_certificates:
logging.warn("SSL certificate validation disabled via config")
requests.packages.urllib3.disable_warnings() # requests uses a bundled urllib3 module
self.network_interface_name = config_json.get('networkInterfaceName', None)
self.display_update_interval = config_json.get('displayUpdateInterval', 5.0)
self.delta_time_step = config_json.get('deltaTimeStep', 10)
self.username = config_json.get('username', None)
self.auth_token = config_json.get('authToken', None)
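# Editor's note -- a hedged example configuration for the class above, not part
# of the original module. Only "viewUrl" is required; every other key falls
# back to the defaults read in __init__, and the URL and credentials below are
# placeholders:
#
#     {
#         "viewUrl": "https://jenkins.example.com/view/MyView/",
#         "viewRefreshInterval": 30,
#         "sslVerifyCertificates": true,
#         "displayUpdateInterval": 5.0,
#         "username": "dashboard",
#         "authToken": "<api token>"
#     }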
class ViewState(object):
def __init__(self, view):
self.view = view
self.failed_index = None
class RenderThread(threading.Thread):
def __init__(self, gadget, update_interval, delta_time_step):
super(RenderThread, self).__init__()
self.setDaemon(True)
self.__view_state = None
self.gadget = gadget
self.update_interval = update_interval
self.delta_time_step = delta_time_step
def run(self):
while True:
time.sleep(self.update_interval)
view_state = self.acquire_view_state()
if view_state is None:
self.render_no_data()
else:
self.render(view_state)
def acquire_view_state(self):
"""Fetch view state"""
return self.__view_state # reading instance variable is atomic in Python
def set_view_state(self, view_state):
self.__view_state = view_state # writing instance variable is atomic in Python
def render(self, view_state):
if view_state.failed_index is None:
self.display_overview(view_state.view)
if view_state.view.num_failing_jobs > 0:
view_state.failed_index = 0 # start displaying first failed job at next tick
else:
failed_jobs = view_state.view.failing_jobs()
self.display_failed_job(failed_jobs[view_state.failed_index])
if view_state.failed_index < len(failed_jobs) - 1:
view_state.failed_index += 1 # display next failed job at next tick
else:
view_state.failed_index = None # go back to overview at next tick
def display_overview(self, view):
lines = [
view.name,
'S/U/F: %d/%d/%d' % (view.num_succeeding_jobs, view.num_unstable_jobs, view.num_failing_jobs),
self.get_last_updated_str(view.last_update)
]
self.gadget.set_status_lines(lines)
# set mood depending on failed/unstable/successful jobs
if view.num_failing_jobs > 0:
self.gadget.set_background_status(gadget.BackgroundStatus.Error)
self.gadget.set_indicator(0, gadget.IndicatorStatus.Off)
self.gadget.set_indicator(1, gadget.IndicatorStatus.Off)
self.gadget.set_indicator(2, gadget.IndicatorStatus.On)
elif view.num_unstable_jobs > 0:
self.gadget.set_background_status(gadget.BackgroundStatus.Warn)
self.gadget.set_indicator(0, gadget.IndicatorStatus.Off)
self.gadget.set_indicator(1, gadget.IndicatorStatus.On)
self.gadget.set_indicator(2, gadget.IndicatorStatus.On)
else:
self.gadget.set_background_status(gadget.BackgroundStatus.Ok)
self.gadget.set_indicator(0, gadget.IndicatorStatus.On)
self.gadget.set_indicator(1, gadget.IndicatorStatus.On)
self.gadget.set_indicator(2, gadget.IndicatorStatus.On)
def display_failed_job(self, job_and_build):
job, build = job_and_build
if len(job.display_name) > gadget.MAX_CHARS:
line1 = job.display_name[:gadget.MAX_CHARS]
line2 = '..' + job.display_name[gadget.MAX_CHARS:]
else:
line1 = job.display_name
line2 = ''
lines = [
'Job Failed:',
line1, line2
]
self.gadget.set_status_lines(lines)
def get_last_updated_str(self, last_update):
now_datetime = datetime.datetime.now()
timedelta = now_datetime - last_update
dt_seconds = timedelta.seconds
if dt_seconds < self.delta_time_step:
return "Just now!"
else:
            # count up in delta_time_step-second intervals (10 seconds by default) to be less busy
return "%d seconds ago" % ((timedelta.seconds/self.delta_time_step)*self.delta_time_step)
def render_no_data(self):
self.gadget.set_background_status(gadget.BackgroundStatus.Info)
self.gadget.set_status_lines([
'No data yet',
'Please wait...',
'Refresh pending'
])
class Controller(object):
def __init__(self, config):
self.config = config
if sysutils.is_raspberrypi():
self.gadget = importlib.import_module('dothatgadget').DotHatGadget()
else:
logging.warn('Not running on RaspberryPi, using dummy hardware')
self.gadget = gadget.GadgetBase()
logging.info("Instantiated gadget: %s", type(self.gadget))
self.render_thread = RenderThread(self.gadget, config.display_update_interval, config.delta_time_step)
def run_blocking(self):
self.render_thread.start()
while True:
view = jenkins.View(self.config.view_url, username=self.config.username, auth_token=self.config.auth_token,
ssl_verify_certificates=self.config.ssl_verify_certificates)
try:
view.refresh()
except requests.exceptions.RequestException as e:
logging.error("Failed to refresh view, will try again later: %s", e.message)
time.sleep(self.config.view_refresh_error_interval)
continue
except ValueError as e:
logging.error("Failed to refresh view, will try again later: %s", e.message)
time.sleep(self.config.view_refresh_error_interval)
continue
# update the rendering thread's view state
self.render_thread.set_view_state(ViewState(view))
# and sleep until the next iteration
time.sleep(self.config.view_refresh_interval)
def display_system_infos(self):
lines = [
'Version: %s' % __version__,
sysutils.get_ip_address(),
'Getting ready...'
]
self.gadget.set_status_lines(lines)
self.gadget.display_boot_animation()
def print_usage():
print("Usage: controller.py -c PATH_TO_CONFIGURATION [--debug]")
if __name__ == '__main__':
debug = False
options, remainder = getopt.getopt(sys.argv[1:], 'c:', ['config=', 'debug'])
config = None
for o, a in options:
if o in ('-c', '--config'):
config = Configuration(a)
elif o == '--debug':
debug = True
if config is None:
print_usage()
sys.exit(1)
logging.basicConfig(stream=sys.stdout, level=logging.WARN,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        datefmt='%Y-%m-%dT%H:%M:%S')  # strftime-style format for asctime
if debug:
logging.getLogger().setLevel(logging.DEBUG)
controller = Controller(config)
controller.display_system_infos()
time.sleep(10)
controller.run_blocking()
| {
"content_hash": "b3c39db3383431cd539f571f4f3956fe",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 119,
"avg_line_length": 39.28169014084507,
"alnum_prop": 0.6158718776144376,
"repo_name": "suzukieng/dot-jenkins",
"id": "cee06e755f3ed770b88562a3fa21dbc6d68cbdde",
"size": "8389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17169"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
} |
from pyparsing import *
import act
frequency = Forward().setParseAction(act.frequency)
expcode = Forward().setParseAction(act.expcode)
expansion = Forward().setParseAction(act.expansion) | {
"content_hash": "2e8725a39dcf5736fe5a2bb2b19ff24d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 51,
"avg_line_length": 26.857142857142858,
"alnum_prop": 0.7978723404255319,
"repo_name": "jrgdiz/cardwalker",
"id": "4e6985aa267ae3776d295a5c0eaa1d76dc2df8da",
"size": "188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grammar/expansions/decl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49118"
},
{
"name": "Shell",
"bytes": "2228"
}
],
"symlink_target": ""
} |
""" Provides general utility functions to be used across modules """
from __future__ import unicode_literals, absolute_import, print_function
import re
import functools
import warnings
from .compat import StringType, UnicodeType, quote_url
from .xml import XML
def really_unicode(in_string):
"""
Ensures s is returned as a unicode string and not just a string through
a series of progressively relaxed decodings
"""
if type(in_string) is StringType:
for args in (('utf-8',), ('latin-1',), ('ascii', 'replace')):
try:
# pylint: disable=star-args
in_string = in_string.decode(*args)
break
except UnicodeDecodeError:
continue
if type(in_string) is not UnicodeType:
raise ValueError('%s is not a string at all.' % in_string)
return in_string
def really_utf8(in_string):
""" First decodes s via really_unicode to ensure it can successfully be
encoded as utf-8 This is required since just calling encode on a string
will often cause python to perform a coerced strict auto-decode as ascii
first and will result in a UnicodeDecodeError being raised After
really_unicode returns a safe unicode string, encode as 'utf-8' and return
the utf-8 encoded string.
"""
return really_unicode(in_string).encode('utf-8')
FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')
def camel_to_underscore(string):
""" Convert camelcase to lowercase and underscore
    Recipe from http://stackoverflow.com/a/1176023
"""
string = FIRST_CAP_RE.sub(r'\1_\2', string)
return ALL_CAP_RE.sub(r'\1_\2', string).lower()
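# For example (editor's addition): camel_to_underscore('CamelCaseString')
# returns 'camel_case_string'.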
def prettify(unicode_text):
"""Return a pretty-printed version of a unicode XML string. Useful for
debugging.
"""
import xml.dom.minidom
reparsed = xml.dom.minidom.parseString(unicode_text.encode('utf-8'))
return reparsed.toprettyxml(indent=" ", newl="\n")
def show_xml(xml):
"""Pretty print an ElementTree XML object
Args:
xml (ElementTree): The :py:class:`xml.etree.ElementTree` to pretty
print
NOTE: This function is a convenience function used during development, it
is not used anywhere in the main code base
"""
string = XML.tostring(xml)
print(prettify(string))
class deprecated(object):
""" A decorator to mark deprecated objects.
Causes a warning to be issued when the object is used, and marks the object
as deprecated in the Sphinx docs.
args:
since (str): The version in which the object is deprecated
alternative (str, optional): The name of an alternative object to use
Example:
::
@deprecated(since="0.7", alternative="new_function")
def old_function(args):
pass
"""
# pylint really doesn't like decorators!
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=no-member, missing-docstring
def __init__(self, since, alternative=None, will_be_removed_in=None):
self.since_version = since
self.alternative = alternative
self.will_be_removed_in = will_be_removed_in
def __call__(self, deprecated_fn):
@functools.wraps(deprecated_fn)
def decorated(*args, **kwargs):
message = "Call to deprecated function {0}.".format(
deprecated_fn.__name__)
if self.will_be_removed_in is not None:
message += " Will be removed in version {0}.".format(
self.will_be_removed_in)
if self.alternative is not None:
message += " Use {0} instead.".format(self.alternative)
warnings.warn(message, stacklevel=2)
return deprecated_fn(*args, **kwargs)
docs = "\n\n .. deprecated:: {0}\n".format(self.since_version)
if self.will_be_removed_in is not None:
docs += "\n Will be removed in version {0}.".format(
self.will_be_removed_in)
if self.alternative is not None:
docs += "\n Use {0} instead.".format(self.alternative)
if decorated.__doc__ is None:
decorated.__doc__ = ''
decorated.__doc__ += docs
return decorated
def url_escape_path(path):
""" Escape a string value for a URL request path
>>> url_escape_path("Foo, bar & baz / the hackers")
u'Foo%2C%20bar%20%26%20baz%20%2F%20the%20hackers'
"""
# Using 'safe' arg does not seem to work for python 2.6
return quote_url(path.encode('utf-8')).replace('/', '%2F')
| {
"content_hash": "b7683fc1aab0a29cd0ef9f1f83feb162",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 31.80821917808219,
"alnum_prop": 0.6261843238587425,
"repo_name": "bwhaley/SoCo",
"id": "076e18c6796f783cc3e9a941e47489e4b378e79f",
"size": "4669",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "soco/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "168"
},
{
"name": "CSS",
"bytes": "571"
},
{
"name": "HTML",
"bytes": "4055"
},
{
"name": "Makefile",
"bytes": "66"
},
{
"name": "Python",
"bytes": "398195"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
} |
import sys
import queue
class Vertex:
def __init__(self):
self.edges = {}
def get_edges(self):
return self.edges
def add_edge(self, vertex, distance):
if vertex not in self.edges or distance < self.edges[vertex]:
self.edges[vertex] = distance
class Graph:
def __init__(self, N):
self.vertices = {}
while (N > 0):
self.vertices[N] = Vertex()
N -= 1
def get_vertices(self):
return self.vertices
def get_vertex(self, key):
return self.vertices[key]
def add_vertex(self, key, vertex):
self.vertices[key] = vertex
class Dijkstra:
def __init__(self, graph):
self.graph = graph
def calculate(self, start):
distances = {}
adjacents = queue.PriorityQueue()
adjacents.put((0, start))
while not adjacents.empty():
(distance, vertex) = adjacents.get()
if vertex in distances:
continue
distances[vertex] = distance
self.update_adjacents(vertex, distances, adjacents)
return distances
def update_adjacents(self, parent, distances, adjacents):
edges = self.graph.get_vertex(parent).get_edges()
for vertex, distance in edges.items():
adjacents.put((distances[parent] + distance, vertex)) | {
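# --- Editor's illustrative sketch (not part of the original module) ---
# Tiny usage example: three vertices keyed 1..3, two directed edges, and the
# shortest distances computed from vertex 1.
if __name__ == "__main__":
    graph = Graph(3)
    graph.get_vertex(1).add_edge(2, 5)   # edge 1 -> 2 with distance 5
    graph.get_vertex(2).add_edge(3, 2)   # edge 2 -> 3 with distance 2
    print(Dijkstra(graph).calculate(1))  # {1: 0, 2: 5, 3: 7}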
"content_hash": "923a9fb860d2911bdeaf1f2e421d4893",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 69,
"avg_line_length": 27.11764705882353,
"alnum_prop": 0.5712219812002892,
"repo_name": "zeyuanxy/hacker-rank",
"id": "1eb40a79e8f5949f9d3edd6bef7edf59a0b27b2e",
"size": "1563",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "practice/algorithms/graph-theory/dijkstrashortreach/dijkstrashortreach.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "53922"
},
{
"name": "Common Lisp",
"bytes": "145"
},
{
"name": "Haskell",
"bytes": "51173"
},
{
"name": "Java",
"bytes": "40362"
},
{
"name": "Python",
"bytes": "108602"
},
{
"name": "Ruby",
"bytes": "8213"
},
{
"name": "Shell",
"bytes": "814"
}
],
"symlink_target": ""
} |
from django.db.models.signals import post_save
from celery import task
from fjord.base.utils import key_to_instance
from .utils import translate
@task(rate_limit='60/m')
def translate_task(instance_key, system, src_lang, src_field,
dst_lang, dst_field):
"""Celery task to perform a single translation
.. Note::
This is rate-limited at 60/m.
We really want the translate call to be rate-limited based on
the translation system, but given we're only supporting Gengo
right now, I'm going to do the rate limiting here across all
translation systems rather than figure out how to do it just
for Gengo.
:arg instance_key: The key for the instance we're translating
:arg system: The name of the translation system to use
:arg src_lang: The source language
:arg src_field: The field in the instance holding the text to
translate
:arg dst_lang: The destination language
:arg dst_field: The field in the instance to shove the translated
text into
"""
instance = key_to_instance(instance_key)
translate(instance, system, src_lang, src_field, dst_lang, dst_field)
def create_translation_tasks(instance, system=None):
"""Generate translation tasks for a given translateable instance"""
jobs = instance.generate_translation_jobs(system=system)
if not jobs:
return []
for key, system, src_lang, src_field, dst_lang, dst_field in jobs:
if not getattr(instance, src_field).strip():
# Don't create a job unless there's something to translate.
continue
translate_task.delay(key, system, src_lang, src_field,
dst_lang, dst_field)
return jobs
def translate_handler(sender, instance=None, created=False, **kwargs):
"""post-save handler that generates translation jobs
This only does translation work on instance creation--not update.
This asks the instance to generate translation jobs. If there are
translation jobs to do, then this throws each one into a separate
celery task.
"""
if not created or instance is None:
return
return create_translation_tasks(instance)
# Set of models registered for translation.
REGISTERED_MODELS = set()
def register_auto_translation(model_cls):
"""Decorator that Registers model class for automatic translation
The model class has to have a ``generate_translation_jobs`` method
that takes an instance and generates a list of translation jobs
that need to be performed.
A translation job is a tuple in the form::
(key, system, src_lang, src_field, dst_lang, dst_field)
The key is some string that uniquely identifies the instance so
that we can save the data back to the instance later.
"""
uid = '-'.join([model_cls.__module__, model_cls.__name__, 'translation'])
post_save.connect(translate_handler, model_cls, dispatch_uid=uid)
REGISTERED_MODELS.add(model_cls)
return model_cls
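# A minimal usage sketch (not part of the original module): the model class,
# its fields, and the instance_to_key() helper below are hypothetical and only
# illustrate the job-tuple contract described in the docstring above.
#
# @register_auto_translation
# class Response(models.Model):
#     description = models.TextField()
#     translated_description = models.TextField(blank=True)
#
#     def generate_translation_jobs(self, system=None):
#         # One job: translate description (es) into translated_description (en).
#         return [(instance_to_key(self), system or 'gengo',
#                  'es', 'description', 'en', 'translated_description')]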
| {
"content_hash": "0ffb69ebfd45ba1f4a1bb8f54c0a72aa",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 77,
"avg_line_length": 32.666666666666664,
"alnum_prop": 0.6892692560895326,
"repo_name": "DESHRAJ/fjord",
"id": "b03dda10b8229bbf81cf09004c8fa8c0f0e3f23b",
"size": "3038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fjord/translations/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "168457"
},
{
"name": "JavaScript",
"bytes": "299449"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "709245"
},
{
"name": "Shell",
"bytes": "13991"
}
],
"symlink_target": ""
} |
"""
GPSDataTools.py: Utilities and class definitions for dealing with raw GPS
tracking data.
In general one is only interested in the Route class, which loads GPS data
from the database for a particular route and automatically turns it into
individual trips.
"""
# Copyright (c) 2010 Colin Bick, Robert Damphousse
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import dbqueries as db
import datetime
from time import time
from sys import argv
from kml_objects import *
import math
from GTFSBusTrack import GTFSBusSchedule,GTFSBusTrack
import gisutils as gis
rad = math.pi / 180.0
THRESH_SEG_ENDPOINT_TO_SHAPE_ENDPOINT = 50 #meters
THRESH_TIME_BETWEEN_REPORTS = 300 # seconds
THRESH_MINIMUM_REPORTS = 10
def now():
return str(int(time()))
class VehicleReport(object):
"""
POD structure representing a single row in the GPS log.
"""
def __init__(self,id,lat,lon,routetag,dirtag,reported_update_time):
self.vehicle_id = id
self.lat = lat
self.lon = lon
self.route_tag = routetag
self.dirtag = dirtag
self.reported_update_time = reported_update_time
def __str__(self):
return """\
vehicle %s on route %s, dirtag %s: %s, %s, time %s
""" % (self.vehicle_id, self.route_tag, self.dirtag,
self.lat,self.lon,self.reported_update_time)
def __eq__(self,other):
return (self.vehicle_id, self.lat, self.lon, self.route_tag,
self.dirtag, self.reported_update_time) \
== (other.vehicle_id, other.lat, other.lon, other.route_tag,
other.dirtag, other.reported_update_time)
def dayOfWeek(self):
"""
0123456 = MTWThFSSu
"""
return self.reported_update_time.weekday()
def timeInSecondsIntoDay(self):
t = self.reported_update_time
h,m,s = t.hour,t.minute,t.second
return s + 60*( m + 60*h )
class VehicleSegment(object):
"""
A list of VehicleReports, representing a single trip made by
a vehicle.
"""
def __init__(self,reports):
self.reports = reports
self.dirtag = reports[-1].dirtag
self.routetag = reports[-1].route_tag
self.lations = [[r.lat,r.lon] for r in reports]
self.shape = None
self.valid = True
def getGTFSRouteInfo(self):
"""
Returns (routeID,directionID) for this trip.
"""
route_id = db.get_route_for_dirtag(self.dirtag, routetag = self.routetag);
dir_id = db.get_direction_for_dirtag(self.dirtag);
print "Dirtag",self.dirtag,"routetag",self.routetag,"matched to",
print route_id,dir_id
return (route_id,dir_id);
def export_segment(self):
"""
Exports this segment to the database.
Returns ( segment_id, (trip_id,offset,error) ),
where segment_id is the tracked_vehicle_segment ID as exported
into the database,
and (trip_id,offset,error) are the gtfs matchup info as returned
by GPSBusTrack.getMatchingGTFSTripID().
If no match is found, returns None.
"""
from GPSBusTrack import GPSBusTrack
#segID = db.getMaxSegID()+1;
bt = GPSBusTrack(self);
tinfo = bt.getMatchingGTFSTripID();
if tinfo is None:
print "No GTFS trip found (%d interp pts)" % (len(self.reports),)
trip_id,offset,error=None,0,0
else:
trip_id,offset,error = tinfo
trip_date = self.reports[0].reported_update_time
rows=[(r.lat,r.lon,r.reported_update_time) for r in self.reports]
veh_id = self.reports[0].vehicle_id;
segID = db.export_gps_route(trip_id, trip_date, veh_id, error, offset, rows);
return segID, tinfo
class TrackedVehicleSegment(object):
"""
A variation on the VehicleSegment class for segments which have
already been identified with a GTFS trip ID and all the associated
information.
"""
def __init__(self,segment_id,useCorrectedGTFS=True):
self.segment_id = segment_id;
self.trip_id, self.trip_date, self.vehicle_id, self.schedule_error, \
self.offset, self.route = db.load_gps_route(segment_id);
self.reports = map(lambda llr: VehicleReport(self.vehicle_id,llr[0],llr[1],
None,None,llr[2]),
self.route);
if self.trip_id is not None:
if useCorrectedGTFS:
self.schedule = GTFSBusTrack(self.trip_id, offset=self.offset,
use_shape=False);
else:
self.schedule = GTFSBusSchedule(self.trip_id,offset=self.offset);
self.route_id = self.schedule.route_id;
self.dir_id = self.schedule.direction_id;
else: #self.trip_id is None
self.route_id = None
self.dir_id = None
self.schedule = None
self.shape = None;
self.min_time = self.reports[0].timeInSecondsIntoDay();
self.max_time = self.reports[-1].timeInSecondsIntoDay();
if self.max_time < self.min_time: self.max_time += 86400
def getGTFSRouteInfo(self):
"""
Returns (routeID,directionID) for this trip.
"""
return self.route_id,self.dir_id;
class Vehicle(object):
"""
  Represents a unique transit vehicle, as defined by its ID
  in the vehicle_track table.
"""
def __init__(self,vehicle_id):
self.vehicle_id = vehicle_id;
self.reports = []
self.segments = []
class GShape(object):
def __init__(self,id):
self.id=id
self.points=[]
self.dirtag = ''
class Route(object):
"""
Represents the set of vehicle trips belonging to a particular route.
Upon initialization, all VehicleReports are found for that route,
and subsequently segmented into appropriate VehicleSegments.
"""
def __init__(self,route_short_name,tzdiff=0):
self.route_short_name = str(route_short_name)
self.dirtags=[]
self.shapes = {}
self._vehicles = {}
print "Loading Dirtags..."
self.load_route_dirtags()
print "\t%s"% '\n\t'.join(self.dirtags)
#print "\t%s" % ' '.join(self.dirtags)
print "Loading Shapes..."
self.load_shapes()
print "\tLoaded %s shapes: %s" % (len(self.shapes),', '.join([ shape_id for shape_id,shape in self.shapes.items()]))
self.load_vehicle_reports(tzdiff)
print "\tFound %s vehicles" % len(self._vehicles)
print "Finding route segments..."
self.find_segments()
if self.shapes:
self.filter_by_endpoint()
else:
print "No shapes found, skipping shape check"
self.filter_by_report_time()
def load_route_dirtags(self):
self.dirtags.extend(db.get_route_dirtags(self.route_short_name));
def load_vehicle_reports(self,tzdiff):
print "Loading vehicle reports..."
rows = db.get_vehicle_reports(self.dirtags,tzdiff);
print "\tDB fetch complete (%d rows). Sorting into objects.." % (len(rows),)
def helper(row):
vehicle_id = row['id']
vehicle = self._vehicles.get(vehicle_id);
if vehicle is None:
vehicle = Vehicle(vehicle_id);
self._vehicles[vehicle_id] = vehicle;
vehicle.reports.append(VehicleReport(*row))
map( helper, rows );
def load_shapes(self):
rows = db.get_shapes_for_route(self.route_short_name);
for row in rows:
shape_id = row['shape_id']
dirtag = row['dirtag']
gshape = self.shapes.get(shape_id);
if gshape is None:
gshape = GShape(shape_id);
gshape.dirtag = dirtag
self.shapes[shape_id] = gshape
self.dirtags.append(dirtag)
gshape.points.append([row['shape_pt_lat'],row['shape_pt_lon']])
def find_segments(self):
dropped = 0
for vehicle in self.vehicles():
#print "\tSegmenting Vehicle %s..." % vehicle.vehicle_id
last_report = vehicle.reports[0]
reports=[last_report]
for report in vehicle.reports[1:]:
report_delay = report.reported_update_time - last_report.reported_update_time
report_delay_seconds = 86400*report_delay.days + report_delay.seconds
if report.dirtag != last_report.dirtag \
or report_delay_seconds > THRESH_TIME_BETWEEN_REPORTS:
if len(reports) > THRESH_MINIMUM_REPORTS:
seg = VehicleSegment(reports);
seg.shape = self.shape_for_dirtag(seg.dirtag)
vehicle.segments.append(seg);
else:
dropped += 1
reports=[]
reports.append(report)
last_report = report
#print "\t\t%s segments found" % len(vehicle.segments)
print "\tFound %d segments" % len([s for s in self.segments()])
print "\tDropped %d segments for being too short" % (dropped,)
print "\tRemoving vehicles that have no segments..."
c=0
for vehicle in self.vehicles():
if not vehicle.segments:
c+=1
del self._vehicles[vehicle.vehicle_id]
print "\tRemoved %d vehicles"%c
def filter_by_endpoint(self):
print "Filtering segments by comparing segment endpoints to possible gtf_shape(s)..."
c=0
for seg in self.segments(return_valid_only=True):
seg_start_lation = seg.lations[0]
seg_end_lation = seg.lations[-1]
s = seg.shape #self.shape_for_dirtag(seg.dirtag)
if s is None:
continue
shape_start_lation = s.points[0]#.lation
shape_end_lation = s.points[-1]#.lation
start_point_distance = calcDistance(shape_start_lation,seg_start_lation)
end_point_distance = calcDistance(shape_end_lation,seg_end_lation)
if start_point_distance > THRESH_SEG_ENDPOINT_TO_SHAPE_ENDPOINT:
seg.valid = False
c+=1
else:
seg.valid = True
print "\t%s marked as invalid" % c
def filter_by_report_time(self):
print "Filtering by comparing times between reports..."
c=0
for seg in self.segments(return_valid_only=True):
last = seg.reports[0]
avg=[]
for r in seg.reports[1:]:
t=int((r.reported_update_time - last.reported_update_time).seconds)
avg.append(t)
if t > THRESH_TIME_BETWEEN_REPORTS:
seg.valid = False
dist = calcDistance( (last.lat,last.lon) , (r.lat,r.lon) )
print "Distance:",dist
last = r
if not seg.valid:
c+=1
print "Invalid, max delay:",max(avg)
print "\t%s marked invalid" % c
def segments(self,return_valid_only=False):
sorted_vehicles = self._vehicles.items()
sorted_vehicles.sort()
for vid,vehicle in sorted_vehicles:
for seg in vehicle.segments:
if return_valid_only and seg.valid == False:
continue
else:
yield seg
def shape_for_dirtag(self,dirtag):
for shape_id,shape in self.shapes.items():
if shape.dirtag == dirtag:
return shape
def vehicles(self):
sorted_vehicles = self._vehicles.items()
sorted_vehicles.sort()
for vid,vehicle in sorted_vehicles:
yield vehicle
def clear_filters(self):
for seg in self.segments():
seg.valid = True
def export_segments(self,valid_only=True):
segs = list(self.segments(valid_only));
for i,seg in enumerate(segs):
print "Exporting (%d/%d)..."%(i+1,len(segs))
seg.export_segment();
def calcDistance(lation1,lation2):
"""
  Calculate the distance between two lat/lon pairs in meters.
"""
return gis.distance_meters( map(float,lation1),
map(float,lation2) )
def gen_kml(route,dopoints=False,dotimestamps=False):
print "Building KML.."
  # Prepare dirtag folders
dirTagFolders = {}
for tag in route.dirtags:
dirTagFolders[tag] = {}#KFolder(tag)
invalid_paths = KFolder('INVALID')
for vehicle in route.vehicles():
vehicle_folder = KFolder(vehicle.vehicle_id)
for seg in vehicle.segments:
if dopoints:
point_folder = KFolder("points")
point_folder.visibility = False
folder = KFolder()
folder.name = "#%03d %s - %s " % (vehicle.segments.index(seg),vehicle.vehicle_id,seg.dirtag)
path = KPath()
for r in seg.reports:
l = [r.lat,r.lon]
path.add(l)
if dopoints:
p=KPlacemark(KPoint(l),name=r.reported_update_time)
p.visibility=False
point_folder.add(p)
folder.add(KPlacemark(path,folder.name,style_url='segmentLine'))
if dopoints:
folder.add(point_folder)
if dotimestamps:
folder.add(KPlacemark(KPoint(seg.lations[0]),name=seg.reports[0].reported_update_time,style_url='map_shaded_dot_true'))
folder.add(KPlacemark(KPoint(seg.lations[-1]),name=seg.reports[-1].reported_update_time,style_url='map_shaded_dot_false'))
else:
folder.add(KPlacemark(KPoint(seg.lations[0]),name='',style_url='map_shaded_dot_true'))
folder.add(KPlacemark(KPoint(seg.lations[-1]),name='',style_url='map_shaded_dot_false'))
if seg.valid is True:
if not dirTagFolders[seg.dirtag].has_key(vehicle.vehicle_id):
dirTagFolders[seg.dirtag][vehicle.vehicle_id] = KFolder(vehicle.vehicle_id)
dirTagFolders[seg.dirtag][vehicle.vehicle_id].add(folder)
else:
folder.visibility = False
invalid_paths.add(folder)
dir_folder = KFolder('Directions')
sorted_dirs = dirTagFolders.items()
sorted_dirs.sort()
for dirtag,vfolders in sorted_dirs:
dirFolder = KFolder(dirtag)
for vid,vfolder in vfolders.items():
dirFolder.add(vfolder)
dir_folder.add(dirFolder)
main_document = KFolder('Route %s'%route.route_short_name)
main_document.add(dir_folder)
  # Create a folder which draws the gtf_shape definitions
shape_folder = KFolder('Shapes')
if route.shapes:
for shape_id,shape in route.shapes.items():
path = KPath()
path.lations = [ l for l in shape.points]
shape_folder.add(KPlacemark(path,name=shape_id,style_url='gShapeLine'))
main_document.add(shape_folder)
kml_doc = KDocument(template_path='kml/document.kml',
docname="Test %s" % now(),
fname="%s_segments.kml" % route.route_short_name,
top_object=main_document,
style_doc='kml/segment_find_styles.kml')
print "Writing..."
kml_doc.write()
if __name__ == "__main__":
route = Route(argv[1])
gen_kml(route)
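# Usage sketch (assumed, not from the original file): the route name "30" and
# tzdiff value are made up; the methods called are all defined above.
#
# route = Route("30", tzdiff=0)
# for seg in route.segments(return_valid_only=True):
#     print seg.getGTFSRouteInfo()
# route.export_segments(valid_only=True)   # match segments to GTFS and export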
| {
"content_hash": "0f3722144bdbb99180e1f32fb203a57a",
"timestamp": "",
"source": "github",
"line_count": 476,
"max_line_length": 130,
"avg_line_length": 32.01890756302521,
"alnum_prop": 0.6422806902434224,
"repo_name": "cbick/gps2gtfs",
"id": "9e0bbbab04d66ad55fef704ddcffab85da55194e",
"size": "15241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/src/GPSDataTools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "215724"
},
{
"name": "Shell",
"bytes": "220"
}
],
"symlink_target": ""
} |
class vtkAVIWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkAVIWriter - Writes Windows AVI files.
Super Class:
vtkGenericMovieWriter
vtkAVIWriter writes AVI files. The data type
of the file is unsigned char regardless of the input type.
See Also:
vtkGenericMovieWriter vtkMPEG2Writer
"""
class vtkAVSucdReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkAVSucdReader - reads a dataset in AVS "UCD" format
Super Class:
vtkUnstructuredGridAlgorithm
vtkAVSucdReader creates an unstructured grid dataset. It reads binary or
ASCII files stored in UCD format, with optional data stored at the nodes
or at the cells of the model. A cell-based field data array stores the material
id. The class can automatically detect the endian-ness of the binary files.
See Also:
vtkGAMBITReader
"""
class vtkAppendFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkAppendFilter - appends one or more datasets together into a single unstructured grid
Super Class:
vtkUnstructuredGridAlgorithm
vtkAppendFilter is a filter that appends one or more datasets into a single
unstructured grid. All geometry is extracted and appended, but point
attributes (i.e., scalars, vectors, normals, field data, etc.) are extracted
and appended only if all datasets have the point attributes available.
(For example, if one dataset has scalars but another does not, scalars will
not be appended.)
See Also:
vtkAppendPolyData
"""
class vtkAppendPolyData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkAppendPolyData - appends one or more polygonal datasets together
Super Class:
vtkPolyDataAlgorithm
vtkAppendPolyData is a filter that appends one or more polygonal datasets
into a single polygonal dataset. All geometry is extracted and appended,
but point and cell attributes (i.e., scalars, vectors, normals) are
extracted and appended only if all datasets have the point and/or cell
attributes available. (For example, if one dataset has point scalars but
another does not, point scalars will not be appended.)
See Also:
vtkAppendFilter
"""
class vtkAppendSelection:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkAppendSelection - appends one or more selections together
Super Class:
vtkSelectionAlgorithm
vtkAppendSelection is a filter that appends one or more selections into
a single selection. All selections must have the same content type.
"""
class vtkArcPlotter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkArcPlotter - plot data along an arbitrary polyline
Super Class:
vtkPolyDataAlgorithm
vtkArcPlotter performs plotting of attribute data along polylines defined
with an input vtkPolyData data object. Any type of attribute data can be
plotted including scalars, vectors, tensors, normals, texture coordinates,
and field data. Either one or multiple data components can be plotted.
To use this class you must specify an input data set that contains one or
more polylines, and some attribute data including which component of the
attribute data. (By default, this class processes the first component of
scalar data.) You will also need to set an offset radius (the distance
of the polyline to the median line of the plot), a width for the plot
(the distance that the minimum and maximum plot values are mapped into),
and possibly an offset (used to offset attribute data with multiple
components).
Normally the filter automatically computes normals for generating the
offset arc plot. However, you can specify a default normal and use that
instead.
See Also:
vtkXYPlotActor
"""
class vtkArrayCalculator:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkArrayCalculator - perform mathematical operations on data in field data arrays
Super Class:
vtkDataSetAlgorithm
vtkArrayCalculator performs operations on vectors or scalars in field
data arrays. It uses vtkFunctionParser to do the parsing and to
evaluate the function for each entry in the input arrays. The arrays
used in a given function must be all in point data or all in cell data.
The resulting array will be stored as a field data array. The result
array can either be stored in a new array or it can overwrite an existing
array.
The functions that this array calculator understands is:
<pre>
standard operations: + - * / ^ .
access vector components: iHat, jHat, kHat
abs
acos
asin
atan
ceil
cos
cosh
exp
floor
log
mag
min
max
norm
sign
sin
sinh
sqrt
tan
tanh
</pre>
Note that some of these operations work on scalars, some on vectors, and some on
both (e.g., you can multiply a scalar times a vector). The operations are performed
tuple-wise (i.e., tuple-by-tuple). The user must specify which arrays to use as
vectors and/or scalars, and the name of the output data array.
See Also:
vtkFunctionParser
"""
class vtkArrowSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkArrowSource - Appends a cylinder to a cone to form an arrow.
Super Class:
vtkPolyDataAlgorithm
vtkArrowSource was intended to be used as the source for a glyph.
The shaft base is always at (0,0,0). The arrow tip is always at (1,0,0).
The resolution of the cone and shaft can be set and default to 6.
The radius of the cone and shaft can be set and default to 0.03 and 0.1.
The length of the tip can also be set, and defaults to 0.35.
"""
class vtkAssignAttribute:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkAssignAttribute - Labels a field as an attribute
Super Class:
vtkDataSetAlgorithm
vtkAssignAttribute is used to label a field (vtkDataArray) as an attribute.
A field name or an attribute to be labeled can be specified. For example:
@verbatim
aa->Assign("foo", vtkDataSetAttributes::SCALARS,
vtkAssignAttribute::POINT_DATA);
@endverbatim
tells vtkAssignAttribute to make the array in the point data called
"foo" the active scalars. On the other hand,
@verbatim
aa->Assign(vtkDataSetAttributes::VECTORS, vtkDataSetAttributes::SCALARS,
vtkAssignAttribute::POINT_DATA);
@endverbatim
tells vtkAssignAttribute to make the active vectors also the active
scalars. The same can be done more easily from Tcl by using the Assign()
method which takes strings:
@verbatim
aa Assign "foo" SCALARS POINT_DATA
or
aa Assign SCALARS VECTORS POINT_DATA
AttributeTypes: SCALARS, VECTORS, NORMALS, TCOORDS, TENSORS
Attribute locations: POINT_DATA, CELL_DATA
@endverbatim
Caveats:
When using Tcl, Java, Python or Visual Basic bindings, the array name
can not be one of the AttributeTypes when calling Assign() which takes
strings as arguments. The Tcl (Java etc.) command will
always assume the string corresponds to an attribute type when
the argument is one of the AttributeTypes. In this situation,
use the Assign() which takes enums.
See Also:
vtkFieldData vtkDataSet vtkDataObjectToDataSetFilter
vtkDataSetAttributes vtkDataArray vtkRearrangeFields
vtkSplitField vtkMergeFields
"""
class vtkAttributeDataToFieldDataFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkAttributeDataToFieldDataFilter - map attribute data to field data
Super Class:
vtkDataSetAlgorithm
vtkAttributeDataToFieldDataFilter is a class that maps attribute data into
field data. Since this filter is a subclass of vtkDataSetAlgorithm,
the output dataset (whose structure is the same as the input dataset),
will contain the field data that is generated. The filter will convert
point and cell attribute data to field data and assign it as point and
cell field data, replacing any point or field data that was there
previously. By default, the original non-field point and cell attribute
data will be passed to the output of the filter, although you can shut
this behavior down.
Caveats:
Reference counting the underlying data arrays is used to create the field
data. Therefore, no extra memory is utilized.
The original field data (if any) associated with the point and cell
attribute data is placed into the generated fields along with the scalars,
vectors, etc.
See Also:
vtkFieldData vtkDataObject vtkDataSet vtkFieldDataToAttributeDataFilter
"""
class vtkAxes:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkAxes - create an x-y-z axes
Super Class:
vtkPolyDataAlgorithm
vtkAxes creates three lines that form an x-y-z axes. The origin of the
axes is user specified (0,0,0 is default), and the size is specified with
a scale factor. Three scalar values are generated for the three lines and
can be used (via color map) to indicate a particular coordinate axis.
"""
class vtkBMPReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkBMPReader - read Windows BMP files
Super Class:
vtkImageReader
vtkBMPReader is a source object that reads Windows BMP files.
This includes indexed and 24bit bitmaps
Usually, all BMPs are converted to 24bit RGB, but BMPs may be output
as 8bit images with a LookupTable if the Allow8BitBMP flag is set.
BMPReader creates structured point datasets. The dimension of the
dataset depends upon the number of files read. Reading a single file
results in a 2D image, while reading more than one file results in a
3D volume.
To read a volume, files must be of the form "FileName.<number>"
(e.g., foo.bmp.0, foo.bmp.1, ...). You must also specify the image
range. This range specifies the beginning and ending files to read (range
can be any pair of non-negative numbers).
The default behavior is to read a single file. In this case, the form
of the file is simply "FileName" (e.g., foo.bmp).
See Also:
vtkBMPWriter
"""
class vtkBMPWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkBMPWriter - Writes Windows BMP files.
Super Class:
vtkImageWriter
vtkBMPWriter writes BMP files. The data type
of the file is unsigned char regardless of the input type.
See Also:
vtkBMPReader
"""
class vtkBYUReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkBYUReader - read MOVIE.BYU polygon files
Super Class:
vtkPolyDataAlgorithm
vtkBYUReader is a source object that reads MOVIE.BYU polygon files.
These files consist of a geometry file (.g), a scalar file (.s), a
displacement or vector file (.d), and a 2D texture coordinate file
(.t).
"""
class vtkBYUWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkBYUWriter - write MOVIE.BYU files
Super Class:
vtkPolyDataWriter
vtkBYUWriter writes MOVIE.BYU polygonal files. These files consist
of a geometry file (.g), a scalar file (.s), a displacement or
vector file (.d), and a 2D texture coordinate file (.t). These files
must be specified to the object, the appropriate boolean
variables must be true, and data must be available from the input
for the files to be written.
WARNING: this writer does not currently write triangle strips. Use
vtkTriangleFilter to convert strips to triangles.
"""
class vtkBandedPolyDataContourFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkBandedPolyDataContourFilter - generate filled contours for vtkPolyData
Super Class:
vtkPolyDataAlgorithm
vtkBandedPolyDataContourFilter is a filter that takes as input vtkPolyData
and produces as output filled contours (also represented as vtkPolyData).
Filled contours are bands of cells that all have the same cell scalar
value, and can therefore be colored the same. The method is also referred
to as filled contour generation.
To use this filter you must specify one or more contour values. You can
either use the method SetValue() to specify each contour value, or use
GenerateValues() to generate a series of evenly spaced contours. Each
contour value divides (or clips) the data into two pieces, values below
the contour value, and values above it. The scalar values of each
band correspond to the specified contour value. Note that if the first and
last contour values are not the minimum/maximum contour range, then two
extra contour values are added corresponding to the minimum and maximum
range values. These extra contour bands can be prevented from being output
by turning clipping on.
See Also:
vtkClipDataSet vtkClipPolyData vtkClipVolume vtkContourFilter
"""
class vtkBlankStructuredGrid:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkBlankStructuredGrid - translate point attribute data into a blanking field
Super Class:
vtkStructuredGridAlgorithm
vtkBlankStructuredGrid is a filter that sets the blanking field in a
vtkStructuredGrid dataset. The blanking field is set by examining a
specified point attribute data array (e.g., scalars) and converting
values in the data array to either a "1" (visible) or "0" (blanked) value
in the blanking array. The values to be blanked are specified by giving
a min/max range. All data values in the indicated data array lying
within the specified range (inclusive on both ends) are translated to
an "off" blanking value.
See Also:
vtkStructuredGrid
"""
class vtkBlankStructuredGridWithImage:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkBlankStructuredGridWithImage - blank a structured grid with an image
Super Class:
vtkStructuredGridAlgorithm
This filter can be used to set the blanking in a structured grid with
an image. The filter takes two inputs: the structured grid to blank,
and the image used to set the blanking. Make sure that the dimensions of
both the image and the structured grid are identical.
Note that the image is interpreted as follows: zero values indicate that
the structured grid point is blanked; non-zero values indicate that the
structured grid point is visible. The blanking data must be unsigned char.
See Also:
vtkStructuredGrid
"""
class vtkBooleanTexture:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkBooleanTexture - generate 2D texture map based on combinations of inside, outside, and on region boundary
Super Class:
vtkImageAlgorithm
vtkBooleanTexture is a filter to generate a 2D texture map based on
combinations of inside, outside, and on region boundary. The "region" is
implicitly represented via 2D texture coordinates. These texture
coordinates are normally generated using a filter like
vtkImplicitTextureCoords, which generates the texture coordinates for
any implicit function.
vtkBooleanTexture generates the map according to the s-t texture
coordinates plus the notion of being in, on, or outside of a
region. An in region is when the texture coordinate is between
(0,0.5-thickness/2). An out region is where the texture coordinate
is above (0.5+thickness/2). An on region is between
(0.5-thickness/2,0.5+thickness/2). The combination in, on, and out
for each of the s-t texture coordinates results in 16 possible
combinations (see text). For each combination, a different value of
intensity and transparency can be assigned. To assign maximum intensity
and/or opacity use the value 255. A minimum value of 0 results in
a black region (for intensity) and a fully transparent region (for
transparency).
See Also:
vtkImplicitTextureCoords vtkThresholdTextureCoords
"""
class vtkBoxClipDataSet:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkBoxClipDataSet - clip an unstructured grid
Super Class:
vtkUnstructuredGridAlgorithm
Clipping means that it actually 'cuts' through the cells of the dataset,
returning tetrahedral cells inside of the box.
The output of this filter is an unstructured grid.
This filter can be configured to compute a second output. The
second output is the part of the cell that is clipped away. Set the
GenerateClippedData boolean on if you wish to access this output data.
The vtkBoxClipDataSet will triangulate all types of 3D cells (i.e, create tetrahedra).
This is necessary to preserve compatibility across face neighbors.
To use this filter, you can decide if you will be clipping with a box or a hexahedral box.
1) Set orientation
if(SetOrientation(0)): box (parallel with coordinate axis)
SetBoxClip(xmin,xmax,ymin,ymax,zmin,zmax)
if(SetOrientation(1)): hexahedral box (Default)
SetBoxClip(n[0],o[0],n[1],o[1],n[2],o[2],n[3],o[3],n[4],o[4],n[5],o[5])
PlaneNormal[] normal of each plane
PlanePoint[] point on the plane
2) Apply the GenerateClipScalarsOn()
3) Execute clipping Update();
"""
class vtkBrownianPoints:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkBrownianPoints - assign random vector to points
Super Class:
vtkDataSetAlgorithm
vtkBrownianPoints is a filter object that assigns a random vector (i.e.,
magnitude and direction) to each point. The minimum and maximum speed
values can be controlled by the user.
"""
class vtkButterflySubdivisionFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkButterflySubdivisionFilter - generate a subdivision surface using the Butterfly Scheme
Super Class:
vtkInterpolatingSubdivisionFilter
vtkButterflySubdivisionFilter is an interpolating subdivision scheme
that creates four new triangles for each triangle in the mesh. The
user can specify the NumberOfSubdivisions. This filter implements the
8-point butterfly scheme described in: Zorin, D., Schroder, P., and
Sweldens, W., "Interpolating Subdivisions for Meshes with Arbitrary
Topology," Computer Graphics Proceedings, Annual Conference Series,
1996, ACM SIGGRAPH, pp.189-192. This scheme improves previous
butterfly subdivisions with special treatment of vertices with valence
other than 6.
Currently, the filter only operates on triangles. Users should use the
vtkTriangleFilter to triangulate meshes that contain polygons or
triangle strips.
The filter interpolates point data using the same scheme. New
triangles created at a subdivision step will have the cell data of
their parent cell.
See Also:
vtkInterpolatingSubdivisionFilter vtkLinearSubdivisionFilter
"""
class vtkCGMWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkCGMWriter - write polygonal data as a CGM file
Super Class:
vtkPolyDataWriter
vtkCGMWriter writes CGM (Computer Graphics Metafile) output. CGM is a 2D
graphics vector format typically used by large plotters. This writer can
handle vertices, lines, polygons, and triangle strips in any combination.
Colors are specified either 1) from cell scalars (assumed to be RGB or
RGBA color specification), 2) from a specified color; or 3) randomly
assigned colors.
Note: During output of the polygonal data, triangle strips are converted
to triangles, and polylines to lines. Also, due to limitations in the CGM
color model, only 256 colors are available to the color palette.
Caveats:
The class vtkImageToPolyDataFilter is convenient for converting a raster
image into polygons (and color map) suitable for plotting with CGM.
See Also:
vtkPolyDataWriter vtkPointDataToCellData
"""
class vtkCastToConcrete:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCastToConcrete - works around type-checking limitations
Super Class:
vtkDataSetAlgorithm
vtkCastToConcrete is a filter that works around type-checking limitations
in the filter classes. Some filters generate abstract types on output,
and cannot be connected to the input of filters requiring a concrete
input type. For example, vtkElevationFilter generates vtkDataSet for output,
and cannot be connected to vtkDecimate, because vtkDecimate requires
vtkPolyData as input. This is true even though (in this example) the input
to vtkElevationFilter is of type vtkPolyData, and you know the output of
vtkElevationFilter is the same type as its input.
vtkCastToConcrete performs run-time checking to insure that output type
is of the right type. An error message will result if you try to cast
an input type improperly. Otherwise, the filter performs the appropriate
cast and returns the data.
Caveats:
You must specify the input before you can get the output. Otherwise an
error results.
See Also:
vtkDataSetAlgorithm vtkPointSetToPointSetFilter
"""
class vtkCellCenters:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCellCenters - generate points at center of cells
Super Class:
vtkPolyDataAlgorithm
vtkCellCenters is a filter that takes as input any dataset and
generates on output points at the center of the cells in the dataset.
These points can be used for placing glyphs (vtkGlyph3D) or labeling
(vtkLabeledDataMapper). (The center is the parametric center of the
cell, not necessarily the geometric or bounding box center.) The cell
attributes will be associated with the points on output.
Caveats:
You can choose to generate just points or points and vertex cells.
Vertex cells are drawn during rendering; points are not. Use the ivar
VertexCells to generate cells.
See Also:
vtkGlyph3D vtkLabeledDataMapper
"""
class vtkCellDataToPointData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCellDataToPointData - map cell data to point data
Super Class:
vtkDataSetAlgorithm
vtkCellDataToPointData is a filter that transforms cell data (i.e., data
specified per cell) into point data (i.e., data specified at cell
points). The method of transformation is based on averaging the data
values of all cells using a particular point. Optionally, the input cell
data can be passed through to the output as well.
Caveats:
This filter is an abstract filter, that is, the output is an abstract type
(i.e., vtkDataSet). Use the convenience methods (e.g.,
GetPolyDataOutput(), GetStructuredPointsOutput(), etc.) to get the type
of output you want.
See Also:
vtkPointData vtkCellData vtkPointDataToCellData
"""
class vtkCellDerivatives:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCellDerivatives - compute derivatives of scalars and vectors
Super Class:
vtkDataSetAlgorithm
vtkCellDerivatives is a filter that computes derivatives of scalars
and vectors at the center of cells. You can choose to generate
different output including the scalar gradient (a vector), computed
tensor vorticity (a vector), gradient of input vectors (a tensor),
and strain matrix of the input vectors (a tensor); or you may
choose to pass data through to the output.
Note that it is assumed that on input scalars and vector point data
is available, which are then used to generate cell vectors and tensors.
(The interpolation functions of the cells are used to compute the
derivatives which is why point data is required.)
Caveats:
The computed derivatives are cell attribute data; you can convert them to
point attribute data by using the vtkCellDataToPointData filter.
Note that, due to the interpolation function used (obtained using
1/r**2 normalized sum), the derivatives calculated for polygons
with more than 4 vertices are inaccurate in most cases.
The point data is passed through the filter to the output.
See Also:
vtkVectorNorm
"""
class vtkChacoReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkChacoReader - Read a Chaco file and create a vtkUnstructuredGrid.
Super Class:
vtkUnstructuredGridAlgorithm
vtkChacoReader is an unstructured grid source object that reads Chaco
files. The reader DOES NOT respond to piece requests. Chaco
is a graph partitioning package developed at Sandia National Laboratories
in the early 1990s. (http://www.cs.sandia.gov/~bahendr/chaco.html)
Note that the Chaco "edges" become VTK "cells", and the Chaco
"vertices" become VTK "points".
"""
class vtkCleanPolyData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCleanPolyData - merge duplicate points, and/or remove unused points and/or remove degenerate cells
Super Class:
vtkPolyDataAlgorithm
vtkCleanPolyData is a filter that takes polygonal data as input and
generates polygonal data as output. vtkCleanPolyData can merge duplicate
points (within specified tolerance and if enabled), eliminate points
that are not used, and if enabled, transform degenerate cells into
appropriate forms (for example, a triangle is converted into a line
if two points of triangle are merged).
Conversion of degenerate cells is controlled by the flags
ConvertLinesToPoints, ConvertPolysToLines, ConvertStripsToPolys which act
cumulatively such that a degenerate strip may become a poly.
The full set is
Line with 1 points -> Vert (if ConvertLinesToPoints)
Poly with 2 points -> Line (if ConvertPolysToLines)
Poly with 1 points -> Vert (if ConvertPolysToLines && ConvertLinesToPoints)
Strp with 3 points -> Poly (if ConvertStripsToPolys)
Strp with 2 points -> Line (if ConvertStripsToPolys && ConvertPolysToLines)
Strp with 1 points -> Vert (if ConvertStripsToPolys && ConvertPolysToLines
&& ConvertLinesToPoints)
If the tolerance is specified as precisely 0.0, then vtkCleanPolyData will use
the vtkMergePoints object to merge points (which is faster). Otherwise the
slower vtkPointLocator is used. Before inserting points into the point
locator, this class calls a function OperateOnPoint which can be used (in
subclasses) to further refine the cleaning process. See
vtkQuantizePolyDataPoints.
Note that merging of points can be disabled. In this case, a point locator
will not be used, and points that are not used by any cells will be
eliminated, but never merged.
Caveats:
Merging points can alter topology, including introducing non-manifold
forms. The tolerance should be chosen carefully to avoid these problems.
Subclasses should handle OperateOnBounds as well as OperateOnPoint
to ensure that the locator is correctly initialized (i.e. all modified
points must lie inside modified bounds).
See Also:
vtkQuantizePolyDataPoints
"""
class vtkClipDataSet:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkClipDataSet - clip any dataset with user-specified implicit function or input scalar data
Super Class:
vtkUnstructuredGridAlgorithm
vtkClipDataSet is a filter that clips any type of dataset using either
any subclass of vtkImplicitFunction, or the input scalar
data. Clipping means that it actually "cuts" through the cells of
the dataset, returning everything inside of the specified implicit
function (or greater than the scalar value) including "pieces" of
a cell. (Compare this with vtkExtractGeometry, which pulls out
entire, uncut cells.) The output of this filter is an unstructured
grid.
To use this filter, you must decide if you will be clipping with an
implicit function, or whether you will be using the input scalar
data. If you want to clip with an implicit function, you must:
1) define an implicit function
2) set it with the SetClipFunction method
3) apply the GenerateClipScalarsOn method
If a ClipFunction is not specified, or GenerateClipScalars is off
(the default), then the input's scalar data will be used to clip
the polydata.
You can also specify a scalar value, which is used to decide what is
inside and outside of the implicit function. You can also reverse the
sense of what inside/outside is by setting the InsideOut instance
variable. (The clipping algorithm proceeds by computing an implicit
function value or using the input scalar data for each point in the
dataset. This is compared to the scalar value to determine
inside/outside.)
This filter can be configured to compute a second output. The
second output is the part of the cell that is clipped away. Set the
GenerateClippedData boolean on if you wish to access this output data.
Caveats:
vtkClipDataSet will triangulate all types of 3D cells (i.e., create
tetrahedra). This is true even if the cell is not actually cut. This
is necessary to preserve compatibility across face neighbors. 2D cells
will only be triangulated if the cutting function passes through them.
See Also:
vtkImplicitFunction vtkCutter vtkClipVolume vtkClipPolyData
"""
class vtkClipHyperOctree:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkClipHyperOctree - clip an hyperoctree with user-specified implicit function or input scalar data
Super Class:
vtkUnstructuredGridAlgorithm
vtkClipHyperOctree is a filter that clips an hyperoctree using either
any subclass of vtkImplicitFunction, or the input scalar
data. Clipping means that it actually "cuts" through the leaves (cells) of
the hyperoctree, returning everything inside of the specified implicit
function (or greater than the scalar value) including "pieces" of
a cell. (Compare this with vtkExtractGeometry, which pulls out
entire, uncut cells.) The output of this filter is an unstructured
grid.
To use this filter, you must decide if you will be clipping with an
implicit function, or whether you will be using the input scalar
data. If you want to clip with an implicit function, you must:
1) define an implicit function
2) set it with the SetClipFunction method
3) apply the GenerateClipScalarsOn method
If a ClipFunction is not specified, or GenerateClipScalars is off
(the default), then the input's scalar data will be used to clip
the polydata.
You can also specify a scalar value, which is used to decide what is
inside and outside of the implicit function. You can also reverse the
sense of what inside/outside is by setting the InsideOut instance
variable. (The clipping algorithm proceeds by computing an implicit
function value or using the input scalar data for each point in the
dataset. This is compared to the scalar value to determine
inside/outside.)
This filter can be configured to compute a second output. The
second output is the part of the cell that is clipped away. Set the
GenerateClippedData boolean on if you wish to access this output data.
Caveats:
vtkClipHyperOctree will triangulate all types of 3D cells (i.e., create
tetrahedra). This is true even if the cell is not actually cut. This
is necessary to preserve compatibility across face neighbors. 2D cells
will only be triangulated if the cutting function passes through them.
See Also:
vtkImplicitFunction vtkCutter vtkClipVolume vtkClipPolyData
"""
class vtkClipPolyData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkClipPolyData - clip polygonal data with user-specified implicit function or input scalar data
Super Class:
vtkPolyDataAlgorithm
vtkClipPolyData is a filter that clips polygonal data using either
any subclass of vtkImplicitFunction, or the input scalar
data. Clipping means that it actually "cuts" through the cells of
the dataset, returning everything inside of the specified implicit
function (or greater than the scalar value) including "pieces" of
a cell. (Compare this with vtkExtractGeometry, which pulls out
entire, uncut cells.) The output of this filter is polygonal data.
To use this filter, you must decide if you will be clipping with an
implicit function, or whether you will be using the input scalar
data. If you want to clip with an implicit function, you must:
1) define an implicit function
2) set it with the SetClipFunction method
3) apply the GenerateClipScalarsOn method
If a ClipFunction is not specified, or GenerateClipScalars is off
(the default), then the input's scalar data will be used to clip
the polydata.
You can also specify a scalar value, which is used to
decide what is inside and outside of the implicit function. You can
also reverse the sense of what inside/outside is by setting the
InsideOut instance variable. (The cutting algorithm proceeds by
computing an implicit function value or using the input scalar data
for each point in the dataset. This is compared to the scalar value
to determine inside/outside.)
This filter can be configured to compute a second output. The
second output is the polygonal data that is clipped away. Set the
GenerateClippedData boolean on if you wish to access this output data.
Caveats:
In order to cut all types of cells in polygonal data, vtkClipPolyData
triangulates some cells, and then cuts the resulting simplices
(i.e., points, lines, and triangles). This means that the resulting
output may consist of different cell types than the input data.
See Also:
vtkImplicitFunction vtkCutter vtkClipVolume
"""
class vtkClipVolume:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkClipVolume - clip volume data with user-specified implicit function or input scalar data
Super Class:
vtkUnstructuredGridAlgorithm
vtkClipVolume is a filter that clips volume data (i.e., vtkImageData)
using either: any subclass of vtkImplicitFunction or the input scalar
data. The clipping operation cuts through the cells of the
dataset--converting 3D image data into a 3D unstructured grid--returning
everything inside of the specified implicit function (or greater than the
scalar value). During the clipping the filter will produce pieces of a
cell. (Compare this with vtkExtractGeometry or vtkGeometryFilter, which
produces entire, uncut cells.) The output of this filter is a 3D
unstructured grid (e.g., tetrahedra or other 3D cell types).
To use this filter, you must decide if you will be clipping with an
implicit function, or whether you will be using the input scalar data. If
you want to clip with an implicit function, you must first define and then
set the implicit function with the SetClipFunction() method. Otherwise,
you must make sure input scalar data is available. You can also specify a
scalar value, which is used to decide what is inside and outside of the
implicit function. You can also reverse the sense of what inside/outside
is by setting the InsideOut instance variable. (The cutting algorithm
proceeds by computing an implicit function value or using the input scalar
data for each point in the dataset. This is compared to the scalar value
to determine inside/outside.)
This filter can be configured to compute a second output. The
second output is the portion of the volume that is clipped away. Set the
GenerateClippedData boolean on if you wish to access this output data.
The filter will produce an unstructured grid of entirely tetrahedra or a
mixed grid of tetrahedra and other 3D cell types (e.g., wedges). Control
this behavior by setting the Mixed3DCellGeneration. By default the
Mixed3DCellGeneration is on and a combination of cell types will be
produced. Note that producing mixed cell types is faster than producing
only tetrahedra.
Caveats:
This filter is designed to function with 3D structured points. Clipping
2D images should be done by converting the image to polygonal data
and using vtkClipPolyData.
See Also:
vtkImplicitFunction vtkClipPolyData vtkGeometryFilter vtkExtractGeometry
"""
class vtkConeSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkConeSource - generate polygonal cone
Super Class:
vtkPolyDataAlgorithm
vtkConeSource creates a cone centered at a specified point and pointing in
a specified direction. (By default, the center is the origin and the
direction is the x-axis.) Depending upon the resolution of this object,
different representations are created. If resolution=0 a line is created;
if resolution=1, a single triangle is created; if resolution=2, two
crossed triangles are created. For resolution > 2, a 3D cone (with
resolution number of sides) is created. It also is possible to control
whether the bottom of the cone is capped with a (resolution-sided)
polygon, and to specify the height and radius of the cone.
"""
class vtkConnectivityFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkConnectivityFilter - extract data based on geometric connectivity
Super Class:
vtkUnstructuredGridAlgorithm
vtkConnectivityFilter is a filter that extracts cells that share common
points and/or meet other connectivity criterion. (Cells that share
vertices and meet other connectivity criterion such as scalar range are
known as a region.) The filter works in one of six ways: 1) extract the
largest connected region in the dataset; 2) extract specified region
numbers; 3) extract all regions sharing specified point ids; 4) extract
all regions sharing specified cell ids; 5) extract the region closest to
the specified point; or 6) extract all regions (used to color the data by
region).
vtkConnectivityFilter is generalized to handle any type of input dataset.
It generates output data of type vtkUnstructuredGrid. If you know that
your input type is vtkPolyData, you may wish to use
vtkPolyDataConnectivityFilter.
The behavior of vtkConnectivityFilter can be modified by turning on the
boolean ivar ScalarConnectivity. If this flag is on, the connectivity
algorithm is modified so that cells are considered connected only if 1)
they are geometrically connected (share a point) and 2) the scalar values
of one of the cell's points falls in the scalar range specified. This use
of ScalarConnectivity is particularly useful for volume datasets: it can
be used as a simple "connected segmentation" algorithm. For example, by
using a seed voxel (i.e., cell) on a known anatomical structure,
connectivity will pull out all voxels "containing" the anatomical
structure. These voxels can then be contoured or processed by other
visualization filters.
See Also:
vtkPolyDataConnectivityFilter
"""
class vtkContourFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkContourFilter - generate isosurfaces/isolines from scalar values
Super Class:
vtkPolyDataAlgorithm
vtkContourFilter is a filter that takes as input any dataset and
generates on output isosurfaces and/or isolines. The exact form
of the output depends upon the dimensionality of the input data.
Data consisting of 3D cells will generate isosurfaces, data
consisting of 2D cells will generate isolines, and data with 1D
or 0D cells will generate isopoints. Combinations of output type
are possible if the input dimension is mixed.
To use this filter you must specify one or more contour values.
You can either use the method SetValue() to specify each contour
value, or use GenerateValues() to generate a series of evenly
spaced contours. It is also possible to accelerate the operation of
this filter (at the cost of extra memory) by using a
vtkScalarTree. A scalar tree is used to quickly locate cells that
contain a contour surface. This is especially effective if multiple
contours are being extracted. If you want to use a scalar tree,
invoke the method UseScalarTreeOn().
Caveats:
For unstructured data or structured grids, normals and gradients
are not computed. Use vtkPolyDataNormals to compute the surface
normals.
See Also:
vtkMarchingContourFilter vtkMarchingCubes vtkSliceCubes
vtkMarchingSquares vtkImageMarchingCubes
"""
class vtkContourGrid:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkContourGrid - generate isosurfaces/isolines from scalar values (specialized for unstructured grids)
Super Class:
vtkPolyDataAlgorithm
vtkContourGrid is a filter that takes as input datasets of type
vtkUnstructuredGrid and generates on output isosurfaces and/or
isolines. The exact form of the output depends upon the dimensionality of
the input data. Data consisting of 3D cells will generate isosurfaces,
data consisting of 2D cells will generate isolines, and data with 1D or 0D
cells will generate isopoints. Combinations of output type are possible if
the input dimension is mixed.
To use this filter you must specify one or more contour values.
You can either use the method SetValue() to specify each contour
value, or use GenerateValues() to generate a series of evenly
spaced contours. It is also possible to accelerate the operation of
this filter (at the cost of extra memory) by using a
vtkScalarTree. A scalar tree is used to quickly locate cells that
contain a contour surface. This is especially effective if multiple
contours are being extracted. If you want to use a scalar tree,
invoke the method UseScalarTreeOn().
Caveats:
For unstructured data or structured grids, normals and gradients
are not computed. Use vtkPolyDataNormals to compute the surface
normals of the resulting isosurface.
See Also:
vtkMarchingContourFilter vtkKitwareContourFilter
vtkMarchingCubes vtkSliceCubes vtkDividingCubes vtkMarchingSquares
vtkImageMarchingCubes
"""
class vtkConvertSelection:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkConvertSelection -
Super Class:
vtkSelectionAlgorithm
vtkConvertSelection
.SECTION Thanks
See Also:
"""
class vtkCubeSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCubeSource - create a polygonal representation of a cube
Super Class:
vtkPolyDataAlgorithm
vtkCubeSource creates a cube centered at origin. The cube is represented
with four-sided polygons. It is possible to specify the length, width,
and height of the cube independently.
"""
class vtkCursor2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCursor2D - generate a 2D cursor representation
Super Class:
vtkPolyDataAlgorithm
vtkCursor2D is a class that generates a 2D cursor representation.
The cursor consists of two intersecting axes that meet at the
cursor focus. Several optional features are available as well. An
optional 2D bounding box may be enabled. An inner radius, centered at
the focal point, can be set that erases the intersecting lines (e.g.,
it leaves a clear area under the focal point so you can see
what you are selecting). And finally, an optional point can be
enabled located at the focal point. All of these features can be turned
on and off independently.
"""
class vtkCurvatures:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCurvatures - compute curvatures (Gauss and mean) of a Polydata object
Super Class:
vtkPolyDataAlgorithm
vtkCurvatures takes a polydata input and computes the curvature of the
mesh at each point. Four possible methods of computation are available:
Gauss Curvature
discrete Gauss curvature (K) computation:
K(vertex v) = 2*PI - sum over facet neighbours f of v of (angle_f at v)
The contribution of every facet is for the moment weighted by Area(facet)/3.
The units of Gaussian Curvature are [1/m^2].
Mean Curvature
H(vertex v) = average over edge neighbours e of H(e)
H(edge e) = length(e) * dihedral_angle(e)
NB: dihedral_angle is the ORIENTED angle between -PI and PI; this means
that the surface is assumed to be orientable and that the computation
creates the orientation.
The units of Mean Curvature are [1/m].
Maximum (k_max) and Minimum (k_min) Principal Curvatures
k_max = H + sqrt(H^2 - K)
k_min = H - sqrt(H^2 - K)
Excepting spherical and planar surfaces, which have equal principal curvatures,
the curvature at a point on a surface varies with the direction one "sets off"
from the point. For all directions, the curvature will pass through two extrema:
a minimum (k_min) and a maximum (k_max), which occur in mutually orthogonal
directions.
NB. The sign of the Gauss curvature is a geometric invariant: it should be
positive when the surface looks like a sphere and negative when it looks
like a saddle. The sign of the Mean curvature, however, is not; it depends
on the convention for normals. This code assumes that normals point
outwards (i.e., from the surface of a sphere outwards). If a given mesh
produces curvatures of opposite senses, the flag InvertMeanCurvature can
be set and the curvature reported by the Mean calculation will be inverted.
Thanks:
Philip Batchelor philipp.batchelor@kcl.ac.uk for creating and contributing
the class, and Andrew Maclean a.maclean@acfr.usyd.edu.au for cleanups and
fixes. Thanks also to Goodwin Lawlor for contributing a patch to calculate
principal curvatures.
"""
class vtkCutter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCutter - Cut vtkDataSet with user-specified implicit function
Super Class:
vtkPolyDataAlgorithm
vtkCutter is a filter to cut through data using any subclass of
vtkImplicitFunction. That is, a polygonal surface is created
corresponding to the implicit function F(x,y,z) = value(s), where
you can specify one or more values used to cut with.
In VTK, cutting means reducing a cell of dimension N to a cut surface
of dimension N-1. For example, a tetrahedron when cut by a plane (i.e.,
vtkPlane implicit function) will generate triangles. (In comparison,
clipping takes an N dimensional cell and creates N dimensional primitives.)
vtkCutter is generally used to "slice-through" a dataset, generating
a surface that can be visualized. It is also possible to use vtkCutter
to do a form of volume rendering. vtkCutter does this by generating
multiple cut surfaces (usually planes) which are ordered (and rendered)
from back-to-front. The surfaces are set translucent to give a
volumetric rendering effect.
Note that data can be cut using either 1) the scalar values associated
with the dataset or 2) an implicit function associated with this class.
By default, if an implicit function is set it is used to clip the data
set, otherwise the dataset scalars are used to perform the clipping.
See Also:
vtkImplicitFunction vtkClipPolyData
"""
class vtkCylinderSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkCylinderSource - generate a cylinder centered at origin
Super Class:
vtkPolyDataAlgorithm
vtkCylinderSource creates a polygonal cylinder centered at Center.
The axis of the cylinder is aligned along the global y-axis.
The height and radius of the cylinder can be specified, as well as the
number of sides. It is also possible to control whether the cylinder is
open-ended or capped.
"""
class vtkDEMReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkDEMReader - read a digital elevation model (DEM) file
Super Class:
vtkImageAlgorithm
vtkDEMReader reads digital elevation files and creates image data.
Digital elevation files are produced by the
<A HREF="http://www.usgs.gov">US Geological Survey</A>.
A complete description of the DEM file is located at the USGS site.
The reader reads the entire DEM file and creates a vtkImageData that
contains a single scalar component that is the elevation in meters.
The spacing is also expressed in meters. A number of get methods
provide access to fields on the header.
"""
class vtkDICOMImageReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkDICOMImageReader - Reads some DICOM images
Super Class:
vtkImageReader2
DICOM (Digital Imaging and Communications in Medicine)
is a medical image file format widely used to exchange data produced
by various modalities.
Warnings:
This reader might eventually handle ACR-NEMA files (the predecessor of the
DICOM format for medical images).
This reader does not handle the encapsulated format; only plain raw files
are handled. This reader also does not handle multi-frame DICOM datasets.
See Also:
vtkBMPReader vtkPNMReader vtkTIFFReader
"""
class vtkDashedStreamLine:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDashedStreamLine - generate constant-time dashed streamline in arbitrary dataset
Super Class:
vtkStreamLine
vtkDashedStreamLine is a filter that generates a "dashed" streamline for
an arbitrary dataset. The streamline consists of a series of dashes, each
of which represents (approximately) a constant time increment. Thus, in the
resulting visual representation, relatively long dashes represent areas of
high velocity, and small dashes represent areas of low velocity.
vtkDashedStreamLine introduces the instance variable DashFactor.
DashFactor interacts with its superclass' instance variable StepLength to
create the dashes. DashFactor is the percentage of the StepLength line
segment that is visible. Thus, if the DashFactor=0.75, the dashes will be
"three-quarters on" and "one-quarter off".
See Also:
vtkStreamer vtkStreamLine vtkStreamPoints
"""
class vtkDataObjectAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDataObjectAlgorithm - Superclass for algorithms that produce only data object as output
Super Class:
vtkAlgorithm
"""
class vtkDataObjectReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkDataObjectReader - read vtk field data file
Super Class:
vtkDataReader
vtkDataObjectReader is a source object that reads ASCII or binary field
data files in vtk format. Fields are general matrix structures used to
represent complex data. (See text for format details). The output of this
reader is a single vtkDataObject. The superclass of this class,
vtkDataReader, provides many methods for controlling the reading of the
data file, see vtkDataReader for more information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkFieldData vtkDataObjectWriter
"""
class vtkDataObjectToDataSetFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDataObjectToDataSetFilter - map field data to concrete dataset
Super Class:
vtkDataSetAlgorithm
vtkDataObjectToDataSetFilter is a class that maps a data object (i.e., a field)
into a concrete dataset, i.e., gives structure to the field by defining a
geometry and topology.
To use this filter you associate components in the input field data with
portions of the output dataset. (A component is an array of values from
the field.) For example, you would specify x-y-z points by assigning
components from the field for the x, then y, then z values of the points.
You may also have to specify component ranges (for each x-y-z) to make
sure that the number of x, y, and z values is the same. Also, you may
want to normalize the components which helps distribute the data
uniformly. Once you've set up the filter to combine all the pieces of
data into a specified dataset (the geometry, topology, point and cell
data attributes), the various output methods (e.g., GetPolyData()) are
used to retrieve the final product.
This filter is often used in conjunction with
vtkFieldDataToAttributeDataFilter. vtkFieldDataToAttributeDataFilter
takes field data and transforms it into attribute data (e.g., point and
cell data attributes such as scalars and vectors). To do this, use this
filter (which constructs a concrete dataset and passes the input data
object's field data to its output), and then use
vtkFieldDataToAttributeDataFilter to generate the attribute data associated
with the dataset.
Caveats:
Make sure that the data you extract is consistent. That is, if you have N
points, extract N x, y, and z components. Also, all the information
necessary to define a dataset must be given. For example, vtkPolyData
requires points at a minimum; vtkStructuredPoints requires setting the
dimensions; vtkStructuredGrid requires defining points and dimensions;
vtkUnstructuredGrid requires setting points; and vtkRectilinearGrid
requires that you define the x, y, and z-coo ...
[Truncated]
See Also:
vtkDataObject vtkFieldData vtkDataSet vtkPolyData vtkStructuredPoints
vtkStructuredGrid vtkUnstructuredGrid vtkRectilinearGrid
vtkDataSetAttributes vtkDataArray
"""
class vtkDataObjectToTable:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDataObjectToTable - extract field data as a table
Super Class:
vtkTableAlgorithm
This filter can extract the field, cell, or point data of any data object
as a table.
"""
class vtkDataObjectWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkDataObjectWriter - write vtk field data
Super Class:
vtkWriter
vtkDataObjectWriter is a source object that writes ASCII or binary
field data files in vtk format. Field data is a general form of data in
matrix form.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkFieldData vtkFieldDataReader
"""
class vtkDataSetAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDataSetAlgorithm - Superclass for algorithms that produce output of the same type as input
Super Class:
vtkAlgorithm
vtkDataSetAlgorithm is a convenience class to make writing algorithms
easier. It is also designed to help transition old algorithms to the new
pipeline architecture. Ther are some assumptions and defaults made by this
class you should be aware of. This class defaults such that your filter
will have one input port and one output port. If that is not the case
simply change it with SetNumberOfInputPorts etc. See this classes
contstructor for the default. This class also provides a FillInputPortInfo
method that by default says that all inputs will be DataSet. If that isn't
the case then please override this method in your subclass. This class
breaks out the downstream requests into seperate functions such as
RequestDataObject RequestData and RequestInformation. The default
implementation of RequestDataObject will create an output data of the
same type as the input.
"""
class vtkDataSetReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkDataSetReader - class to read any type of vtk dataset
Super Class:
vtkDataReader
vtkDataSetReader is a class that provides instance variables and methods
to read any type of dataset in Visualization Toolkit (vtk) format. The
output type of this class will vary depending upon the type of data
file. Convenience methods are provided to keep the data as a particular
type. (See text for format description details).
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkDataReader vtkPolyDataReader vtkRectilinearGridReader
vtkStructuredPointsReader vtkStructuredGridReader vtkUnstructuredGridReader
"""
class vtkDataSetSurfaceFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDataSetSurfaceFilter - Extracts outer (polygonal) surface.
Super Class:
vtkPolyDataAlgorithm
vtkDataSetSurfaceFilter is a faster version of vtkGeometryFilter, but it
does not have an option to select bounds. It may use more memory than
vtkGeometryFilter. It only has one option: whether to use triangle strips
when the input type is structured.
See Also:
vtkGeometryFilter vtkStructuredGridGeometryFilter.
"""
class vtkDataSetToDataObjectFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDataSetToDataObjectFilter - map dataset into data object (i.e., a field)
Super Class:
vtkDataObjectAlgorithm
vtkDataSetToDataObjectFilter is a class that transforms a dataset into a
data object (i.e., a field). The field will have labeled data arrays
corresponding to the topology, geometry, field data, and point and cell
attribute data.
You can control what portions of the dataset are converted into the
output data object's field data. The instance variables Geometry,
Topology, FieldData, PointData, and CellData are flags that control
whether the dataset's geometry (e.g., points, spacing, origin);
topology (e.g., cell connectivity, dimensions); the field data
associated with the dataset's superclass data object; the dataset's
point data attributes; and the dataset's cell data attributes. (Note:
the data attributes include scalars, vectors, tensors, normals, texture
coordinates, and field data.)
The names used to create the field data are as follows. For vtkPolyData,
"Points", "Verts", "Lines", "Polys", and "Strips". For vtkUnstructuredGrid,
"Cells" and "CellTypes". For vtkStructuredPoints, "Dimensions", "Spacing",
and "Origin". For vtkStructuredGrid, "Points" and "Dimensions". For
vtkRectilinearGrid, "XCoordinates", "YCoordinates", and "ZCoordinates".
For point attribute data, "PointScalars", "PointVectors", etc. For cell
attribute data, "CellScalars", "CellVectors", etc. Field data arrays retain
their original name.
See Also:
vtkDataObject vtkFieldData vtkDataObjectToDataSetFilter
"""
class vtkDataSetTriangleFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDataSetTriangleFilter - triangulate any type of dataset
Super Class:
vtkUnstructuredGridAlgorithm
vtkDataSetTriangleFilter generates n-dimensional simplices from any input
dataset. That is, 3D cells are converted to tetrahedral meshes, 2D cells
to triangles, and so on. The triangulation is guaranteed to be compatible.
This filter uses simple 1D and 2D triangulation techniques for cells
that are of topological dimension 2 or less. For 3D cells--due to the
issue of face compatibility across quadrilateral faces (which way to
orient the diagonal?)--a fancier ordered Delaunay triangulation is used.
This approach produces templates on the fly for triangulating the
cells. The templates are then used to do the actual triangulation.
See Also:
vtkOrderedTriangulator vtkTriangleFilter
"""
class vtkDataSetWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkDataSetWriter - write any type of vtk dataset to file
Super Class:
vtkDataWriter
vtkDataSetWriter is an abstract class for mapper objects that write their
data to disk (or into a communications port). The input to this object is
a dataset of any type.
"""
class vtkDecimatePro:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDecimatePro - reduce the number of triangles in a mesh
Super Class:
vtkPolyDataAlgorithm
vtkDecimatePro is a filter to reduce the number of triangles in a triangle
mesh, forming a good approximation to the original geometry. The input to
vtkDecimatePro is a vtkPolyData object, and only triangles are treated. If
you desire to decimate polygonal meshes, first triangulate the polygons
with vtkTriangleFilter object.
The implementation of vtkDecimatePro is similar to the algorithm
originally described in "Decimation of Triangle Meshes", Proc Siggraph
'92, with three major differences. First, this algorithm does not
necessarily preserve the topology of the mesh. Second, it is guaranteed to
give the mesh reduction factor specified by the user (as long as certain
constraints are not set - see Caveats). Third, it is set up to generate
progressive meshes, that is a stream of operations that can be easily
transmitted and incrementally updated (see Hugues Hoppe's Siggraph '96
paper on progressive meshes).
The algorithm proceeds as follows. Each vertex in the mesh is classified
and inserted into a priority queue. The priority is based on the error to
delete the vertex and retriangulate the hole. Vertices that cannot be
deleted or triangulated (at this point in the algorithm) are
skipped. Then, each vertex in the priority queue is processed (i.e.,
deleted followed by hole triangulation using edge collapse). This
continues until the priority queue is empty. Next, all remaining vertices
are processed, and the mesh is split into separate pieces along sharp
edges or at non-manifold attachment points and reinserted into the
priority queue. Again, the priority queue is processed until empty. If
the desired reduction is still not achieved, the remaining vertices are
split as necessary (in a recursive fashion) so that it is possible to
eliminate every triangle as necessary.
To use this object, at a minimum you need to specify the ivar
TargetReduction. The algorithm is guaranteed to generate a reduced mesh
at this level as long as the following four conditions are met: 1)
topology modification is allowed (i.e., the ivar PreserveTopology is off);
2) mesh splitting is enabled (i.e., the ivar Splitting is on); 3) the
algorithm is allowed to modify the boundary of the mesh (i.e., the ivar
BoundaryVertexDeletion is on); and 4) the maximum allowable error (i.e.,
the ivar MaximumError) is set to VTK_DOUBLE_MAX. Other important
parameters to adjust include the FeatureAngle and SplitAngle ivars, since
these can impact the quality of the final mesh. Also, you can set the
ivar AccumulateError to force incremental error update and distribution
to surrounding vertices as each vertex is deleted. The accumulated error
provides a conservative global bound on the decimation error, but requires
additional memory and time to compute.
Caveats:
To guarantee a given level of reduction, the ivar PreserveTopology must
be off; the ivar Splitting is on; the ivar BoundaryVertexDeletion is on;
and the ivar MaximumError is set to VTK_DOUBLE_MAX.
If PreserveTopology is off and SplitEdges is off, the mesh topology may
be modified by closing holes.
Once mesh splitting begins, the feature angle is set to the split angle.
See Also:
vtkDecimate vtkQuadricClustering vtkQuadricDecimation
"""
class vtkDelaunay2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDelaunay2D - create 2D Delaunay triangulation of input points
Super Class:
vtkPolyDataAlgorithm
vtkDelaunay2D is a filter that constructs a 2D Delaunay triangulation from
a list of input points. These points may be represented by any dataset of
type vtkPointSet and subclasses. The output of the filter is a polygonal
dataset. Usually the output is a triangle mesh, but if a non-zero alpha
distance value is specified (called the "alpha" value), then only
triangles, edges, and vertices lying within the alpha radius are
output. In other words, non-zero alpha values may result in arbitrary
combinations of triangles, lines, and vertices. (The notion of alpha value
is derived from Edelsbrunner's work on "alpha shapes".) Also, it is
possible to generate "constrained triangulations" using this filter.
A constrained triangulation is one where edges and loops (i.e., polygons)
can be defined and the triangulation will preserve them (read on for
more information).
The 2D Delaunay triangulation is defined as the triangulation that
satisfies the Delaunay criterion for n-dimensional simplexes (in this case
n=2 and the simplexes are triangles). This criterion states that a
circumsphere of each simplex in a triangulation contains only the n+1
defining points of the simplex. (See "The Visualization Toolkit" text
for more information.) In two dimensions, this translates into an optimal
triangulation. That is, the maximum interior angle of any triangle is less
than or equal to that of any possible triangulation.
Delaunay triangulations are used to build topological structures
from unorganized (or unstructured) points. The input to this filter
is a list of points specified in 3D, even though the triangulation
is 2D. Thus the triangulation is constructed in the x-y plane, and
the z coordinate is ignored (although carried through to the
output). If you desire to triangulate in a different plane, you
can use the vtkTransformFilter to transform the points into and
out of the x-y plane or you can specify a transform to the Delaunay2D
directly. In the latter case, the input points are transformed, the
transformed points are triangulated, and the output will use the
triangulated topology for the original (non-transformed) points. This
avoids transforming the data back as would be required when using the
vtkTransformFilter method. Specifying a transform directly also allows
any transform to be used: rigid, non-rigid, non-invertible, etc.
If an input transform is used, then alpha values are applied (for the
most part) in the original data space. The exception is when
BoundingTriangulation is on. In this case, alpha values are applied in
the original data space unless a cell uses a bounding vertex.
The Delaunay triangulation can be numerically sensitive in some cases. To
prevent problems, try to avoid injecting points that will result in
triangles with bad aspect ratios (1000:1 or greater). In practice this
means inserting points that are "widely dispersed", and enables smooth
transition of triangle sizes throughout the mesh. (You may even want to
add extra points to create a better point distribution.) If numerical
problems are present, you will see a warning message to this effect at
the end of the triangulation process.
To create constrained meshes, you must define an additional
input. This input is an instance of vtkPolyData which contains
lines, polylines, and/or polygons that define constrained edges and
loops. Only the topology of (lines and polygons) from this second
input are used. The topology is assumed to reference points in the
input point set (the one to be triangulated). In other words, the
lines and polygons use point ids from the first input point
set. Lines and polylines found in the input will be mesh edges in
the output. Polygons define a loop with inside and outside
regions. The inside of the polygon is determined by using the
right-hand-rule, i.e., looking down the z-axis a polygon should be
ordered counter-clockwise. Holes in a polygon should be ordered
clockwise. If you choose to create a constrained triangulation, the
final mesh may not satisfy the Delaunay criterion. (Note: the
lines/polygon edges must not intersect when projected onto the 2D
plane. It may not be possible to recover all edges due to not
enough points in the triangulation, or poorly defined edges
(coincident or excessively long). The form of the lines or
polygons is a list of point ids that correspond to the input point
ids used to generate the triangulation.)
If an input transform is used, constraints are defined in the
"transformed" space. So when the right hand rule is used for a
polygon constraint, that operation is applied using the transformed
points. Since the input transform can be any transformation (rigid
or non-rigid), care must be taken in constructing constraints when
an input transform is used.
Caveats:
Points arranged on a regular lattice (termed degenerate cases) can be
triangulated in more than one way (at least according to the Delaunay
criterion). The choice of triangulation (as implemented by
this algorithm) depends on the order of the input points. The first three
points will form a triangle; other degenerate points will not break
this triangle.
Points that are coincident (or nearly so) may be discarded by the algorithm.
This is because the Delaun ...
[Truncated]
See Also:
vtkDelaunay3D vtkTransformFilter vtkGaussianSplatter
"""
class vtkDelaunay3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDelaunay3D - create 3D Delaunay triangulation of input points
Super Class:
vtkUnstructuredGridAlgorithm
vtkDelaunay3D is a filter that constructs a 3D Delaunay
triangulation from a list of input points. These points may be
represented by any dataset of type vtkPointSet and subclasses. The
output of the filter is an unstructured grid dataset. Usually the
output is a tetrahedral mesh, but if a non-zero alpha distance
value is specified (called the "alpha" value), then only tetrahedra,
triangles, edges, and vertices lying within the alpha radius are
output. In other words, non-zero alpha values may result in arbitrary
combinations of tetrahedra, triangles, lines, and vertices. (The notion
of alpha value is derived from Edelsbrunner's work on "alpha shapes".)
The 3D Delaunay triangulation is defined as the triangulation that
satisfies the Delaunay criterion for n-dimensional simplexes (in
this case n=3 and the simplexes are tetrahedra). This criterion
states that a circumsphere of each simplex in a triangulation
contains only the n+1 defining points of the simplex. (See text for
more information.) While in two dimensions this translates into an
"optimal" triangulation, this is not true in 3D, since a measurement
for optimality in 3D is not agreed on.
Delaunay triangulations are used to build topological structures
from unorganized (or unstructured) points. The input to this filter
is a list of points specified in 3D. (If you wish to create 2D
triangulations see vtkDelaunay2D.) The output is an unstructured grid.
The Delaunay triangulation can be numerically sensitive. To prevent
problems, try to avoid injecting points that will result in
triangles with bad aspect ratios (1000:1 or greater). In practice
this means inserting points that are "widely dispersed", and
enables smooth transition of triangle sizes throughout the
mesh. (You may even want to add extra points to create a better
point distribution.) If numerical problems are present, you will
see a warning message to this effect at the end of the
triangulation process.
Caveats:
Points arranged on a regular lattice (termed degenerate cases) can be
triangulated in more than one way (at least according to the Delaunay
criterion). The choice of triangulation (as implemented by
this algorithm) depends on the order of the input points. The first four
points will form a tetrahedron; other degenerate points (relative to this
initial tetrahedron) will not break it.
Points that are coincident (or nearly so) may be discarded by the
algorit ...
[Truncated]
See Also:
vtkDelaunay2D vtkGaussianSplatter vtkUnstructuredGrid
"""
class vtkDelimitedTextReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkDelimitedTextReader - reader for pulling in flat text files
Super Class:
vtkTableAlgorithm
vtkDelimitedTextReader is an interface for pulling in data from a
flat, delimited text file (delimiter can be any character).
This class emits ProgressEvent for every 100 lines it reads.
Thanks:
Thanks to Andy Wilson and Brian Wylie from Sandia National Laboratories
for implementing this class.
Caveats:
This reader assumes that the first line in the file (whether that's
headers or the first document) contains at least as many fields as
any other line in the file.
"""
class vtkDepthSortPolyData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDepthSortPolyData - sort poly data along camera view direction
Super Class:
vtkPolyDataAlgorithm
vtkDepthSortPolyData rearranges the order of cells so that certain
rendering operations (e.g., transparency or Painter's algorithms)
generate correct results. To use this filter you must specify the
direction vector along which to sort the cells. You can do this by
specifying a camera and/or prop to define a view direction; or
explicitly set a view direction.
Caveats:
The sort operation will not work well for long, thin primitives, or cells
that intersect, overlap, or interpenetrate each other.
"""
class vtkDijkstraGraphGeodesicPath:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDijkstraGraphGeodesicPath - Dijkstra algorithm to compute the graph geodesic.
Super Class:
vtkGraphGeodesicPath
Takes as input a polygonal mesh and performs a single source shortest
path calculation. Dijkstra's algorithm is used. The implementation is
similar to the one described in Introduction to Algorithms (Second Edition)
by Thomas H. Cormen, Charles E. Leiserson, Ronald L. Rivest, and
Cliff Stein, published by MIT Press and McGraw-Hill. Some minor
enhancements are added, though. Not all vertices are pushed onto the heap
at the start; instead a front set is maintained. The heap is implemented as
a binary heap. The output of the filter is a set of lines describing
the shortest path from StartVertex to EndVertex.
Caveats:
The input polydata must have only triangle cells.
Thanks:
The class was contributed by Rasmus Paulsen.
www.imm.dtu.dk/~rrp/VTK . Also thanks to Alexandre Gouaillard and Shoaib
Ghias for bug fixes and enhancements.
"""
class vtkDiscreteMarchingCubes:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDiscreteMarchingCubes - generate object boundaries from
Super Class:
vtkMarchingCubes
takes as input a volume (e.g., 3D structured point set) of
segmentation labels and generates on output one or more
models representing the boundaries between the specified label and
the adjacent structures. One or more label values must be specified to
generate the models. The boundary positions are always defined to
be half-way between adjacent voxels. This filter works best with
integral scalar values.
If ComputeScalars is on (the default), each output cell will have
cell data that corresponds to the scalar value (segmentation label)
of the corresponding cube. Note that this differs from vtkMarchingCubes,
which stores the scalar value as point data. The rationale for this
difference is that cell vertices may be shared between multiple
cells. This also means that the resultant polydata may be
non-manifold (cell faces may be coincident). To further process the
polydata, users should either: 1) extract cells that have a common
scalar value using vtkThreshold, or 2) process the data with
filters that can handle non-manifold polydata
(e.g. vtkWindowedSincPolyDataFilter).
Also note, Normals and Gradients are not computed.
Caveats:
This filter is specialized to volumes. If you are interested in
contouring other types of data, use the general vtkContourFilter. If you
want to contour an image (i.e., a volume slice), use vtkMarchingSquares.
See Also:
vtkContourFilter vtkSliceCubes vtkMarchingSquares vtkDividingCubes
"""
class vtkDiskSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkDiskSource - create a disk with hole in center
Super Class:
vtkPolyDataAlgorithm
vtkDiskSource creates a polygonal disk with a hole in the center. The
disk has zero height. The user can specify the inner and outer radius
of the disk, and the radial and circumferential resolution of the
polygonal representation.
See Also:
vtkLinearExtrusionFilter
"""
class vtkEarthSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkEarthSource - create the continents of the Earth as a sphere
Super Class:
vtkPolyDataAlgorithm
vtkEarthSource creates a spherical rendering of the geographical shapes
of the major continents of the earth. The OnRatio determines
how much of the data is actually used. The radius defines the radius
of the sphere at which the continents are placed. The data is obtained
from an embedded array of coordinates.
"""
class vtkEdgePoints:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkEdgePoints - generate points on isosurface
Super Class:
vtkPolyDataAlgorithm
vtkEdgePoints is a filter that takes as input any dataset and
generates for output a set of points that lie on an isosurface. The
points are created by interpolation along cell edges whose end-points are
below and above the contour value.
Caveats:
vtkEdgePoints can be considered a "poor man's" dividing cubes algorithm
(see vtkDividingCubes). Points are generated only on the edges of cells,
not in the interior, and at lower density than dividing cubes. However, it
is more general than dividing cubes since it treats any type of dataset.
"""
class vtkElevationFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkElevationFilter - generate scalars along a specified direction
Super Class:
vtkDataSetAlgorithm
vtkElevationFilter is a filter to generate scalar values from a
dataset. The scalar values lie within a user specified range, and
are generated by computing a projection of each dataset point onto
a line. The line can be oriented arbitrarily. A typical example is
to generate scalars based on elevation or height above a plane.
"""
class vtkEllipticalButtonSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkEllipticalButtonSource - create a ellipsoidal-shaped button
Super Class:
vtkButtonSource
vtkEllipticalButtonSource creates a ellipsoidal shaped button with
texture coordinates suitable for application of a texture map. This
provides a way to make nice looking 3D buttons. The buttons are
represented as vtkPolyData that includes texture coordinates and
normals. The button lies in the x-y plane.
To use this class you must define the major and minor axes lengths of an
ellipsoid (expressed as width (x), height (y) and depth (z)). The button
has a rectangular mesh region in the center with texture coordinates that
range smoothly from (0,1). (This flat region is called the texture
region.) The outer, curved portion of the button (called the shoulder) has
texture coordinates set to a user-specified value (by default (0,0)).
(This results in coloring the button curve the same color as the (s,t)
location of the texture map.) The resolution in the radial direction, the
texture region, and the shoulder region must also be set. The button can
be moved by specifying an origin.
See Also:
vtkButtonSource vtkRectangularButtonSource
"""
class vtkEnSight6BinaryReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkEnSight6BinaryReader - class to read binary EnSight6 files
Super Class:
vtkEnSightReader
vtkEnSight6BinaryReader is a class to read binary EnSight6 files into vtk.
Because the different parts of the EnSight data can be of various data
types, this reader produces multiple outputs, one per part in the input
file.
All variable information is being stored in field data. The descriptions
listed in the case file are used as the array names in the field data.
For complex vector variables, the description is appended with _r (for the
array of real values) and _i (for the array of imaginary values). Complex
scalar variables are stored as a single array with 2 components, real and
imaginary, listed in that order.
Caveats:
You must manually call Update on this reader and then connect the rest
of the pipeline because (due to the nature of the file format) it is
not possible to know ahead of time how many outputs you will have or
what types they will be.
This reader can only handle static EnSight datasets (both static geometry
and variables).
"""
class vtkEnSight6Reader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkEnSight6Reader - class to read EnSight6 files
Super Class:
vtkEnSightReader
vtkEnSight6Reader is a class to read EnSight6 files into vtk.
Because the different parts of the EnSight data can be of various data
types, this reader produces multiple outputs, one per part in the input
file.
All variable information is being stored in field data. The descriptions
listed in the case file are used as the array names in the field data.
For complex vector variables, the description is appended with _r (for the
array of real values) and _i (for the array of imaginary values). Complex
scalar variables are stored as a single array with 2 components, real and
imaginary, listed in that order.
Caveats:
You must manually call Update on this reader and then connect the rest
of the pipeline because (due to the nature of the file format) it is
not possible to know ahead of time how many outputs you will have or
what types they will be.
This reader can only handle static EnSight datasets (both static geometry
and variables).
"""
class vtkEnSightGoldBinaryReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkEnSightGoldBinaryReader - class to read binary EnSight Gold files
Super Class:
vtkEnSightReader
vtkEnSightGoldBinaryReader is a class to read EnSight Gold files into vtk.
Because the different parts of the EnSight data can be of various data
types, this reader produces multiple outputs, one per part in the input
file.
All variable information is being stored in field data. The descriptions
listed in the case file are used as the array names in the field data.
For complex vector variables, the description is appended with _r (for the
array of real values) and _i (for the array of imaginary values). Complex
scalar variables are stored as a single array with 2 components, real and
imaginary, listed in that order.
Caveats:
You must manually call Update on this reader and then connect the rest
of the pipeline because (due to the nature of the file format) it is
not possible to know ahead of time how many outputs you will have or
what types they will be.
This reader can only handle static EnSight datasets (both static geometry
and variables).
"""
class vtkEnSightGoldReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkEnSightGoldReader - class to read EnSight Gold files
Super Class:
vtkEnSightReader
vtkEnSightGoldReader is a class to read EnSight Gold files into vtk.
Because the different parts of the EnSight data can be of various data
types, this reader produces multiple outputs, one per part in the input
file.
All variable information is being stored in field data. The descriptions
listed in the case file are used as the array names in the field data.
For complex vector variables, the description is appended with _r (for the
array of real values) and _i (for the array of imaginary values). Complex
scalar variables are stored as a single array with 2 components, real and
imaginary, listed in that order.
Caveats:
You must manually call Update on this reader and then connect the rest
of the pipeline because (due to the nature of the file format) it is
not possible to know ahead of time how many outputs you will have or
what types they will be.
This reader can only handle static EnSight datasets (both static geometry
and variables).
"""
class vtkEnSightMasterServerReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkEnSightMasterServerReader - reader for compund EnSight files
Super Class:
vtkGenericEnSightReader
None provided.
"""
class vtkExodusIIReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkExodusIIReader - Read exodus 2 files .ex2
Super Class:
vtkUnstructuredGridAlgorithm
vtkExodusIIReader is an unstructured grid source object that reads ExodusII
files. Most of the meta data associated with the file is loaded when
UpdateInformation is called. This includes information like Title, number
of blocks, number and names of arrays. This data can be retrieved from
methods in this reader. Separate arrays that are meant to be a single
vector are combined internally for convenience. To be combined, the array
names have to be identical except for a trailing X, Y, or Z (or x, y, z). By
default, cell and point arrays are not loaded. However, the user can flag
arrays to load with the methods "SetPointArrayStatus" and
"SetCellArrayStatus". The reader DOES NOT respond to piece requests.
"""
class vtkExodusReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkExodusReader - Read exodus 2 files .ex2
Super Class:
vtkUnstructuredGridAlgorithm
vtkExodusReader is an unstructured grid source object that reads ExodusII
files. Most of the meta data associated with the file is loaded when
UpdateInformation is called. This includes information like Title, number
of blocks, number and names of arrays. This data can be retrieved from
methods in this reader. Separate arrays that are meant to be a single
vector are combined internally for convenience. To be combined, the array
names have to be identical except for a trailing X, Y, or Z (or x, y, z). By
default, cell and point arrays are not loaded. However, the user can flag
arrays to load with the methods "SetPointArrayStatus" and
"SetCellArrayStatus". The reader DOES NOT respond to piece requests.
"""
class vtkExtractArraysOverTime:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractArraysOverTime - extract point or cell data over time
Super Class:
vtkRectilinearGridAlgorithm
vtkExtractArraysOverTime extracts point or cell data of one point or
cell over time. The output is a 1D rectilinear grid where the
XCoordinates correspond to time (the same array is also copied to
a point array named Time, or TimeData if Time already exists in the input).
When extracting point data, the input point coordinates are copied
to a point array named Point Coordinates, or Points if Point Coordinates
already exists in the input.
This algorithm does not produce TIME_STEPS or TIME_RANGE information
because it works across time.
Caveats:
vtkExtractArraysOverTime puts a vtkOnePieceExtentTranslator in the
output during RequestInformation(). As a result, the same whole
extent is produced independent of the piece request.
This algorithm works only with sources that produce TIME_STEPS().
Continuous time range is not yet supported.
"""
class vtkExtractCells:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractCells - subset a vtkDataSet to create a vtkUnstructuredGrid
Super Class:
vtkUnstructuredGridAlgorithm
Given a vtkDataSet and a list of cell Ids, create a vtkUnstructuredGrid
composed of these cells. If the cell list is empty when vtkExtractCells
executes, it will set up the ugrid, point and cell arrays, with no points,
cells or data.
"""
class vtkExtractDataOverTime:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractDataOverTime - extract point data from a time sequence for
Super Class:
vtkPointSetAlgorithm
This filter extracts the point data from a time sequence and specified index
and creates an output of the same type as the input but with Points
containing "number of time steps" points; the point and PointData
corresponding to the PointIndex are extracted at each time step and added to
the output. A PointData array is added called "Time" (or "TimeData" if
there is already an array called "Time"), which is the time at each index.
"""
class vtkExtractEdges:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractEdges - extract cell edges from any type of data
Super Class:
vtkPolyDataAlgorithm
vtkExtractEdges is a filter to extract edges from a dataset. Edges
are extracted as lines or polylines.
See Also:
vtkFeatureEdges
"""
class vtkExtractGeometry:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractGeometry - extract cells that lie either entirely inside or outside of a specified implicit function
Super Class:
vtkUnstructuredGridAlgorithm
vtkExtractGeometry extracts from its input dataset all cells that are either
completely inside or outside of a specified implicit function. Any type of
dataset can be input to this filter. On output the filter generates an
unstructured grid.
To use this filter you must specify an implicit function. You must also
specify whether to extract cells lying inside or outside of the implicit
function. (The inside of an implicit function is the negative values
region.) An option exists to extract cells that are neither inside nor
outside (i.e., boundary).
A more efficient version of this filter is available for vtkPolyData input.
See vtkExtractPolyDataGeometry.
See Also:
vtkExtractPolyDataGeometry vtkGeometryFilter vtkExtractVOI
"""
class vtkExtractGrid:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractGrid - select piece (e.g., volume of interest) and/or subsample structured grid dataset
Super Class:
vtkStructuredGridAlgorithm
vtkExtractGrid is a filter that selects a portion of an input structured
grid dataset, or subsamples an input dataset. (The selected portion of
interest is referred to as the Volume Of Interest, or VOI.) The output of
this filter is a structured grid dataset. The filter treats input data of
any topological dimension (i.e., point, line, image, or volume) and can
generate output data of any topological dimension.
To use this filter, set the VOI ivar, which holds the i-j-k min/max indices that
specify a rectangular region in the data. (Note that these are 0-offset.)
You can also specify a sampling rate to subsample the data.
Typical applications of this filter are to extract a plane from a grid for
contouring, subsampling large grids to reduce data size, or extracting
regions of a grid with interesting data.
See Also:
vtkGeometryFilter vtkExtractGeometry vtkExtractVOI
vtkStructuredGridGeometryFilter
"""
class vtkExtractPolyDataGeometry:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractPolyDataGeometry - extract vtkPolyData cells that lies either entirely inside or outside of a specified implicit function
Super Class:
vtkPolyDataAlgorithm
vtkExtractPolyDataGeometry extracts from its input vtkPolyData all cells
that are either completely inside or outside of a specified implicit
function. This filter is specialized to vtkPolyData. On output the
filter generates vtkPolyData.
To use this filter you must specify an implicit function. You must also
specify whether to extract cells lying inside or outside of the implicit
function. (The inside of an implicit function is the negative values
region.) An option exists to extract cells that are neither inside nor
outside (i.e., boundary).
A more general version of this filter is available for arbitrary
vtkDataSet input (see vtkExtractGeometry).
See Also:
vtkExtractGeometry vtkClipPolyData
"""
class vtkExtractRectilinearGrid:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractRectilinearGrid - Extract a sub grid (VOI) from the structured rectilinear dataset.
Super Class:
vtkRectilinearGridAlgorithm
vtkExtractRectilinearGrid rounds out the set of filters that extract
a subgrid out of a larger structured data set. Right now, this filter
only supports extracting a VOI. In the future, it might support
strides like the vtkExtractGrid filter.
See Also:
vtkExtractGrid vtkImageClip vtkGeometryFilter vtkExtractGeometry vtkExtractVOI
vtkStructuredGridGeometryFilter
"""
class vtkExtractSelectedFrustum:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractSelectedFrustum - Returns the portion of the input dataset that
Super Class:
vtkDataSetAlgorithm
This class intersects the input DataSet with a frustum and determines which
cells and points lie within the frustum. The frustum is defined with a
vtkPlanes containing six cutting planes. The output is a DataSet that is
either a shallow copy of the input dataset with two new "vtkInsidedness"
attribute arrays, or a completely new UnstructuredGrid that contains only
the cells and points of the input that are inside the frustum. The
PassThrough flag controls which occurs. When PassThrough is off
this filter adds a scalar array called vtkOriginalCellIds that says what
input cell produced each output cell. This is an example of a Pedigree ID
which helps to trace back results.
See Also:
vtkExtractGeometry, vtkAreaPicker, vtkExtractSelection, vtkSelection
"""
class vtkExtractSelectedGraph:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractSelectedGraph - return a subgraph of a vtkGraph
Super Class:
vtkGraphAlgorithm
The first input is a vtkGraph to take a subgraph from.
The second input is a vtkSelection containing the selected indices.
The vtkSelection may have FIELD_TYPE set to POINTS (a vertex selection)
or CELLS (an edge selection). A vertex selection preserves all edges
that connect selected vertices. An edge selection preserves all vertices
that are adjacent to at least one selected edge. Alternately, you may
indicate that an edge selection should maintain the full set of vertices,
by turning RemoveIsolatedVertices off.
"""
class vtkExtractSelectedIds:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractSelectedIds - extract a list of cells from a dataset
Super Class:
vtkDataSetAlgorithm
vtkExtractSelectedIds extracts a set of cells and points from within a
vtkDataSet. The set of ids to extract are listed within a vtkSelection.
This filter adds a scalar array called vtkOriginalCellIds that says what
input cell produced each output cell. This is an example of a Pedigree ID
which helps to trace back results. Depending on whether the selection has
GLOBALIDS, VALUES or INDICES, the selection will use the contents of the
array named in the GLOBALIDS DataSetAttribute, an arbitrary array, or the
position (tuple id or number) within the cell or point array.
See Also:
vtkSelection vtkExtractSelection
"""
class vtkExtractSelectedLocations:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractSelectedLocations - extract cells within a dataset that
Super Class:
vtkDataSetAlgorithm
vtkExtractSelectedLocations extracts all cells whose volume contains at least
one point listed in the LOCATIONS content of the vtkSelection. This filter
adds a scalar array called vtkOriginalCellIds that says what input cell
produced each output cell. This is an example of a Pedigree ID which helps
to trace back results.
See Also:
vtkSelection vtkExtractSelection
"""
class vtkExtractSelectedPolyDataIds:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractSelectedPolyDataIds - extract a list of cells from a polydata
Super Class:
vtkPolyDataAlgorithm
vtkExtractSelectedPolyDataIds extracts all cells in vtkSelection from a
vtkPolyData.
See Also:
vtkSelection
"""
class vtkExtractSelectedThresholds:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractSelectedThresholds - extract a cells or points from a
Super Class:
vtkDataSetAlgorithm
vtkExtractSelectedThresholds extracts all cells and points with attribute
values that lie within a vtkSelection's THRESHOLD contents. The selection
can specify a particular array to threshold within either the point or cell
attribute data of the input. This is similar to vtkThreshold
but allows multiple threshold ranges.
This filter adds a scalar array called vtkOriginalCellIds that says what
input cell produced each output cell. This is an example of a Pedigree ID
which helps to trace back results.
See Also:
vtkSelection vtkExtractSelection vtkThreshold
"""
class vtkExtractSelection:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractSelection - extract a subset from a vtkDataSet.
Super Class:
vtkDataSetAlgorithm
vtkExtractSelection extracts some subset of cells and points from
its input dataset. The subset is described by the contents of the
vtkSelection on its first input port. The dataset is given on its
second input port. Depending on the content of the vtkSelection,
this will use either a vtkExtractSelectedIds, vtkExtractSelectedFrustum,
vtkExtractSelectedLocations or a vtkExtractSelectedThresholds to perform
the extraction.
See Also:
vtkSelection vtkExtractSelectedIds vtkExtractSelectedFrustum
vtkExtractSelectedLocations vtkExtractSelectedThresholds
"""
class vtkExtractTemporalFieldData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractTemporalFieldData - Extract temporal arrays from input field data
Super Class:
vtkRectilinearGridAlgorithm
vtkExtractTemporalFieldData extracts arrays from the input vtkFieldData.
These arrays are assumed to contain temporal data, where the nth tuple
contains the value for the nth timestep.
The output is a 1D rectilinear grid where the
XCoordinates correspond to time (the same array is also copied to
a point array named Time, or TimeData if Time already exists in the input).
This algorithm does not produce TIME_STEPS or TIME_RANGE information
because it works across time.
Caveats:
vtkExtractTemporalFieldData puts a vtkOnePieceExtentTranslator in the
output during RequestInformation(). As a result, the same whole
extent is produced independent of the piece request.
This algorithm works only with sources that produce TIME_STEPS().
Continuous time range is not yet supported.
"""
class vtkExtractTensorComponents:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractTensorComponents - extract parts of tensor and create a scalar, vector, normal, or texture coordinates.
Super Class:
vtkDataSetAlgorithm
vtkExtractTensorComponents is a filter that extracts components of
a tensor to create a scalar, vector, normal, or texture coords. For
example, if the tensor contains components of stress, then you
could extract the normal stress in the x-direction as a scalar
(i.e., tensor component (0,0)).
To use this filter, you must set some boolean flags to control
which data is extracted from the tensors, and whether you want to
pass the tensor data through to the output. Also, you must specify
the tensor component(s) for each type of data you want to
extract. The tensor component(s) is(are) specified using matrix notation
into a 3x3 matrix. That is, use the (row,column) address to specify
a particular tensor component; and if the data you are extracting
requires more than one component, use a list of addresses. (Note
that the addresses are 0-offset -> (0,0) specifies upper left
corner of the tensor.)
There are two optional methods to extract scalar data. You can
extract the determinant of the tensor, or you can extract the
effective stress of the tensor. These require that the ivar
ExtractScalars is on, and the appropriate scalar extraction mode is
set.
"""
class vtkExtractUnstructuredGrid:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractUnstructuredGrid - extract subset of unstructured grid geometry
Super Class:
vtkUnstructuredGridAlgorithm
vtkExtractUnstructuredGrid is a general-purpose filter to
extract geometry (and associated data) from an unstructured grid
dataset. The extraction process is controlled by specifying a range
of point ids, cell ids, or a bounding box (referred to as "Extent").
Those cells lying within these regions are sent to the output.
The user has the choice of merging coincident points (Merging is on)
or using the original point set (Merging is off).
Caveats:
If merging is off, the input points are copied through to the
output. This means unused points may be present in the output data.
If merging is on, then coincident points with different point attribute
values are merged.
See Also:
vtkImageDataGeometryFilter vtkStructuredGridGeometryFilter
vtkRectilinearGridGeometryFilter
vtkExtractGeometry vtkExtractVOI
"""
class vtkExtractVOI:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractVOI - select piece (e.g., volume of interest) and/or subsample structured points dataset
Super Class:
vtkImageAlgorithm
vtkExtractVOI is a filter that selects a portion of an input structured
points dataset, or subsamples an input dataset. (The selected portion of
interest is referred to as the Volume Of Interest, or VOI.) The output of
this filter is a structured points dataset. The filter treats input data
of any topological dimension (i.e., point, line, image, or volume) and can
generate output data of any topological dimension.
To use this filter, set the VOI ivar, which holds the i-j-k min/max indices that
specify a rectangular region in the data. (Note that these are 0-offset.)
You can also specify a sampling rate to subsample the data.
Typical applications of this filter are to extract a slice from a volume
for image processing, subsampling large volumes to reduce data size, or
extracting regions of a volume with interesting data.
See Also:
vtkGeometryFilter vtkExtractGeometry vtkExtractGrid
"""
class vtkExtractVectorComponents:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkExtractVectorComponents - extract components of vector as separate scalars
Super Class:
vtkDataSetAlgorithm
vtkExtractVectorComponents is a filter that extracts vector components as
separate scalars. This is accomplished by creating three different outputs.
Each output is the same as the input, except that the scalar values will be
one of the three components of the vector. These can be found in the
VxComponent, VyComponent, and VzComponent.
Alternatively, if the ExtractToFieldData flag is set, the filter will
put all the components in the field data. The first component will be
the scalar and the others will be non-attribute arrays.
Caveats:
This filter is unusual in that it creates multiple outputs.
If you use the GetOutput() method, you will be retrieving the x vector
component.
"""
class vtkFLUENTReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkFLUENTReader - reads a dataset in Fluent file format
Super Class:
vtkMultiBlockDataSetAlgorithm
vtkFLUENTReader creates an unstructured grid dataset. It reads .cas and
.dat files stored in FLUENT native format.
.SECTION Thanks
Thanks to Brian W. Dotson & Terry E. Jordan (Department of Energy, National
Energy Technology Laboratory) & Douglas McCorkle (Iowa State University)
who developed this class.
Please address all comments to Brian Dotson (brian.dotson@netl.doe.gov) &
Terry Jordan (terry.jordan@sa.netl.doe.gov)
& Doug McCorkle (mccdo@iastate.edu)
See Also:
vtkGAMBITReader
"""
class vtkFacetReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkFacetReader - reads a dataset in Facet format
Super Class:
vtkPolyDataAlgorithm
vtkFacetReader creates a poly data dataset. It reads ASCII files
stored in Facet format
The facet format looks like this:
FACET FILE ...
nparts
Part 1 name
0
npoints 0 0
p1x p1y p1z
p2x p2y p2z
...
1
Part 1 name
ncells npointspercell
p1c1 p2c1 p3c1 ... pnc1 materialnum partnum
p1c2 p2c2 p3c2 ... pnc2 materialnum partnum
...
"""
class vtkFacetWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkFacetWriter - reads a dataset in Facet format
Super Class:
vtkPolyDataAlgorithm
vtkFacetWriter writes a polygonal dataset to ASCII files
stored in Facet format
The facet format looks like this:
FACET FILE ...
nparts
Part 1 name
0
npoints 0 0
p1x p1y p1z
p2x p2y p2z
...
1
Part 1 name
ncells npointspercell
p1c1 p2c1 p3c1 ... pnc1 materialnum partnum
p1c2 p2c2 p3c2 ... pnc2 materialnum partnum
...
"""
class vtkFastSplatter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkFastSplatter - A splatter optimized for splatting single kernels.
Super Class:
vtkImageAlgorithm
vtkFastSplatter takes any vtkPointSet as input (of which vtkPolyData and
vtkUnstructuredGrid inherit). Each point in the data set is considered to be
an impulse. These impulses are convolved with a given splat image. In other
words, the splat image is added to the final image at every place where there
is an input point.
Note that point and cell data are thrown away. If you want a sampling
of unstructured points consider vtkGaussianSplatter or vtkShepardMethod.
Use input port 0 for the impulse data (vtkPointSet), and input port 1 for
the splat image (vtkImageData)
.SECTION Bugs
Any point outside of the extents of the image is thrown away, even if it is
close enough that its convolution with the splat image would overlap
the extents.
"""
class vtkFeatureEdges:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkFeatureEdges - extract boundary, non-manifold, and/or sharp edges from polygonal data
Super Class:
vtkPolyDataAlgorithm
vtkFeatureEdges is a filter to extract special types of edges from
input polygonal data. These edges are either 1) boundary (used by
one polygon) or a line cell; 2) non-manifold (used by three or more
polygons); 3) feature edges (edges used by two triangles and whose
dihedral angle > FeatureAngle); or 4) manifold edges (edges used by
exactly two polygons). These edges may be extracted in any
combination. Edges may also be "colored" (i.e., scalar values assigned)
based on edge type. The cell coloring is assigned to the cell data of
the extracted edges.
Caveats:
To see the coloring of the lines you may have to set the ScalarMode
instance variable of the mapper to SetScalarModeToUseCellData(). (This
is only a problem if there are point data scalars.)
See Also:
vtkExtractEdges
"""
class vtkFieldDataToAttributeDataFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkFieldDataToAttributeDataFilter - map field data to dataset attribute data
Super Class:
vtkDataSetAlgorithm
vtkFieldDataToAttributeDataFilter is a class that maps field data into
dataset attributes. The input to this filter is any type of dataset and
the output is the same dataset (geometry/topology) with new attribute data
(attribute data is passed through if not replaced during filter
execution).
To use this filter you must specify which field data from the input
dataset to use. There are three possibilities: the cell field data, the
point field data, or the field data associated with the data object
superclass. Then you specify which attribute data to create: either cell
attribute data or point attribute data. Finally, you must define how to
construct the various attribute data types (e.g., scalars, vectors,
normals, etc.) from the arrays and the components of the arrays from the
field data. This is done by associating components in the input field with
components making up the attribute data. For example, you would specify a
scalar with three components (RGB) by assigning components from the field
for the R, then G, then B values of the scalars. You may also have to
specify component ranges (for each R-G-B) to make sure that the number of
R, G, and B values is the same. Also, you may want to normalize the
components which helps distribute the data uniformly.
This filter is often used in conjunction with
vtkDataObjectToDataSetFilter, which generates dataset topology and
geometry and passes its input field data along to its output. Then this
filter is used to generate the attribute
data to go along with the dataset.
Caveats:
Make sure that the data you extract is consistent. That is, if you have N
points, extract N point attributes (scalars, vectors, etc.).
See Also:
vtkFieldData vtkDataSet vtkDataObjectToDataSetFilter
vtkDataSetAttributes vtkDataArray
"""
class vtkFixedWidthTextReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkFixedWidthTextReader - reader for pulling in text files with fixed-width fields
Super Class:
vtkTableAlgorithm
vtkFixedWidthTextReader reads in a table from a text file where
each column occupies a certain number of characters.
This class emits ProgressEvent for every 100 lines it reads.
Caveats:
This first version of the reader will assume that all fields have
the same width. It also assumes that the first line in the file
has at least as many fields (i.e. at least as many characters) as
any other line in the file.
.SECTION Thanks
Thanks to Andy Wilson from Sandia National Laboratories for
implementing this class.
"""
class vtkGAMBITReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkGAMBITReader - reads a dataset in Fluent GAMBIT neutral file format
Super Class:
vtkUnstructuredGridAlgorithm
vtkGAMBITReader creates an unstructured grid dataset. It reads ASCII files
stored in GAMBIT neutral format, with optional data stored at the nodes or
at the cells of the model. A cell-based fielddata stores the material id.
See Also:
vtkAVSucdReader
"""
class vtkGESignaReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkGESignaReader - read GE Signa ximg files
Super Class:
vtkMedicalImageReader2
vtkGESignaReader is a source object that reads some GE Signa ximg files It
does support reading in pixel spacing, slice spacing and it computes an
origin for the image in millimeters. It always produces greyscale unsigned
short data and it supports reading in rectangular, packed, compressed, and
packed&compressed. It does not read in slice orientation, or position
right now. To use it you just need to specify a filename or a file prefix
and pattern.
See Also:
vtkImageReader2
"""
class vtkGaussianCubeReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkGaussianCubeReader - read ASCII Gaussian Cube Data files
Super Class:
vtkMoleculeReaderBase
vtkGaussianCubeReader is a source object that reads ASCII files following
the description in http://www.gaussian.com/00000430.htm
The FileName must be specified.
.SECTION Thanks
Dr. Jean M. Favre who developed and contributed this class.
"""
class vtkGaussianSplatter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGaussianSplatter - splat points into a volume with an elliptical, Gaussian distribution
Super Class:
vtkImageAlgorithm
vtkGaussianSplatter is a filter that injects input points into a
structured points (volume) dataset. As each point is injected, it "splats"
or distributes values to nearby voxels. Data is distributed using an
elliptical, Gaussian distribution function. The distribution function is
modified using scalar values (expands distribution) or normals
(creates ellipsoidal distribution rather than spherical).
In general, the Gaussian distribution function f(x) around a given
splat point p is given by
f(x) = ScaleFactor * exp( ExponentFactor*((r/Radius)**2) )
where x is the current voxel sample point; r is the distance |x-p|
ExponentFactor <= 0.0, and ScaleFactor can be multiplied by the scalar
value of the point p that is currently being splatted.
If points normals are present (and NormalWarping is on), then the splat
function becomes elliptical (as compared to the spherical one described
by the previous equation). The Gaussian distribution function then
becomes:
f(x) = ScaleFactor *
exp( ExponentFactor*( ((rxy/E)**2 + z**2)/R**2) )
where E is a user-defined eccentricity factor that controls the elliptical
shape of the splat; z is the distance of the current voxel sample point
along normal N; and rxy is the distance of x in the direction
perpendicular to N.
This class is typically used to convert point-valued distributions into
a volume representation. The volume is then usually iso-surfaced or
volume rendered to generate a visualization. It can be used to create
surfaces from point distributions, or to create structure (i.e.,
topology) when none exists.
Caveats:
The input to this filter is any dataset type. This filter can be used
to resample any form of data, i.e., the input data need not be
unstructured.
Some voxels may never receive a contribution during the splatting process.
The final value of these points can be specified with the "NullValue"
instance variable.
See Also:
vtkShepardMethod
"""
class vtkGenericClip:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGenericClip - clip any dataset with an implicit function or scalar data
Super Class:
vtkUnstructuredGridAlgorithm
vtkGenericClip is a filter that clips any type of dataset using either
any subclass of vtkImplicitFunction, or the input scalar
data. Clipping means that it actually "cuts" through the cells of
the dataset, returning everything inside of the specified implicit
function (or greater than the scalar value) including "pieces" of
a cell. (Compare this with vtkExtractGeometry, which pulls out
entire, uncut cells.) The output of this filter is an unstructured
grid.
To use this filter, you must decide if you will be clipping with an
implicit function, or whether you will be using the input scalar
data. If you want to clip with an implicit function, you must:
1) define an implicit function
2) set it with the SetClipFunction method
3) apply the GenerateClipScalarsOn method
If a ClipFunction is not specified, or GenerateClipScalars is off
(the default), then the input's scalar data will be used to clip
the polydata.
You can also specify a scalar value, which is used to decide what is
inside and outside of the implicit function. You can also reverse the
sense of what inside/outside is by setting the InsideOut instance
variable. (The clipping algorithm proceeds by computing an implicit
function value or using the input scalar data for each point in the
dataset. This is compared to the scalar value to determine
inside/outside.)
This filter can be configured to compute a second output. The
second output is the part of the cell that is clipped away. Set the
GenerateClippedData boolean on if you wish to access this output data.
This filter has been implemented to operate on generic datasets, rather
than the typical vtkDataSet (and subclasses). vtkGenericDataSet is a more
complex cousin of vtkDataSet, typically consisting of nonlinear,
higher-order cells. To process this type of data, generic cells are
automatically tessellated into linear cells prior to isocontouring.
See Also:
vtkClipDataSet vtkClipPolyData vtkClipVolume vtkImplicitFunction
vtkGenericDataSet
"""
class vtkGenericContourFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGenericContourFilter - generate isocontours from input dataset
Super Class:
vtkPolyDataAlgorithm
vtkGenericContourFilter is a filter that takes as input any (generic)
dataset and generates on output isosurfaces and/or isolines. The exact
form of the output depends upon the dimensionality of the input data.
Data consisting of 3D cells will generate isosurfaces, data consisting of
2D cells will generate isolines, and data with 1D or 0D cells will
generate isopoints. Combinations of output type are possible if the input
dimension is mixed.
To use this filter you must specify one or more contour values.
You can either use the method SetValue() to specify each contour
value, or use GenerateValues() to generate a series of evenly
spaced contours. You can use ComputeNormalsOn to compute the normals
without the need of a vtkPolyDataNormals
This filter has been implemented to operate on generic datasets, rather
than the typical vtkDataSet (and subclasses). vtkGenericDataSet is a more
complex cousin of vtkDataSet, typically consisting of nonlinear,
higher-order cells. To process this type of data, generic cells are
automatically tessellated into linear cells prior to isocontouring.
See Also:
vtkContourFilter vtkGenericDataSet
"""
class vtkGenericCutter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGenericCutter - cut a vtkGenericDataSet with an implicit function or scalar data
Super Class:
vtkPolyDataAlgorithm
vtkGenericCutter is a filter to cut through data using any subclass of
vtkImplicitFunction. That is, a polygonal surface is created
corresponding to the implicit function F(x,y,z) = value(s), where
you can specify one or more values used to cut with.
In VTK, cutting means reducing a cell of dimension N to a cut surface
of dimension N-1. For example, a tetrahedron when cut by a plane (i.e.,
vtkPlane implicit function) will generate triangles. (In comparison,
clipping takes a N dimensional cell and creates N dimension primitives.)
vtkGenericCutter is generally used to "slice-through" a dataset, generating
a surface that can be visualized. It is also possible to use
vtkGenericCutter to do a form of volume rendering. vtkGenericCutter does
this by generating multiple cut surfaces (usually planes) which are ordered
(and rendered) from back-to-front. The surfaces are set translucent to give
a volumetric rendering effect.
This filter has been implemented to operate on generic datasets, rather
than the typical vtkDataSet (and subclasses). vtkGenericDataSet is a more
complex cousin of vtkDataSet, typically consisting of nonlinear,
higher-order cells. To process this type of data, generic cells are
automatically tessellated into linear cells prior to isocontouring.
See Also:
vtkCutter vtkImplicitFunction vtkClipPolyData vtkGenericDataSet
"""
class vtkGenericDataObjectReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkGenericDataObjectReader - class to read any type of vtk data object
Super Class:
vtkDataReader
vtkGenericDataObjectReader is a class that provides instance variables and methods
to read any type of data object in Visualization Toolkit (vtk) format. The
output type of this class will vary depending upon the type of data
file. Convenience methods are provided to return the data as a particular
type. (See text for format description details).
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkDataReader vtkGraphReader vtkPolyDataReader vtkRectilinearGridReader
vtkStructuredPointsReader vtkStructuredGridReader vtkTableReader
vtkTreeReader vtkUnstructuredGridReader
"""
class vtkGenericDataObjectWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkGenericDataObjectWriter - writes any type of vtk data object to file
Super Class:
vtkDataWriter
vtkGenericDataObjectWriter is a concrete class that writes data objects
to disk. The input to this object is any subclass of vtkDataObject.
"""
class vtkGenericDataSetTessellator:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGenericDataSetTessellator - tessellates generic, higher-order datasets into linear cells
Super Class:
vtkUnstructuredGridAlgorithm
See Also:
vtkGenericCellTessellator vtkGenericSubdivisionErrorMetric
"""
class vtkGenericEnSightReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkGenericEnSightReader - class to read any type of EnSight files
Super Class:
vtkMultiBlockDataSetAlgorithm
The class vtkGenericEnSightReader allows the user to read an EnSight data
set without a priori knowledge of what type of EnSight data set it is.
"""
class vtkGenericGeometryFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGenericGeometryFilter - extract geometry from data (or convert data to polygonal type)
Super Class:
vtkPolyDataAlgorithm
vtkGenericGeometryFilter is a general-purpose filter to extract geometry (and
associated data) from any type of dataset. Geometry is obtained as
follows: all 0D, 1D, and 2D cells are extracted. All 2D faces that are
used by only one 3D cell (i.e., boundary faces) are extracted. It also is
possible to specify conditions on point ids, cell ids, and on
bounding box (referred to as "Extent") to control the extraction process.
This filter also may be used to convert any type of data to polygonal
type. The conversion process may be less than satisfactory for some 3D
datasets. For example, this filter will extract the outer surface of a
volume or structured grid dataset. (For structured data you may want to
use vtkImageDataGeometryFilter, vtkStructuredGridGeometryFilter,
vtkExtractUnstructuredGrid, vtkRectilinearGridGeometryFilter, or
vtkExtractVOI.)
Caveats:
When vtkGenericGeometryFilter extracts cells (or boundaries of cells) it
will (by default) merge duplicate vertices. This may cause problems
in some cases. For example, if you've run vtkPolyDataNormals to
generate normals, which may split meshes and create duplicate
vertices, vtkGenericGeometryFilter will merge these points back
together. Turn merging off to prevent this from occurring.
See Also:
vtkImageDataGeometryFilter vtkStructuredGridGeometryFilter
vtkExtractGeometry vtkExtractVOI
"""
class vtkGenericGlyph3DFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGenericGlyph3DFilter - copy oriented and scaled glyph geometry to every input point
Super Class:
vtkPolyDataAlgorithm
vtkGenericGlyph3DFilter is a filter that copies a geometric representation (called
a glyph) to every point in the input dataset. The glyph is defined with
polygonal data from a source filter input. The glyph may be oriented
along the input vectors or normals, and it may be scaled according to
scalar data or vector magnitude. More than one glyph may be used by
creating a table of source objects, each defining a different glyph. If a
table of glyphs is defined, then the table can be indexed into by using
either scalar value or vector magnitude.
To use this object you'll have to provide an input dataset and a source
to define the glyph. Then decide whether you want to scale the glyph and
how to scale the glyph (using scalar value or vector magnitude). Next
decide whether you want to orient the glyph, and whether to use the
vector data or normal data to orient it. Finally, decide whether to use a
table of glyphs, or just a single glyph. If you use a table of glyphs,
you'll have to decide whether to index into it with scalar value or with
vector magnitude.
Caveats:
Contrary to vtkGlyph3D, the only way to specify which attributes will be
used for scaling, coloring and orienting is through SelectInputScalars(),
SelectInputVectors() and SelectInputNormals().
The scaling of the glyphs is controlled by the ScaleFactor ivar multiplied
by the scalar value at each point (if VTK_SCALE_BY_SCALAR is set), or
multiplied by the vector magnitude (if VTK_SCALE_BY_VECTOR is set),
Alternatively (if VTK_SCALE_BY_VECTORCOMPONENTS is set), ...
[Truncated]
See Also:
vtkTensorGlyph
"""
class vtkGenericOutlineFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGenericOutlineFilter - create wireframe outline for arbitrary
Super Class:
vtkPolyDataAlgorithm
vtkGenericOutlineFilter is a filter that generates a wireframe outline of
any generic data set. The outline consists of the twelve edges of the
generic dataset bounding box.
See Also:
vtkGenericDataSet
"""
class vtkGenericProbeFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGenericProbeFilter - sample data values at specified point locations
Super Class:
vtkDataSetAlgorithm
vtkGenericProbeFilter is a filter that computes point attributes (e.g., scalars,
vectors, etc.) at specified point positions. The filter has two inputs:
the Input and Source. The Input geometric structure is passed through the
filter. The point attributes are computed at the Input point positions
by interpolating into the source data. For example, we can compute data
values on a plane (plane specified as Input) from a volume (Source).
This filter can be used to resample data, or convert one dataset form into
another. For example, a generic dataset can be probed with a volume
(three-dimensional vtkImageData), and then volume rendering techniques can
be used to visualize the results. Another example: a line or curve can be
used to probe data to produce x-y plots along that line or curve.
This filter has been implemented to operate on generic datasets, rather
than the typical vtkDataSet (and subclasses). vtkGenericDataSet is a more
complex cousin of vtkDataSet, typically consisting of nonlinear,
higher-order cells. To process this type of data, generic cells are
automatically tessellated into linear cells prior to isocontouring.
See Also:
vtkGenericProbeFilter vtkProbeFilter vtkGenericDataSet
"""
class vtkGenericStreamTracer:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGenericStreamTracer - Streamline generator
Super Class:
vtkPolyDataAlgorithm
vtkGenericStreamTracer is a filter that integrates a vector field to
generate streamlines. The integration is performed using the provided
integrator. The default is second order Runge-Kutta.
vtkGenericStreamTracer generates polylines as output. Each cell (polyline)
corresponds to one streamline. The values associated with each streamline
are stored in the cell data whereas the values associated with points
are stored in point data.
Note that vtkGenericStreamTracer can integrate both forward and backward.
The length of the streamline is controlled by specifying either
a maximum value in the units of length, cell length or elapsed time
(the elapsed time is the time each particle would have traveled if
flow were steady). Otherwise, the integration terminates after exiting
the dataset or if the particle speed is reduced to a value less than
the terminal speed or when a maximum number of steps is reached.
The reason for the termination is stored in a cell array named
ReasonForTermination.
The quality of integration can be controlled by setting integration
step (InitialIntegrationStep) and in the case of adaptive solvers
the maximum error, the minimum integration step and the maximum
integration step. All of these can have units of length, cell length
or elapsed time.
The integration time, vorticity, rotation and angular velocity
are stored in point arrays named "IntegrationTime", "Vorticity",
"Rotation" and "AngularVelocity" respectively (vorticity, rotation
and angular velocity are computed only when ComputeVorticity is on).
All point attributes in the source data set are interpolated on the
new streamline points.
vtkGenericStreamTracer integrates through any type of dataset. As a result,
if the dataset contains 2D cells such as polygons or triangles, the
integration is constrained to lie on the surface defined by the 2D cells.
The starting point of traces may be defined in two different ways.
Starting from global x-y-z "position" allows you to start a single trace
at a specified x-y-z coordinate. If you specify a source object,
a trace will be generated for each point in the source that is
inside the dataset.
See Also:
vtkRibbonFilter vtkRuledSurfaceFilter vtkInitialValueProblemSolver
vtkRungeKutta2 vtkRungeKutta4 vtkRungeKutta45
"""
class vtkGeometryFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGeometryFilter - extract geometry from data (or convert data to polygonal type)
Super Class:
vtkPolyDataAlgorithm
vtkGeometryFilter is a general-purpose filter to extract geometry (and
associated data) from any type of dataset. Geometry is obtained as
follows: all 0D, 1D, and 2D cells are extracted. All 2D faces that are
used by only one 3D cell (i.e., boundary faces) are extracted. It also is
possible to specify conditions on point ids, cell ids, and on
bounding box (referred to as "Extent") to control the extraction process.
This filter also may be used to convert any type of data to polygonal
type. The conversion process may be less than satisfactory for some 3D
datasets. For example, this filter will extract the outer surface of a
volume or structured grid dataset. (For structured data you may want to
use vtkImageDataGeometryFilter, vtkStructuredGridGeometryFilter,
vtkExtractUnstructuredGrid, vtkRectilinearGridGeometryFilter, or
vtkExtractVOI.)
Caveats:
When vtkGeometryFilter extracts cells (or boundaries of cells) it
will (by default) merge duplicate vertices. This may cause problems
in some cases. For example, if you've run vtkPolyDataNormals to
generate normals, which may split meshes and create duplicate
vertices, vtkGeometryFilter will merge these points back
together. Turn merging off to prevent this from occurring.
See Also:
vtkImageDataGeometryFilter vtkStructuredGridGeometryFilter
vtkExtractGeometry vtkExtractVOI
"""
class vtkGlyph2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGlyph2D - copy oriented and scaled glyph geometry to every input point (2D specialization)
Super Class:
vtkGlyph3D
This subclass of vtkGlyph3D is a specialization to 2D. Transformations
(i.e., translation, scaling, and rotation) are constrained to the plane.
For example, rotations due to a vector are computed from the x-y
coordinates of the vector only, and are assumed to occur around the
z-axis. (See vtkGlyph3D for documentation on the interface to this
class.)
See Also:
vtkTensorGlyph vtkGlyph3D vtkProgrammableGlyphFilter
"""
class vtkGlyph3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGlyph3D - copy oriented and scaled glyph geometry to every input point
Super Class:
vtkPolyDataAlgorithm
vtkGlyph3D is a filter that copies a geometric representation (called
a glyph) to every point in the input dataset. The glyph is defined with
polygonal data from a source filter input. The glyph may be oriented
along the input vectors or normals, and it may be scaled according to
scalar data or vector magnitude. More than one glyph may be used by
creating a table of source objects, each defining a different glyph. If a
table of glyphs is defined, then the table can be indexed into by using
either scalar value or vector magnitude.
To use this object you'll have to provide an input dataset and a source
to define the glyph. Then decide whether you want to scale the glyph and
how to scale the glyph (using scalar value or vector magnitude). Next
decide whether you want to orient the glyph, and whether to use the
vector data or normal data to orient it. Finally, decide whether to use a
table of glyphs, or just a single glyph. If you use a table of glyphs,
you'll have to decide whether to index into it with scalar value or with
vector magnitude.
Caveats:
The scaling of the glyphs is controlled by the ScaleFactor ivar multiplied
by the scalar value at each point (if VTK_SCALE_BY_SCALAR is set), or
multiplied by the vector magnitude (if VTK_SCALE_BY_VECTOR is set),
Alternatively (if VTK_SCALE_BY_VECTORCOMPONENTS is set), the scaling
may be specified for x,y,z using the vector components. The
scale factor can be further controlled by enabling clamping using the
Clamping ivar. If clamping is enabled, the scale is no ...
[Truncated]
See Also:
vtkTensorGlyph
"""
class vtkGlyphSource2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGlyphSource2D - create 2D glyphs represented by vtkPolyData
Super Class:
vtkPolyDataAlgorithm
vtkGlyphSource2D can generate a family of 2D glyphs each of which lies
in the x-y plane (i.e., the z-coordinate is zero). The class is a helper
class to be used with vtkGlyph2D and vtkXYPlotActor.
To use this class, specify the glyph type to use and its
attributes. Attributes include its position (i.e., center point), scale,
color, and whether the symbol is filled or not (a polygon or closed line
sequence). You can also put a short line through the glyph running from -x
to +x (the glyph looks like it's on a line), or a cross.
"""
class vtkGradientFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGradientFilter - A general filter for gradient estimation.
Super Class:
vtkDataSetAlgorithm
Estimates the gradient of a scalar field in a data set. This class
is basically designed for unstructured data sets (i.e.
vtkUnstructuredGrid). More efficient filters exist for vtkImageData.
"""
class vtkGraphAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGraphAlgorithm - Superclass for algorithms that produce only Graph as output
Super Class:
vtkAlgorithm
vtkGraphAlgorithm is a convenience class to make writing algorithms
easier. It is also designed to help transition old algorithms to the new
pipeline architecture. There are some assumptions and defaults made by this
class you should be aware of. This class defaults such that your filter
will have one input port and one output port. If that is not the case
simply change it with SetNumberOfInputPorts etc. See this class
constructor for the default. This class also provides a FillInputPortInfo
method that by default says that all inputs will be Graph. If that
isn't the case then please override this method in your subclass. This
class breaks out the downstream requests into separate functions such as
ExecuteData and ExecuteInformation. For new algorithms you should
implement RequestData( request, inputVec, outputVec) but for older filters
there is a default implementation that calls the old ExecuteData(output)
signature. For even older filters that don't implement ExecuteData the
default implementation calls the even older Execute() signature.
.SECTION Thanks
Thanks to Patricia Crossno, Ken Moreland, Andrew Wilson and Brian Wylie from
Sandia National Laboratories for their help in developing this class.
"""
class vtkGraphHierarchicalBundle:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGraphHierarchicalBundle - layout graph arcs in bundles
Super Class:
vtkPolyDataAlgorithm
This algorithm creates a vtkPolyData from a vtkAbstractGraph. As opposed to
vtkGraphToPolyData, which converts each arc into a straight line, each arc
is converted to a polyline, following a tree structure. The filter requires
both a vtkAbstractGraph and vtkTree as input. The tree vertices must be a
superset of the graph vertices. A common example is when the graph vertices
correspond to the leaves of the tree, but the internal vertices of the tree
represent groupings of graph vertices. The algorithm matches the vertices
using the array "PedigreeId". The user may alternatively set the
DirectMapping flag to indicate that the two structures must have directly
corresponding offsets (i.e. node i in the graph must correspond to node i in
the tree).
The vtkAbstractGraph defines the topology of the output vtkPolyData (i.e.
the connections between nodes) while the vtkTree defines the geometry (i.e.
the location of nodes and arc routes). Thus, the tree must have been
assigned vertex locations, but the graph does not need locations, in fact
they will be ignored. The edges approximately follow the path from the
source to target nodes in the tree. A bundling parameter controls how
closely the edges are bundled together along the tree structure.
You may follow this algorithm with vtkSplineFilter in order to make nicely
curved edges.
"""
class vtkGraphLayout:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGraphLayout - layout a graph in 2 or 3 dimensions
Super Class:
vtkAbstractGraphAlgorithm
This class is a shell for many graph layout strategies which may be set
using the SetLayoutStrategy() function. The layout strategies do the
actual work.
.SECTION Thanks
Thanks to Brian Wylie from Sandia National Laboratories for adding incremental
layout capabilities.
"""
class vtkGraphLayoutFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGraphLayoutFilter - nice layout of undirected graphs in 3D
Super Class:
vtkPolyDataAlgorithm
vtkGraphLayoutFilter will reposition a network of nodes, connected by
lines or polylines, into a more pleasing arrangement. The class
implements a simple force-directed placement algorithm
(Fruchterman & Reingold "Graph Drawing by Force-directed Placement"
Software-Practice and Experience 21(11) 1991).
The input to the filter is a vtkPolyData representing the undirected
graphs. A graph is represented by a set of polylines and/or lines.
The output is also a vtkPolyData, where the point positions have been
modified. To use the filter, specify whether you wish the layout to
occur in 2D or 3D; the bounds in which the graph should lie (note that you
can just use automatic bounds computation); and modify the cool down
rate (controls the final process of simulated annealing).
"""
class vtkGraphReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkGraphReader - read vtkGraph data file
Super Class:
vtkDataReader
vtkGraphReader is a source object that reads ASCII or binary
vtkGraph data files in vtk format. (see text for format details).
The output of this reader is a single vtkGraph data object.
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkGraph vtkDataReader vtkGraphWriter
"""
class vtkGraphToPolyData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGraphToPolyData - convert a vtkGraph to vtkPolyData
Super Class:
vtkPolyDataAlgorithm
Converts a vtkGraph to a vtkPolyData. This assumes that the points
of the graph have already been filled (perhaps by vtkGraphLayout),
and converts all the edges of the graph into lines in the polydata.
The vertex data is passed along to the point data, and the edge data
is passed along to the cell data.
Only the owned graph edges (i.e. edges with ghost level 0) are copied
into the vtkPolyData.
"""
class vtkGraphWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkGraphWriter - write vtkGraph data to a file
Super Class:
vtkDataWriter
vtkGraphWriter is a sink object that writes ASCII or binary
vtkGraph data files in vtk format. See text for format details.
Caveats:
Binary files written on one system may not be readable on other systems.
"""
class vtkGreedyTerrainDecimation:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGreedyTerrainDecimation - reduce height field (represented as image) to reduced TIN
Super Class:
vtkPolyDataAlgorithm
vtkGreedyTerrainDecimation approximates a height field with a triangle
mesh (triangulated irregular network - TIN) using a greedy insertion
algorithm similar to that described by Garland and Heckbert in their paper
"Fast Polygonal Approximations of Terrain and Height Fields" (Technical
Report CMU-CS-95-181). The input to the filter is a height field
(represented by a image whose scalar values are height) and the output of
the filter is polygonal data consisting of triangles. The number of
triangles in the output is reduced in number as compared to a naive
tessellation of the input height field. This filter copies point data
from the input to the output for those points present in the output.
A brief description of the algorithm is as follows. The algorithm uses a
top-down decimation approach that initially represents the height field
with two triangles (whose vertices are at the four corners of the
image). These two triangles form a Delaunay triangulation. In an iterative
fashion, the point in the image with the greatest error (as compared to
the original height field) is injected into the triangulation. (Note that
the single point with the greatest error per triangle is identified and
placed into a priority queue. As the triangulation is modified, the errors
from the deleted triangles are removed from the queue, error values from
the new triangles are added.) The point whose error is at the top of the
queue is added to the triangulation, modifying it using the standard
incremental Delaunay point insertion (see vtkDelaunay2D) algorithm. Points
are repeatedly inserted until the appropriate (user-specified) error
criterion is met.
To use this filter, set the input and specify the error measure to be
used. The error measure options are 1) the absolute number of triangles
to be produced; 2) a fractional reduction of the mesh (numTris/maxTris)
where maxTris is the largest possible number of triangles
2*(dims[0]-1)*(dims[1]-1); 3) an absolute measure on error (maximum
difference in height field to reduced TIN); and 4) relative error (the
absolute error is normalized by the diagonal of the bounding box of the
height field).
Caveats:
This algorithm requires the entire input dataset to be in memory, hence it
may not work for extremely large images. Invoking BoundaryVertexDeletionOff
will allow you to stitch together images with matching boundaries.
The input height image is assumed to be positioned in the x-y plane so the
scalar value is the z-coordinate, height value.
See Also:
vtkDecimatePro vtkQuadricDecimation vtkQuadricClustering
"""
class vtkGridSynchronizedTemplates3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGridSynchronizedTemplates3D - generate isosurface from structured grids
Super Class:
vtkPolyDataAlgorithm
vtkGridSynchronizedTemplates3D is a 3D implementation of the synchronized
template algorithm.
Caveats:
This filter is specialized to 3D grids.
See Also:
vtkContourFilter vtkSynchronizedTemplates3D
"""
class vtkGroupLeafVertices:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkGroupLeafVertices - Filter that expands a tree, categorizing leaf vertices
Super Class:
vtkTreeAlgorithm
Use SetInputArrayToProcess(0, ...) to set the array to group on.
Currently this array must be a vtkStringArray.
"""
class vtkHedgeHog:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHedgeHog - create oriented lines from vector data
Super Class:
vtkPolyDataAlgorithm
vtkHedgeHog creates oriented lines from the input data set. Line
length is controlled by vector (or normal) magnitude times scale
factor. If VectorMode is UseNormal, normals determine the orientation
of the lines. Lines are colored by scalar data, if available.
"""
class vtkHierarchicalDataExtractDataSets:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHierarchicalDataExtractDataSets - extract a number of datasets
Super Class:
vtkMultiGroupDataExtractDataSets
Legacy class. Use vtkMultiGroupDataExtractDataSets instead.
See Also:
vtkMultiGroupDataExtractDataSets
"""
class vtkHierarchicalDataExtractLevel:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHierarchicalDataExtractLevel - extact levels between min and max
Super Class:
vtkMultiGroupDataExtractGroup
Legacy class. Use vtkMultiGroupDataExtractGroup instead.
See Also:
vtkMultiGroupDataExtractGroup
"""
class vtkHierarchicalDataGroupFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHierarchicalDataGroupFilter - collects multiple inputs into one hierarchical dataset
Super Class:
vtkMultiGroupDataGroupFilter
Legacy class. Use vtkMultiGroupDataGroupFilter instead.
See Also:
vtkMultiGroupDataGroupFilter
"""
class vtkHierarchicalDataLevelFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHierarchicalDataLevelFilter - generate scalars from levels
Super Class:
vtkMultiGroupDataGroupIdScalars
Legacy class. Use vtkMultiGroupDataGroupIdScalars instead.
See Also:
vtkMultiGroupDataGroupIdScalars
"""
class vtkHierarchicalDataSetAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHierarchicalDataSetAlgorithm - Superclass for algorithms that produce only vtkHierarchicalDataSet as output
Super Class:
vtkAlgorithm
Algorithms that take any type of data object (including composite dataset)
and produce a vtkHierarchicalDataSet in the output can subclass from this
class.
"""
class vtkHierarchicalDataSetGeometryFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHierarchicalDataSetGeometryFilter - extract geometry from hierarchical data
Super Class:
vtkMultiGroupDataGeometryFilter
Legacy class. Use vtkMultiGroupDataGeometryFilter instead.
See Also:
vtkMultiGroupDataGeometryFilter
"""
class vtkHull:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHull - produce an n-sided convex hull
Super Class:
vtkPolyDataAlgorithm
vtkHull is a filter which will produce an n-sided convex hull given a
set of n planes. (The convex hull bounds the input polygonal data.)
The hull is generated by squeezing the planes towards the input
vtkPolyData, until the planes just touch the vtkPolyData. Then,
the resulting planes are used to generate a polyhedron (i.e., hull)
that is represented by triangles.
The n planes can be defined in a number of ways including 1) manually
specifying each plane; 2) choosing the six face planes of the input's
bounding box; 3) choosing the eight vertex planes of the input's
bounding box; 4) choosing the twelve edge planes of the input's
bounding box; and/or 5) using a recursively subdivided octahedron.
Note that when specifying planes, the plane normals should point
outside of the convex region.
The output of this filter can be used in combination with vtkLODActor
to represent a levels-of-detail in the LOD hierarchy. Another use of
this class is to manually specify the planes, and then generate the
polyhedron from the planes (without squeezing the planes towards the
input). The method GenerateHull() is used to do this.
"""
class vtkHyperOctreeContourFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperOctreeContourFilter - generate isosurfaces/isolines from scalar values
Super Class:
vtkPolyDataAlgorithm
vtkContourFilter is a filter that takes as input any dataset and
generates on output isosurfaces and/or isolines. The exact form
of the output depends upon the dimensionality of the input data.
Data consisting of 3D cells will generate isosurfaces, data
consisting of 2D cells will generate isolines, and data with 1D
or 0D cells will generate isopoints. Combinations of output type
are possible if the input dimension is mixed.
To use this filter you must specify one or more contour values.
You can either use the method SetValue() to specify each contour
value, or use GenerateValues() to generate a series of evenly
spaced contours. It is also possible to accelerate the operation of
this filter (at the cost of extra memory) by using a
vtkScalarTree. A scalar tree is used to quickly locate cells that
contain a contour surface. This is especially effective if multiple
contours are being extracted. If you want to use a scalar tree,
invoke the method UseScalarTreeOn().
Caveats:
For unstructured data or structured grids, normals and gradients
are not computed. Use vtkPolyDataNormals to compute the surface
normals.
See Also:
vtkMarchingContourFilter vtkKitwareContourFilter
vtkMarchingCubes vtkSliceCubes vtkDividingCubes vtkMarchingSquares
vtkImageMarchingCubes
"""
class vtkHyperOctreeCutter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperOctreeCutter - Cut vtkHyperOctree with user-specified
Super Class:
vtkPolyDataAlgorithm
vtkHyperOctreeCutter is a filter to cut through data using any subclass of
vtkImplicitFunction. That is, a polygonal surface is created
corresponding to the implicit function F(x,y,z) = value(s), where
you can specify one or more values used to cut with.
In VTK, cutting means reducing a cell of dimension N to a cut surface
of dimension N-1. For example, a tetrahedron when cut by a plane (i.e.,
vtkPlane implicit function) will generate triangles. (In comparison,
clipping takes a N dimensional cell and creates N dimension primitives.)
vtkHyperOctreeCutter is generally used to "slice-through" a dataset,
generating a surface that can be visualized. It is also possible to use
vtkHyperOctreeCutter to do a form of volume rendering. vtkHyperOctreeCutter
does this by generating multiple cut surfaces (usually planes) which are
ordered (and rendered) from back-to-front. The surfaces are set translucent
to give a volumetric rendering effect.
Note that data can be cut using either 1) the scalar values associated
with the dataset or 2) an implicit function associated with this class.
By default, if an implicit function is set it is used to cut the data
set, otherwise the dataset scalars are used to perform the cut.
See Also:
vtkImplicitFunction vtkHyperOctree
"""
class vtkHyperOctreeDepth:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperOctreeDepth - Assign tree depth attribute to each cell.
Super Class:
vtkDataSetAlgorithm
This filter returns a shallow copy of its input HyperOctree with a new
data attribute field containing the depth of each cell.
See Also:
vtkHyperOctree
"""
class vtkHyperOctreeDualGridContourFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperOctreeDualGridContourFilter - generate isosurfaces/isolines from scalar values
Super Class:
vtkPolyDataAlgorithm
use of unsigned short to hold level index limits tree depth to 16.
To use this filter you must specify one or more contour values.
You can either use the method SetValue() to specify each contour
value, or use GenerateValues() to generate a series of evenly
spaced contours. It is also possible to accelerate the operation of
this filter (at the cost of extra memory) by using a
vtkScalarTree. A scalar tree is used to quickly locate cells that
contain a contour surface. This is especially effective if multiple
contours are being extracted. If you want to use a scalar tree,
invoke the method UseScalarTreeOn().
See Also:
vtkMarchingContourFilter vtkKitwareContourFilter
vtkMarchingCubes vtkSliceCubes vtkDividingCubes vtkMarchingSquares
vtkImageMarchingCubes
"""
class vtkHyperOctreeFractalSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperOctreeFractalSource - Create an octree from a fractal.
Super Class:
vtkHyperOctreeAlgorithm
See Also:
vtkHyperOctreeSampleFunction
"""
class vtkHyperOctreeLimiter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperOctreeLimiter - Limit the tree's depth, averaging data
Super Class:
vtkDataSetAlgorithm
This filter returns a lower resolution copy of its input vtkHyperOctree.
It does a length/area/volume weighted averaging to obtain data at each
cut point. Above the cut level, leaf attribute data is simply copied.
See Also:
vtkHyperOctree
"""
class vtkHyperOctreeSampleFunction:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperOctreeSampleFunction - sample an implicit function over an
Super Class:
vtkHyperOctreeAlgorithm
vtkHyperOctreeSampleFunction is a source object that evaluates an implicit
function to drive the subdivision process. The user can specify
the threshold over which a subdivision occurs, the maximum and minimum
level of subdivisions and the dimension of the hyperoctree.
See Also:
vtkSampleFunction
"""
class vtkHyperOctreeSurfaceFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperOctreeSurfaceFilter - Extracts outer (polygonal) surface.
Super Class:
vtkPolyDataAlgorithm
vtkHyperOctreeSurfaceFilter extracts the surface of an hyperoctree.
See Also:
vtkGeometryFilter vtkStructuredGridGeometryFilter.
"""
class vtkHyperOctreeToUniformGridFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperOctreeToUniformGridFilter - Flat the octree into a uniform
Super Class:
vtkImageAlgorithm
vtkHyperOctreeToUniformGridFilter creates a uniform grid with a resolution
based on the number of levels of the hyperoctree. Then, it copies celldata
in each cell of the uniform grid that belongs to an actual leaf of the
hyperoctree.
See Also:
vtkGeometryFilter vtkStructuredGridGeometryFilter.
"""
class vtkHyperStreamline:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkHyperStreamline - generate hyperstreamline in arbitrary dataset
Super Class:
vtkPolyDataAlgorithm
vtkHyperStreamline is a filter that integrates through a tensor field to
generate a hyperstreamline. The integration is along the maximum eigenvector
and the cross section of the hyperstreamline is defined by the two other
eigenvectors. Thus the shape of the hyperstreamline is "tube-like", with
the cross section being elliptical. Hyperstreamlines are used to visualize
tensor fields.
The starting point of a hyperstreamline can be defined in one of two ways.
First, you may specify an initial position. This is a x-y-z global
coordinate. The second option is to specify a starting location. This is
cellId, subId, and cell parametric coordinates.
The integration of the hyperstreamline occurs through the major eigenvector
field. IntegrationStepLength controls the step length within each cell
(i.e., this is the fraction of the cell length). The length of the
hyperstreamline is controlled by MaximumPropagationDistance. This parameter
is the length of the hyperstreamline in units of distance. The tube itself
is composed of many small sub-tubes - NumberOfSides controls the number of
sides in the tube, and StepLength controls the length of the sub-tubes.
Because hyperstreamlines are often created near regions of singularities, it
is possible to control the scaling of the tube cross section by using a
logarithmic scale. Use LogScalingOn to turn this capability on. The Radius
value controls the initial radius of the tube.
See Also:
vtkTensorGlyph vtkStreamer
"""
class vtkIVWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkIVWriter - export polydata into OpenInventor 2.0 format.
Super Class:
vtkPolyDataWriter
vtkIVWriter is a concrete subclass of vtkWriter that writes OpenInventor 2.0
files.
See Also:
vtkPolyDataWriter
"""
class vtkIdFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkIdFilter - generate scalars or field data from point and cell ids
Super Class:
vtkDataSetAlgorithm
vtkIdFilter is a filter that generates scalars or field data
using cell and point ids. That is, the point attribute data scalars
or field data are generated from the point ids, and the cell
attribute data scalars or field data are generated from the
cell ids.
Typically this filter is used with vtkLabeledDataMapper (and possibly
vtkSelectVisiblePoints) to create labels for points and cells, or labels
for the point or cell data scalar values.
"""
class vtkImageAccumulate:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageAccumulate - Generalized histograms up to 4 dimensions.
Super Class:
vtkImageAlgorithm
vtkImageAccumulate - This filter divides component space into
discrete bins. It then counts the number of pixels associated
with each bin. The output is this "scatter plot" (histogram values for 1D).
The dimensionality of the output depends on how many components the
input pixels have. Input pixels with one component generate a 1D histogram.
This filter can only handle images with 1 to 3 scalar components.
The input can be any type, but the output is always int.
Some statistics are computed on the pixel values at the same time.
The SetStencil and ReverseStencil
functions allow the statistics to be computed on an arbitrary
portion of the input data.
See the documentation for vtkImageStencilData for more information.
"""
class vtkImageAnisotropicDiffusion2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageAnisotropicDiffusion2D - edge preserving smoothing.
Super Class:
vtkImageSpatialAlgorithm
vtkImageAnisotropicDiffusion2D diffuses a 2d image iteratively.
The neighborhood of the diffusion is determined by the instance
flags. If "Edges" is on the 4 edge connected voxels
are included, and if "Corners" is on, the 4 corner connected voxels
are included. "DiffusionFactor" determines how far a pixel value
moves toward its neighbors, and is insensitive to the number of
neighbors chosen. The diffusion is anisotropic because it only occurs
when a gradient measure is below "GradientThreshold". Two gradient measures
exist and are toggled by the "GradientMagnitudeThreshold" flag.
When "GradientMagnitudeThreshold" is on, the magnitude of the gradient,
computed by central differences, above "DiffusionThreshold"
a voxel is not modified. The alternative measure examines each
neighbor independently. The gradient between the voxel and the neighbor
must be below the "DiffusionThreshold" for diffusion to occur with
THAT neighbor.
See Also:
vtkImageAnisotropicDiffusion3D
"""
class vtkImageAnisotropicDiffusion3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageAnisotropicDiffusion3D - edge preserving smoothing.
Super Class:
vtkImageSpatialAlgorithm
vtkImageAnisotropicDiffusion3D diffuses a volume iteratively.
The neighborhood of the diffusion is determined by the instance
flags. if "Faces" is on, the 6 voxels adjoined by faces are included
in the neighborhood. If "Edges" is on the 12 edge connected voxels
are included, and if "Corners" is on, the 8 corner connected voxels
are included. "DiffusionFactor" determines how far a pixel value
moves toward its neighbors, and is insensitive to the number of
neighbors chosen. The diffusion is anisotropic because it only occurs
when a gradient measure is below "GradientThreshold". Two gradient measures
exist and are toggled by the "GradientMagnitudeThreshold" flag.
When "GradientMagnitudeThreshold" is on, the magnitude of the gradient,
computed by central differences, above "DiffusionThreshold"
a voxel is not modified. The alternative measure examines each
neighbor independently. The gradient between the voxel and the neighbor
must be below the "DiffusionThreshold" for diffusion to occur with
THAT neighbor.
See Also:
vtkImageAnisotropicDiffusion2D
"""
class vtkImageAppend:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageAppend - Collects data from multiple inputs into one image.
Super Class:
vtkThreadedImageAlgorithm
vtkImageAppend takes the components from multiple inputs and merges
them into one output. The images are appended along the "AppendAxis".
Except for the append axis, all inputs must have the same extent.
All inputs must have the same number of scalar components.
A future extension might be to pad or clip inputs to have the same extent.
The output has the same origin and spacing as the first input.
The origin and spacing of all other inputs are ignored. All inputs
must have the same scalar type.
"""
class vtkImageAppendComponents:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageAppendComponents - Collects components from two inputs into
Super Class:
vtkThreadedImageAlgorithm
vtkImageAppendComponents takes the components from two inputs and merges
them into one output. If Input1 has M components, and Input2 has N
components, the output will have M+N components with input1
components coming first.
"""
class vtkImageBlend:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageBlend - blend images together using alpha or opacity
Super Class:
vtkThreadedImageAlgorithm
vtkImageBlend takes L, LA, RGB, or RGBA images as input and blends them
according to the alpha values and/or the opacity setting for each input.
The spacing, origin, extent, and number of components of the output are
the same as those for the first input. If the input has an alpha
component, then this component is copied unchanged into the output.
In addition, if the first input has either one component or two
components i.e. if it is either L (greyscale) or LA (greyscale + alpha)
then all other inputs must also be L or LA.
Different blending modes are available:
\em Normal (default) :
This is the standard blending mode used by OpenGL and other graphics
packages. The output always has the same number of components
and the same extent as the first input. The alpha value of the first
input is not used in the blending computation, instead it is copied
directly to the output.
\code
output <- input[0]
foreach input i {
foreach pixel px {
r <- input[i](px)(alpha) * opacity[i]
f <- (255 - r)
output(px) <- output(px) * f + input[i](px) * r
}
}
\endcode
\em Compound :
Images are compounded together and each component is scaled by the sum of
the alpha/opacity values. Use the CompoundThreshold method to
specify a threshold in compound mode. Pixels with opacity*alpha less
than or equal to this threshold are ignored.
The alpha value of the first input, if present, is NOT copied to the alpha
value of the output. The output always has the same number of components
and the same extent as the first input.
\code
output <- 0
foreach pixel px {
sum <- 0
foreach input i {
r <- input[i](px)(alpha) * opacity[i]
sum <- sum + r
if r > threshold {
output(px) <- output(px) + input[i](px) * r
}
}
output(px) <- output(px) / sum
}
\endcode
"""
class vtkImageButterworthHighPass:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageButterworthHighPass - Frequency domain high pass.
Super Class:
vtkThreadedImageAlgorithm
This filter only works on an image after it has been converted to
frequency domain by a vtkImageFFT filter. A vtkImageRFFT filter
can be used to convert the output back into the spatial domain.
In vtkImageButterworthHighPass, the frequency components around 0 are
attenuated. Input and output are in doubles, with two components
(complex numbers).
out(i, j) = 1 / (1 + pow(CutOff/Freq(i,j), 2*Order));
See Also:
vtkImageButterworthLowPass
"""
class vtkImageButterworthLowPass:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageButterworthLowPass - Frequency domain Low pass.
Super Class:
vtkThreadedImageAlgorithm
This filter only works on an image after it has been converted to
frequency domain by a vtkImageFFT filter. A vtkImageRFFT filter
can be used to convert the output back into the spatial domain.
In vtkImageButterworthLowPass, the high frequency components are
attenuated. Input and output are in doubles, with two components
(complex numbers).
out(i, j) = 1 / (1 + pow(Freq(i,j)/CutOff, 2*Order));
See Also:
vtkImageButterworthHighPass vtkImageFFT vtkImageRFFT
"""
class vtkImageCacheFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageCacheFilter - Caches multiple vtkImageData objects.
Super Class:
vtkImageAlgorithm
vtkImageCacheFilter keeps a number of vtkImageData objects from previous
updates to satisfy future updates without needing to update the input. It
does not change the data at all. It just makes the pipeline more
efficient at the expense of using extra memory.
"""
class vtkImageCast:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageCast - Image Data type Casting Filter
Super Class:
vtkThreadedImageAlgorithm
vtkImageCast filter casts the input type to match the output type in
the image processing pipeline. The filter does nothing if the input
already has the correct type. To specify the "CastTo" type,
use "SetOutputScalarType" method.
See Also:
vtkImageThreshold vtkImageShiftScale
"""
class vtkImageChangeInformation:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageChangeInformation - modify spacing, origin and extent.
Super Class:
vtkImageAlgorithm
vtkImageChangeInformation modifies the spacing, origin, or extent of
the data without changing the data itself. The data is not resampled
by this filter, only the information accompanying the data is modified.
"""
class vtkImageCheckerboard:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageCheckerboard - show two images at once using a checkboard pattern
Super Class:
vtkThreadedImageAlgorithm
vtkImageCheckerboard displays two images as one using a checkerboard
pattern. This filter can be used to compare two images. The
checkerboard pattern is controlled by the NumberOfDivisions
ivar. This controls the number of checkerboard divisions in the whole
extent of the image.
"""
class vtkImageCityBlockDistance:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageCityBlockDistance - 1,2 or 3D distance map.
Super Class:
vtkImageDecomposeFilter
vtkImageCityBlockDistance creates a distance map using the city block
(Manhattan) distance measure. The input is a mask. Zero values are
considered boundaries. The output pixel is the minimum of the input pixel
and the distance to a boundary (or neighbor value + 1 unit).
Distance values are calculated in pixels.
The filter works by taking 6 passes (for 3d distance map): 2 along each
axis (forward and backward). Each pass keeps a running minimum distance.
For some reason, I preserve the sign of the distance. If the input
mask is initially negative, the output distances will be negative.
Distance maps can have insides (negative regions)
and outsides (positive regions).
"""
class vtkImageClip:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageClip - Reduces the image extent of the input.
Super Class:
vtkImageAlgorithm
vtkImageClip will make an image smaller. The output must have
an image extent which is a subset of the input's. The filter has two
modes of operation:
1: By default, the data is not copied in this filter.
Only the whole extent is modified.
2: If ClipDataOn is set, then you will get no more than the clipped
extent.
"""
class vtkImageConstantPad:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageConstantPad - Makes image larger by padding with constant.
Super Class:
vtkImagePadFilter
vtkImageConstantPad changes the image extent of its input.
Any pixels outside of the original image extent are filled with
a constant value.
See Also:
vtkImageWrapPad vtkImageMirrorPad
"""
class vtkImageContinuousDilate3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageContinuousDilate3D - Dilate implemented as a maximum.
Super Class:
vtkImageSpatialAlgorithm
vtkImageContinuousDilate3D replaces a pixel with the maximum over
an ellipsoidal neighborhood. If KernelSize of an axis is 1, no processing
is done on that axis.
"""
class vtkImageContinuousErode3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageContinuousErode3D - Erosion implemented as a minimum.
Super Class:
vtkImageSpatialAlgorithm
vtkImageContinuousErode3D replaces a pixel with the minimum over
an ellipsoidal neighborhood. If KernelSize of an axis is 1, no processing
is done on that axis.
"""
class vtkImageConvolve:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageConvolve - Convolution of an image with a kernel.
Super Class:
vtkThreadedImageAlgorithm
vtkImageConvolve convolves the image with a 3D NxNxN kernel or a
2D NxN kernel. The output image is cropped to the same size as
the input.
"""
class vtkImageCorrelation:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageCorrelation - Correlation imageof the two inputs.
Super Class:
vtkThreadedImageAlgorithm
vtkImageCorrelation finds the correlation between two data sets.
SetDimensionality determines
whether the Correlation will be 3D, 2D or 1D.
The default is a 2D Correlation. The Output type will be double.
The output size will match the size of the first input.
The second input is considered the correlation kernel.
"""
class vtkImageCursor3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageCursor3D - Paints a cursor on top of an image or volume.
Super Class:
vtkImageInPlaceFilter
vtkImageCursor3D will draw a cursor on a 2d image or 3d volume.
"""
class vtkImageDataGeometryFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageDataGeometryFilter - extract geometry for structured points
Super Class:
vtkPolyDataAlgorithm
vtkImageDataGeometryFilter is a filter that extracts geometry from a
structured points dataset. By specifying appropriate i-j-k indices (via the
"Extent" instance variable), it is possible to extract a point, a line, a
plane (i.e., image), or a "volume" from the dataset. (Since the output is
of type polydata, the volume is actually a (n x m x o) region of points.)
The extent specification is zero-offset. That is, the first k-plane in
a 50x50x50 volume is given by (0,49, 0,49, 0,0).
Caveats:
If you don't know the dimensions of the input dataset, you can use a large
number to specify extent (the number will be clamped appropriately). For
example, if the dataset dimensions are 50x50x50, and you want the fifth
k-plane, you can use the extents (0,100, 0,100, 4,4). The 100 will
automatically be clamped to 49.
See Also:
vtkGeometryFilter vtkStructuredGridSource
"""
class vtkImageDataStreamer:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageDataStreamer - Initiates streaming on image data.
Super Class:
vtkImageAlgorithm
To satisfy a request, this filter calls update on its input
many times with smaller update extents. All processing upstream
streams smaller pieces.
"""
class vtkImageDifference:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageDifference - Compares images for regression tests.
Super Class:
vtkThreadedImageAlgorithm
vtkImageDifference takes two rgb unsigned char images and compares them.
It allows the images to be slightly different. If AllowShift is on,
then each pixel can be shifted by one pixel. Threshold is the allowable
error for each pixel.
This is not a symmetric filter and the difference computed is not symmetric
when AllowShift is on. Specifically in that case a pixel in SetImage input
will be compared to the matching pixel in the input as well as to the
input's eight connected neighbors. BUT... the opposite is not true. So for
example if a valid image (SetImage) has a single white pixel in it, it
will not find a match in the input image if the input image is black
(because none of the nine suspect pixels are white). In contrast, if there
is a single white pixel in the input image and the valid image (SetImage)
is all black it will match with no error because all it has to do is find
black pixels and even though the input image has a white pixel, its
neighbors are not white.
"""
class vtkImageDilateErode3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageDilateErode3D - Dilates one value and erodes another.
Super Class:
vtkImageSpatialAlgorithm
vtkImageDilateErode3D will dilate one value and erode another.
It uses an elliptical foot print, and only erodes/dilates on the
boundary of the two values. The filter is restricted to the
X, Y, and Z axes for now. It can degenerate to a 2 or 1 dimensional
filter by setting the kernel size to 1 for a specific axis.
"""
class vtkImageDivergence:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageDivergence - Divergence of a vector field.
Super Class:
vtkThreadedImageAlgorithm
vtkImageDivergence takes a 3D vector field
and creates a scalar field which
which represents the rate of change of the vector field.
The definition of Divergence:
Given V = P(x,y,z), Q(x,y,z), R(x,y,z),
Divergence = dP/dx + dQ/dy + dR/dz.
"""
class vtkImageDotProduct:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageDotProduct - Dot product of two vector images.
Super Class:
vtkThreadedImageAlgorithm
vtkImageDotProduct interprets the scalar components of two images
as vectors and takes the dot product vector by vector (pixel by pixel).
"""
class vtkImageEllipsoidSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageEllipsoidSource - Create a binary image of an ellipsoid.
Super Class:
vtkImageAlgorithm
vtkImageEllipsoidSource creates a binary image of an ellipsoid. It was created
as an example of a simple source, and to test the mask filter.
It is also used internally in vtkImageDilateErode3D.
"""
class vtkImageEuclideanDistance:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageEuclideanDistance - computes 3D Euclidean DT
Super Class:
vtkImageDecomposeFilter
vtkImageEuclideanDistance implements the Euclidean DT using
Saito's algorithm. The distance map produced contains the square of the
Euclidean distance values.
The algorithm has O(n^(D+1)) complexity over n x n x ... x n images in D
dimensions. It is very efficient on relatively small images. Cuisenaire's
algorithms should be used instead if n >> 500. These are not implemented
yet.
For the special case of images where the slice-size is a multiple of
2^N with a large N (typically for 256x256 slices), Saito's algorithm
encounters a lot of cache conflicts during the 3rd iteration which can
slow it very significantly. In that case, one should use
::SetAlgorithmToSaitoCached() instead for better performance.
References:
T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance
transformations of an n-dimensional digitised picture with applications.
Pattern Recognition, 27(11). pp. 1551--1565, 1994.
O. Cuisenaire. Distance Transformation: fast algorithms and applications
to medical image processing. PhD Thesis, Universite catholique de Louvain,
October 1999. http://ltswww.epfl.ch/~cuisenai/papers/oc_thesis.pdf
"""
class vtkImageEuclideanToPolar:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageEuclideanToPolar - Converts 2D Euclidean coordinates to polar.
Super Class:
vtkThreadedImageAlgorithm
For each pixel with vector components x,y, this filter outputs
theta in component0, and radius in component1.
"""
class vtkImageExport:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageExport - Export VTK images to third-party systems.
Super Class:
vtkImageAlgorithm
vtkImageExport provides a way of exporting image data at the end
of a pipeline to a third-party system or to a simple C array.
Applications can use this to get direct access to the image data
in memory. A callback interface is provided to allow connection
of the VTK pipeline to a third-party pipeline. This interface
conforms to the interface of vtkImageImport.
In Python it is possible to use this class to write the image data
into a python string that has been pre-allocated to be the correct
size.
See Also:
vtkImageImport
"""
class vtkImageExtractComponents:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageExtractComponents - Outputs a single component
Super Class:
vtkThreadedImageAlgorithm
vtkImageExtractComponents takes an input with any number of components
and outputs some of them. It does involve a copy of the data.
See Also:
vtkImageAppendComponents
"""
class vtkImageFFT:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageFFT - Fast Fourier Transform.
Super Class:
vtkImageFourierFilter
vtkImageFFT implements a fast Fourier transform. The input
can have real or complex data in any components and data types, but
the output is always complex doubles with real values in component0, and
imaginary values in component1. The filter is fastest for images that
have power-of-two sizes. The filter uses a butterfly filter for each
prime factor of the dimension. This makes images with prime number dimensions
(e.g. 17x17) much slower to compute. Multi-dimensional (i.e. volume)
FFTs are decomposed so that each axis executes in series.
"""
class vtkImageFlip:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageFlip - This flips an axis of an image. Right becomes left ...
Super Class:
vtkImageReslice
vtkImageFlip will reflect the data along the filtered axis. This filter is
actually a thin wrapper around vtkImageReslice.
"""
class vtkImageFourierCenter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageFourierCenter - Shifts constant frequency to center for
Super Class:
vtkImageDecomposeFilter
This filter is used for displaying images in frequency space. FFT converts spatial
images into frequency space, but puts the zero frequency at the origin.
This filter shifts the zero frequency to the center of the image.
Input and output are assumed to be doubles.
"""
class vtkImageGaussianSmooth:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageGaussianSmooth - Performs a gaussian convolution.
Super Class:
vtkThreadedImageAlgorithm
vtkImageGaussianSmooth implements a convolution of the input image
with a gaussian. Supports from one to three dimensional convolutions.
"""
class vtkImageGaussianSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageGaussianSource - Create an image with Gaussian pixel values.
Super Class:
vtkImageAlgorithm
vtkImageGaussianSource just produces images with pixel values determined
by a Gaussian.
"""
class vtkImageGradient:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageGradient - Computes the gradient vector.
Super Class:
vtkThreadedImageAlgorithm
vtkImageGradient computes the gradient vector of an image. The
vector results are stored as scalar components. The Dimensionality
determines whether to perform a 2d or 3d gradient. The default is
two dimensional XY gradient. OutputScalarType is always
double. Gradient is computed using central differences.
"""
class vtkImageGradientMagnitude:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageGradientMagnitude - Computes magnitude of the gradient.
Super Class:
vtkThreadedImageAlgorithm
vtkImageGradientMagnitude computes the gradient magnitude of an image.
Setting the dimensionality determines whether the gradient is computed on
2D images, or 3D volumes. The default is two dimensional XY images.
See Also:
vtkImageGradient vtkImageMagnitude
"""
class vtkImageGridSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageGridSource - Create an image of a grid.
Super Class:
vtkImageAlgorithm
vtkImageGridSource produces an image of a grid. The
default output type is double.
"""
class vtkImageHSIToRGB:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageHSIToRGB - Converts HSI components to RGB.
Super Class:
vtkThreadedImageAlgorithm
For each pixel with hue, saturation and intensity components this filter
outputs the color coded as red, green, blue. Output type must be the same
as input type.
See Also:
vtkImageRGBToHSI
"""
class vtkImageHSVToRGB:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageHSVToRGB - Converts HSV components to RGB.
Super Class:
vtkThreadedImageAlgorithm
For each pixel with hue, saturation and value components this filter
outputs the color coded as red, green, blue. Output type must be the same
as input type.
See Also:
vtkImageRGBToHSV
"""
class vtkImageHybridMedian2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageHybridMedian2D - Median filter that preserves lines and
Super Class:
vtkImageSpatialAlgorithm
vtkImageHybridMedian2D is a median filter that preserves thin lines and
corners. It operates on a 5x5 pixel neighborhood. It computes two values
initially: the median of the + neighbors and the median of the x
neighbors. It then computes the median of these two values plus the center
pixel. The result of this second median is the output pixel value.
"""
class vtkImageIdealHighPass:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageIdealHighPass - Simple frequency domain band pass.
Super Class:
vtkThreadedImageAlgorithm
This filter only works on an image after it has been converted to
frequency domain by a vtkImageFFT filter. A vtkImageRFFT filter
can be used to convert the output back into the spatial domain.
vtkImageIdealHighPass just sets a portion of the image to zero. The sharp
cutoff in the frequency domain produces ringing in the spatial domain.
Input and Output must be doubles. Dimensionality is set when the axes are
set. Defaults to 2D on X and Y axes.
See Also:
vtkImageButterworthHighPass vtkImageIdealLowPass vtkImageFFT vtkImageRFFT
"""
class vtkImageIdealLowPass:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageIdealLowPass - Simple frequency domain band pass.
Super Class:
vtkThreadedImageAlgorithm
This filter only works on an image after it has been converted to
frequency domain by a vtkImageFFT filter. A vtkImageRFFT filter
can be used to convert the output back into the spatial domain.
vtkImageIdealLowPass just sets a portion of the image to zero. The result
is an image with a lot of ringing. Input and Output must be doubles.
Dimensionality is set when the axes are set. Defaults to 2D on X and Y
axes.
See Also:
vtkImageButterworthLowPass vtkImageIdealHighPass vtkImageFFT vtkImageRFFT
"""
class vtkImageImport:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageImport - Import data from a C array.
Super Class:
vtkImageAlgorithm
vtkImageImport provides methods needed to import image data from a source
independent of VTK, such as a simple C array or a third-party pipeline.
Note that the VTK convention is for the image voxel index (0,0,0) to be
the lower-left corner of the image, while most 2D image formats use
the upper-left corner. You can use vtkImageFlip to correct the
orientation after the image has been loaded into VTK.
Note that it is also possible to import the raw data from a Python string
instead of from a C array.
See Also:
vtkImageExport
"""
class vtkImageIslandRemoval2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageIslandRemoval2D - Removes small clusters in masks.
Super Class:
vtkImageAlgorithm
vtkImageIslandRemoval2D computes the area of separate islands in
a mask image. It removes any island that has less than AreaThreshold
pixels. Output has the same ScalarType as input. It generates
the whole 2D output image for any output request.
"""
class vtkImageLaplacian:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageLaplacian - Computes divergence of gradient.
Super Class:
vtkThreadedImageAlgorithm
vtkImageLaplacian computes the Laplacian (like a second derivative)
of a scalar image. The operation is the same as taking the
divergence after a gradient. Boundaries are handled, so the output
has the same extent as the input.
Dimensionality determines how the input regions are interpreted.
(images, or volumes). The Dimensionality defaults to two.
"""
class vtkImageLogarithmicScale:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageLogarithmicScale - Passes each pixel through log function.
Super Class:
vtkThreadedImageAlgorithm
vtkImageLogarithmicScale passes each pixel through the function
c*log(1+x). It also handles negative values with the function
-c*log(1-x).
"""
class vtkImageLogic:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageLogic - And, or, xor, nand, nor, not.
Super Class:
vtkThreadedImageAlgorithm
vtkImageLogic implements basic logic operations.
SetOperation is used to select the filter's behavior.
The filter can take two or one input. Inputs must have the same type.
"""
class vtkImageLuminance:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageLuminance - Computes the luminance of the input
Super Class:
vtkThreadedImageAlgorithm
vtkImageLuminance calculates luminance from an rgb input.
"""
class vtkImageMagnify:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMagnify - magnify an image by an integer value
Super Class:
vtkThreadedImageAlgorithm
vtkImageMagnify maps each pixel of the input onto an n x m x ... region
of the output. Location (0,0,...) remains in the same place. The
magnification occurs via pixel replication, or if Interpolate is on,
by bilinear interpolation. Initially, interpolation is off and magnification
factors are set to 1 in all directions.
"""
class vtkImageMagnitude:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMagnitude - Colapses components with magnitude function..
Super Class:
vtkThreadedImageAlgorithm
vtkImageMagnitude takes the magnitude of the components.
"""
class vtkImageMandelbrotSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMandelbrotSource - Mandelbrot image.
Super Class:
vtkImageAlgorithm
vtkImageMandelbrotSource creates an unsigned char image of the Mandelbrot
set. The values in the image are the number of iterations it takes for
the magnitude of the value to get over 2. The equation repeated is
z = z^2 + C (z and C are complex). Initial value of z is zero, and the
real value of C is mapped onto the x axis, and the imaginary value of C
is mapped onto the Y Axis. I was thinking of extending this source
to generate Julia Sets (initial value of Z varies). This would be 4
possible parameters to vary, but there are no more 4d images :(
The third dimension (z axis) is the imaginary value of the initial value.
"""
class vtkImageMapToColors:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMapToColors - map the input image through a lookup table
Super Class:
vtkThreadedImageAlgorithm
The vtkImageMapToColors filter will take an input image of any valid
scalar type, and map the first component of the image through a
lookup table. The result is an image of type VTK_UNSIGNED_CHAR.
If the lookup table is not set, or is set to NULL, then the input
data will be passed through if it is already of type VTK_UNSIGNED_CHAR.
See Also:
vtkLookupTable vtkScalarsToColors
"""
class vtkImageMapToRGBA:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMapToRGBA - map the input image through a lookup table
Super Class:
vtkImageMapToColors
This filter has been replaced by vtkImageMapToColors, which provides
additional features. Use vtkImageMapToColors instead.
See Also:
vtkLookupTable
"""
class vtkImageMapToWindowLevelColors:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMapToWindowLevelColors - map the input image through a lookup table and window / level it
Super Class:
vtkImageMapToColors
The vtkImageMapToWindowLevelColors filter will take an input image of any
valid scalar type, and map the first component of the image through a
lookup table. The resulting color will be modulated with the value obtained
by a window/level operation. The result is an image of type
VTK_UNSIGNED_CHAR. If the lookup table is not set, or is set to NULL, then
the input data will be passed through if it is already of type
UNSIGNED_CHAR.
See Also:
vtkLookupTable vtkScalarsToColors
"""
class vtkImageMarchingCubes:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMarchingCubes - generate isosurface(s) from volume/images
Super Class:
vtkPolyDataAlgorithm
vtkImageMarchingCubes is a filter that takes as input images (e.g., 3D
image region) and generates on output one or more isosurfaces.
One or more contour values must be specified to generate the isosurfaces.
Alternatively, you can specify a min/max scalar range and the number of
contours to generate a series of evenly spaced contour values.
This filter can stream, so that the entire volume need not be loaded at
once. Streaming is controlled using the instance variable
InputMemoryLimit, which has units KBytes.
Caveats:
This filter is specialized to volumes. If you are interested in
contouring other types of data, use the general vtkContourFilter. If you
want to contour an image (i.e., a volume slice), use vtkMarchingSquares.
See Also:
vtkContourFilter vtkSliceCubes vtkMarchingSquares vtkSynchronizedTemplates3D
"""
class vtkImageMask:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMask - Combines a mask and an image.
Super Class:
vtkThreadedImageAlgorithm
vtkImageMask combines a mask with an image. A non-zero mask value
implies the output pixel will be the same as the image.
If a mask pixel is zero, then the output pixel
is set to "MaskedValue". The filter also has the option to pass
the mask through a boolean not operation before processing the image.
This reverses the passed and replaced pixels.
The two inputs should have the same "WholeExtent".
The mask input should be unsigned char, and the image scalar type
is the same as the output scalar type.
"""
class vtkImageMaskBits:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMaskBits - applies a bit-mask pattern to each component.
Super Class:
vtkThreadedImageAlgorithm
vtkImageMaskBits applies a bit-mask pattern to each component. The
bit-mask can be applied using a variety of boolean bitwise operators.
"""
class vtkImageMathematics:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMathematics - Add, subtract, multiply, divide, invert, sin,
Super Class:
vtkThreadedImageAlgorithm
vtkImageMathematics implements basic mathematical operations. SetOperation is
used to select the filter's behavior. The filter can take one or two
inputs.
"""
class vtkImageMedian3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMedian3D - Median Filter
Super Class:
vtkImageSpatialAlgorithm
vtkImageMedian3D is a median filter that replaces each pixel with the
median value from a rectangular neighborhood around that pixel.
Neighborhoods can be no more than 3 dimensional. Setting one
axis of the neighborhood kernelSize to 1 changes the filter
into a 2D median.
"""
class vtkImageMirrorPad:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageMirrorPad - Extra pixels are filled by mirror images.
Super Class:
vtkImagePadFilter
vtkImageMirrorPad makes an image larger by filling extra pixels with
a mirror image of the original image (mirror at image boundaries).
"""
class vtkImageNoiseSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageNoiseSource - Create an image filled with noise.
Super Class:
vtkImageAlgorithm
vtkImageNoiseSource just produces images filled with noise. The only
option now is uniform noise specified by a min and a max. There is one
major problem with this source. Every time it executes, it will output
different pixel values. This has important implications when a stream
requests overlapping regions. The same pixels will have different values
on different updates.
"""
class vtkImageNonMaximumSuppression:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageNonMaximumSuppression - Performs non-maximum suppression
Super Class:
vtkThreadedImageAlgorithm
vtkImageNonMaximumSuppression sets to zero any pixel that is not a peak.
If a pixel has a neighbor along the vector that has larger magnitude, the
smaller pixel is set to zero. The filter takes two inputs: a magnitude
and a vector. Output is magnitude information and is always in doubles.
Typically this filter is used with vtkImageGradient and
vtkImageGradientMagnitude as inputs.
"""
class vtkImageNormalize:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageNormalize - Normalizes that scalar components for each point.
Super Class:
vtkThreadedImageAlgorithm
For each point, vtkImageNormalize normalizes the vector defined by the
scalar components. If the magnitude of this vector is zero, the output
vector is zero also.
"""
class vtkImageOpenClose3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageOpenClose3D - Will perform opening or closing.
Super Class:
vtkImageAlgorithm
vtkImageOpenClose3D performs opening or closing by running two
vtkImageDilateErode3D filters in series. The size of the operation
is determined by the method SetKernelSize, and the operator is an ellipse.
OpenValue and CloseValue determine how the filter behaves. For binary
images, opening and closing behave as expected.
Close value is first dilated, and then eroded.
Open value is first eroded, and then dilated.
Degenerate two-dimensional opening/closing can be achieved by setting
one axis of the 3D KernelSize to 1.
Values other than the open value and close value are not touched.
This enables the filter to process segmented images containing more than
two tags.
"""
class vtkImagePadFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImagePadFilter - Super class for filters that fill in extra pixels.
Super Class:
vtkThreadedImageAlgorithm
vtkImagePadFilter changes the image extent of an image. If the image
extent is larger than the input image extent, the extra pixels are
filled by an algorithm determined by the subclass.
The image extent of the output has to be specified.
"""
class vtkImagePermute:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImagePermute - Permutes axes of input.
Super Class:
vtkImageReslice
vtkImagePermute reorders the axes of the input. Filtered axes specify
the input axes which become X, Y, Z. The input has to have the
same scalar type as the output. The filter does copy the
data when it executes. This filter is actually a very thin wrapper
around vtkImageReslice.
"""
class vtkImageQuantizeRGBToIndex:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageQuantizeRGBToIndex - generalized histograms up to 4 dimensions
Super Class:
vtkImageAlgorithm
vtkImageQuantizeRGBToIndex takes a 3 component RGB image as
input and produces a one component index image as output, along with
a lookup table that contains the color definitions for the index values.
This filter works on the entire input extent - it does not perform
streaming, and it does not support threaded execution (because it has
to process the entire image).
To use this filter, you typically set the number of colors
(between 2 and 65536), execute it, and then retrieve the lookup table.
The colors can then be looked up using the lookup table and the index image.
"""
class vtkImageRFFT:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageRFFT - Reverse Fast Fourier Transform.
Super Class:
vtkImageFourierFilter
vtkImageRFFT implements the reverse fast Fourier transform. The input
can have real or complex data in any components and data types, but
the output is always complex doubles with real values in component0, and
imaginary values in component1. The filter is fastest for images that
have power-of-two sizes. The filter uses a butterfly filter for each
prime factor of the dimension. This makes images with prime number dimensions
(e.g. 17x17) much slower to compute. Multi-dimensional (i.e. volume)
FFTs are decomposed so that each axis executes in series.
In most cases the RFFT will produce an image whose imaginary values are all
zeros. In this case vtkImageExtractComponents can be used to remove
the imaginary component, leaving only the real image.
See Also:
vtkImageExtractComponents
"""
class vtkImageRGBToHSI:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageRGBToHSI - Converts RGB components to HSI.
Super Class:
vtkThreadedImageAlgorithm
For each pixel with red, blue, and green components this
filter outputs the color coded as hue, saturation and intensity.
Output type must be the same as input type.
"""
class vtkImageRGBToHSV:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageRGBToHSV - Converts RGB components to HSV.
Super Class:
vtkThreadedImageAlgorithm
For each pixel with red, blue, and green components this
filter outputs the color coded as hue, saturation and value.
Output type must be the same as input type.
"""
class vtkImageRange3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageRange3D - Max - min of a circular neighborhood.
Super Class:
vtkImageSpatialAlgorithm
vtkImageRange3D replaces a pixel with the maximum minus minimum over
an ellipsoidal neighborhood. If KernelSize of an axis is 1, no processing
is done on that axis.
"""
class vtkImageReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkImageReader - Superclass of transformable binary file readers.
Super Class:
vtkImageReader2
vtkImageReader provides methods needed to read a region from a file.
It supports both transforms and masks on the input data, but as a result
is more complicated and slower than its parent class vtkImageReader2.
See Also:
vtkBMPReader vtkPNMReader vtkTIFFReader
"""
class vtkImageReader2:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageReader2 - Superclass of binary file readers.
Super Class:
vtkImageAlgorithm
vtkImageReader2 is the parent class for vtkImageReader. It
is a good super class for streaming readers that do not require
a mask or transform on the data. vtkImageReader was implemented
before vtkImageReader2; vtkImageReader2 is intended to have
a simpler interface.
See Also:
vtkJPEGReader vtkPNGReader vtkImageReader vtkGESignaReader
"""
class vtkImageRectilinearWipe:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageRectilinearWipe - make a rectilinear combination of two images.
Super Class:
vtkThreadedImageAlgorithm
vtkImageRectilinearWipe makes a rectilinear combination of two
images. The two input images must correspond in size, scalar type and
number of components.
The resulting image has four possible configurations
called:
Quad - alternate input 0 and input 1 horizontally and
vertically. Select this with SetWipeModeToQuad. The Position
specifies the location of the quad intersection.
Corner - 3 of one input and 1 of the other. Select the location of
input 0 with SetWipeModeToLowerLeft, SetWipeModeToLowerRight,
SetWipeModeToUpperLeft and SetWipeModeToUpperRight. The Position
selects the location of the corner.
Horizontal - alternate input 0 and input 1 with a vertical
split. Select this with SetWipeModeToHorizontal. Position[0]
specifies the location of the vertical transition between input 0
and input 1.
Vertical - alternate input 0 and input 1 with a horizontal
split. Select this with SetWipeModeToVertical. Only the y component of
Position is used; it specifies the location of the horizontal transition
between input 0 and input 1.
See Also:
vtkImageCheckerboard
"""
class vtkImageResample:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageResample - Resamples an image to be larger or smaller.
Super Class:
vtkImageReslice
This filter produces an output with different spacing (and extent)
than the input. Linear interpolation can be used to resample the data.
The Output spacing can be set explicitly or relative to input spacing
with the SetAxisMagnificationFactor method.
"""
class vtkImageReslice:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageReslice - Reslices a volume along a new set of axes.
Super Class:
vtkThreadedImageAlgorithm
vtkImageReslice is the swiss-army-knife of image geometry filters:
It can permute, rotate, flip, scale, resample, deform, and pad image
data in any combination with reasonably high efficiency. Simple
operations such as permutation, resampling and padding are done
with similar efficiency to the specialized vtkImagePermute,
vtkImageResample, and vtkImagePad filters. There are a number of
tasks that vtkImageReslice is well suited for:
<p>1) Application of simple rotations, scales, and translations to
an image. It is often a good idea to use vtkImageChangeInformation
to center the image first, so that scales and rotations occur around
the center rather than around the lower-left corner of the image.
<p>2) Resampling of one data set to match the voxel sampling of
a second data set via the SetInformationInput() method, e.g. for
the purpose of comparing two images or combining two images.
A transformation, either linear or nonlinear, can be applied
at the same time via the SetResliceTransform method if the two
images are not in the same coordinate space.
<p>3) Extraction of slices from an image volume. The most convenient
way to do this is to use SetResliceAxesDirectionCosines() to
specify the orientation of the slice. The direction cosines give
the x, y, and z axes for the output volume. The method
SetOutputDimensionality(2) is used to specify that you want to output a
slice rather than a volume. The SetResliceAxesOrigin() command is
used to provide an (x,y,z) point that the slice will pass through.
You can use both the ResliceAxes and the ResliceTransform at the
same time, in order to extract slices from a volume that you have
applied a transformation to.
Caveats:
This filter is very inefficient if the output X dimension is 1.
See Also:
vtkAbstractTransform vtkMatrix4x4
"""
class vtkImageSeedConnectivity:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageSeedConnectivity - SeedConnectivity with user defined seeds.
Super Class:
vtkImageAlgorithm
vtkImageSeedConnectivity marks pixels connected to user supplied seeds.
The input must be unsigned char, and the output is also unsigned char. If
a seed supplied by the user does not have pixel value "InputTrueValue",
then the image is scanned +x, +y, +z until a pixel is encountered with
value "InputTrueValue". This new pixel is used as the seed . Any pixel
with out value "InputTrueValue" is consider off. The output pixels values
are 0 for any off pixel in input, "OutputTrueValue" for any pixels
connected to seeds, and "OutputUnconnectedValue" for any on pixels not
connected to seeds. The same seeds are used for all images in the image
set.
"""
class vtkImageSeparableConvolution:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageSeparableConvolution - 3 1D convolutions on an image
Super Class:
vtkImageDecomposeFilter
vtkImageSeparableConvolution performs a convolution along the X, Y,
and Z axes of an image, based on the three different 1D convolution
kernels. The kernels must be of odd size, and are considered to be
centered at (int)((kernelsize - 1) / 2.0 ). If a kernel is NULL,
that dimension is skipped. This filter is designed to efficiently
convolve separable filters that can be decomposed into 1 or more 1D
convolutions. It also handles arbitrarily large kernel sizes, and
uses edge replication to handle boundaries.
"""
class vtkImageShiftScale:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageShiftScale - shift and scale an input image
Super Class:
vtkThreadedImageAlgorithm
With vtkImageShiftScale, pixels are shifted and then scaled. As
a convenience, this class allows you to set the output scalar type
similar to vtkImageCast. This is because shift scale operations
frequently convert data types.
"""
class vtkImageShrink3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageShrink3D - Subsamples an image.
Super Class:
vtkThreadedImageAlgorithm
vtkImageShrink3D shrinks an image by subsampling on a
uniform grid (integer multiples).
"""
class vtkImageSinusoidSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageSinusoidSource - Create an image with sinusoidal pixel values.
Super Class:
vtkImageAlgorithm
vtkImageSinusoidSource just produces images with pixel values determined
by a sinusoid.
"""
class vtkImageSkeleton2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageSkeleton2D - Skeleton of 2D images.
Super Class:
vtkImageIterateFilter
vtkImageSkeleton2D should leave only single pixel width lines
of non-zero-valued pixels (values of 1 are not allowed).
It works by erosion on a 3x3 neighborhood with special rules.
The number of iterations determines how far the filter can erode.
There are three pruning levels:
prune == 0 will leave traces on all angles...
prune == 1 will not leave traces on 135 degree angles, but will on 90.
prune == 2 does not leave traces on any angles leaving only closed loops.
Prune defaults to zero. The output scalar type is the same as the input.
"""
class vtkImageSobel2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageSobel2D - Computes a vector field using sobel functions.
Super Class:
vtkImageSpatialAlgorithm
vtkImageSobel2D computes a vector field from a scalar field by using
Sobel functions. The number of vector components is 2 because
the input is an image. Output is always doubles.
"""
class vtkImageSobel3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageSobel3D - Computes a vector field using sobel functions.
Super Class:
vtkImageSpatialAlgorithm
vtkImageSobel3D computes a vector field from a scalar field by using
Sobel functions. The number of vector components is 3 because
the input is a volume. Output is always doubles. A little creative
liberty was used to extend the 2D sobel kernels into 3D.
"""
class vtkImageSpatialAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageSpatialAlgorithm - Filters that operate on pixel neighborhoods.
Super Class:
vtkThreadedImageAlgorithm
vtkImageSpatialAlgorithm is a super class for filters that operate on an
input neighborhood for each output pixel. It handles even-sized
neighborhoods, but there can be a half-pixel shift associated with
processing. This superclass has some logic for handling boundaries. It
can split regions into boundary and non-boundary pieces and call different
execute methods.
"""
class vtkImageSpatialFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageSpatialFilter - Filters that operate on pixel neighborhoods.
Super Class:
vtkImageToImageFilter
vtkImageSpatialFilter is a super class for filters that operate on an
input neighborhood for each output pixel. It handles even-sized
neighborhoods, but there can be a half-pixel shift associated with
processing. This superclass has some logic for handling boundaries. It
can split regions into boundary and non-boundary pieces and call different
execute methods.
.SECTION Warning
This used to be the parent class for most imaging filters in VTK 4.x; this
role has now been taken over by vtkImageSpatialAlgorithm. You should consider
using vtkImageSpatialAlgorithm instead when writing filters for VTK 5 and above.
This class was kept to ensure full backward compatibility.
.SECTION See also
vtkSimpleImageToImageFilter vtkImageToImageFilter vtkImageSpatialAlgorithm
"""
class vtkImageStencil:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageStencil - combine images via a cookie-cutter operation
Super Class:
vtkThreadedImageAlgorithm
vtkImageStencil will combine two images together using a stencil.
The stencil should be provided in the form of a vtkImageStencilData,
"""
class vtkImageThreshold:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageThreshold - Flexible threshold
Super Class:
vtkThreadedImageAlgorithm
vtkImageThreshold can do binary or continuous thresholding for lower, upper
or a range of data. The output data type may be different from the
input, but defaults to the same type.
"""
class vtkImageToImageStencil:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageToImageStencil - clip an image with a mask image
Super Class:
vtkImageStencilSource
vtkImageToImageStencil will convert a vtkImageData into a stencil
that can be used with vtkImageStencil or other VTK classes that apply
a stencil to an image.
See Also:
vtkImageStencil vtkImplicitFunctionToImageStencil vtkPolyDataToImageStencil
"""
class vtkImageToPolyDataFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageToPolyDataFilter - generate linear primitives (vtkPolyData) from an image
Super Class:
vtkPolyDataAlgorithm
vtkImageToPolyDataFilter converts raster data (i.e., an image) into
polygonal data (i.e., quads or n-sided polygons), with each polygon
assigned a constant color. This is useful for writers that generate vector
formats (i.e., CGM or PostScript). To use this filter, you specify how to
quantize the color (or whether to use an image with a lookup table), and
what style the output should be. The output is always polygons, but the
choice is: n x m quads (where n and m define the input image dimensions), the
"Pixelize" option; arbitrary polygons, the "Polygonalize" option; or a variable
number of quads of constant color generated along scan lines, the "RunLength"
option.
The algorithm quantizes color in order to create coherent regions that the
polygons can represent with good compression. By default, the input image
is quantized to 256 colors using a 3-3-2 bits for red-green-blue. However,
you can also supply a single component image and a lookup table, with the
single component assumed to be an index into the table. (Note: a quantized
image can be generated with the filter vtkImageQuantizeRGBToIndex.) The
number of colors on output is equal to the number of colors in the input
lookup table (or 256 if the built in linear ramp is used).
The output of the filter is polygons with a single color per polygon cell.
If the output style is set to "Polygonalize", the polygons may have a
large number of points (bounded by something like 2*(n+m)); and the
polygon may not be convex which may cause rendering problems on some
systems (use vtkTriangleFilter). Otherwise, each polygon will have four
vertices. The output also contains scalar data defining RGB color in
unsigned char form.
Caveats:
The input linear lookup table must
be of the form of 3-component unsigned char.
This filter defines constant cell colors. If you have a plotting
device that supports Gouraud shading (linear interpolation of color), then
superior algorithms are available for generating polygons from images.
Note that many plotting devices/formats support only a limited number of
colors.
See Also:
vtkCGMWriter vtkImageQuantizeRGBToIndex vtkTriangleFilter
"""
class vtkImageToStructuredPoints:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageToStructuredPoints - Attaches image pipeline to VTK.
Super Class:
vtkImageAlgorithm
vtkImageToStructuredPoints changes an image cache format to
a structured points dataset. It takes an Input plus an optional
VectorInput. The VectorInput converts the RGB scalar components
of the VectorInput to vector pointdata attributes. This filter
will try to reference count the data but in some cases it must
make a copy.
"""
class vtkImageTranslateExtent:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageTranslateExtent - Changes extent, nothing else.
Super Class:
vtkImageAlgorithm
vtkImageTranslateExtent shifts the whole extent, but does not
change the data.
"""
class vtkImageVariance3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageVariance3D - Variance in a neighborhood.
Super Class:
vtkImageSpatialAlgorithm
vtkImageVariance3D replaces each pixel with a measurement of
pixel variance in an elliptical neighborhood centered on that pixel.
The value computed is not exactly the variance.
The difference between the neighbor values and center value is computed
and squared for each neighbor. These values are summed and divided by
the total number of neighbors to produce the output value.
"""
class vtkImageWeightedSum:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageWeightedSum - adds any number of images, weighting
Super Class:
vtkThreadedImageAlgorithm
All weights are normalized so they will sum to 1.
Images must have the same extents. Output is
.SECTION Thanks
The original author of this class is Lauren O'Donnell (MIT) for Slicer
"""
class vtkImageWrapPad:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImageWrapPad - Makes an image larger by wrapping existing data.
Super Class:
vtkImagePadFilter
vtkImageWrapPad performs a modulo operation on the output pixel index
to determine the source input index. The new image extent of the
output has to be specified. Input has to be the same scalar type as
output.
"""
class vtkImageWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkImageWriter - Writes images to files.
Super Class:
vtkImageAlgorithm
vtkImageWriter writes images to files with any data type. The data type of
the file is the same scalar type as the input. The dimensionality
determines whether the data will be written in one or multiple files.
This class is used as the superclass of most image writing classes
such as vtkBMPWriter etc. It supports streaming.
"""
class vtkImplicitFunctionToImageStencil:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImplicitFunctionToImageStencil - clip an image with a function
Super Class:
vtkImageStencilSource
vtkImplicitFunctionToImageStencil will convert a vtkImplicitFunction into
a stencil that can be used with vtkImageStencil or with other classes
that apply a stencil to an image.
See Also:
vtkImplicitFunction vtkImageStencil vtkPolyDataToImageStencil
"""
class vtkImplicitModeller:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImplicitModeller - compute distance from input geometry on structured point dataset
Super Class:
vtkImageAlgorithm
vtkImplicitModeller is a filter that computes the distance from the input
geometry to the points of an output structured point set. This distance
function can then be "contoured" to generate new, offset surfaces from
the original geometry. An important feature of this object is
"capping". If capping is turned on, after the implicit model is created,
the values on the boundary of the structured points dataset are set to
the cap value. This is used to force closure of the resulting contoured
surface. Note, however, that large cap values can generate weird surface
normals in those cells adjacent to the boundary of the dataset. Using
a smaller cap value will reduce this effect.
<P>
Another important ivar is MaximumDistance. This controls how far into the
volume the distance function is computed from the input geometry. Small
values give significant increases in performance. However, there can be
strange sampling effects at the extreme range of the MaximumDistance.
<P>
In order to properly execute and sample the input data, a rectangular
region in space must be defined (this is the ivar ModelBounds). If not
explicitly defined, the model bounds will be computed. Note that to avoid
boundary effects, it is possible to adjust the model bounds (i.e., using
the AdjustBounds and AdjustDistance ivars) to strictly contain the
sampled data.
<P>
This filter has one other unusual capability: it is possible to append
data in a sequence of operations to generate a single output. This is
useful when you have multiple datasets and want to create a
conglomeration of all the data. However, the user must be careful to
either specify the ModelBounds or specify the first item such that its
bounds completely contain all other items. This is because the
rectangular region of the output can not be changed after the 1st Append.
<P>
The ProcessMode ivar controls the method used within the Append function
(where the actual work is done regardless if the Append function is
explicitly called) to compute the implicit model. If set to work in voxel
mode, each voxel is visited once. If set to cell mode, each cell is visited
once. Tests have shown once per voxel to be faster when there are a
lot of cells (at least a thousand?); relative performance improvement
increases with additional cells. Primitives should not be stripped for best
performance of the voxel mode. Also, if explicitly using the Append feature
many times, the cell mode will probably be better because each voxel will be
visited each Append. Append the data before input if possible when using
the voxel mode. Do not switch between voxel and cell mode between execution
of StartAppend and EndAppend.
<P>
Further performance improvement is now possible using the PerVoxel process
mode on multi-processor machines (the mode is now multithreaded). Each
thread processes a different "slab" of the output. Also, if the input is
vtkPolyData, it is appropriately clipped for each thread; that is, each
thread only considers the input which could affect its slab of the output.
<P>
This filter can now produce output of any type supported by vtkImageData.
However to support this change, additional sqrts must be executed during the
Append step. Previously, the output was initialized to the squared CapValue
in StartAppend, the output was updated with squared distance values during
the Append, and then the sqrt of the distances was computed in EndAppend.
To support different scalar types in the output (largely to reduce memory
requirements, as a vtkImageShiftScale and/or vtkImageCast could have
achieved the same result), we can't "afford" to save squared value in the
output, because then we could only represent up to the sqrt of the scalar
max for an integer type in the output; 1 (instead of 255) for an unsigned
char; 11 for a char (instead of 127). Thus this change may result in a
minor performance degradation. Non-float output types can be scaled to the
CapValue by turning ScaleToMaximumDistance On.
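A rough sketch (not part of the original documentation) of the append
workflow described above, using the VTK Python bindings and assuming this
help text refers to vtkImplicitModeller (the ivars and the
StartAppend/Append/EndAppend calls named above match that class); "mesh1"
and "mesh2" are placeholder vtkPolyData inputs:
<pre>
import vtk
imp = vtk.vtkImplicitModeller()
# ModelBounds must contain all appended data; it cannot change after the
# first Append().
imp.SetModelBounds(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
imp.SetSampleDimensions(64, 64, 64)
imp.SetMaximumDistance(0.1)   # small values sample faster
imp.CappingOn()
imp.SetCapValue(0.2)
imp.StartAppend()
imp.Append(mesh1)
imp.Append(mesh2)
imp.EndAppend()
# Contour the distance volume to generate an offset surface.
contour = vtk.vtkContourFilter()
contour.SetInputConnection(imp.GetOutputPort())
contour.SetValue(0, 0.05)
contour.Update()
</pre>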
See Also:
vtkSampleFunction vtkContourFilter
"""
class vtkImplicitTextureCoords:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkImplicitTextureCoords - generate 1D, 2D, or 3D texture coordinates based on implicit function(s)
Super Class:
vtkDataSetAlgorithm
vtkImplicitTextureCoords is a filter to generate 1D, 2D, or 3D texture
coordinates from one, two, or three implicit functions, respectively.
In combination with a vtkBooleanTexture map (or another texture map of
your own creation), the texture coordinates can be used to highlight
(via color or intensity) or cut (via transparency) dataset geometry without
any complex geometric processing. (Note: the texture coordinates are
referred to as r-s-t coordinates.)
The texture coordinates are automatically normalized to lie between (0,1).
Thus, no matter what the implicit functions evaluate to, the resulting
texture coordinates lie between (0,1), with the zero implicit function
value mapped to the 0.5 texture coordinate value. Depending upon the
maximum negative/positive implicit function values, the full (0,1) range
may not be occupied (i.e., the positive/negative ranges are mapped using
the same scale factor).
A boolean variable InvertTexture is available to flip the texture
coordinates around 0.5 (value 1.0 becomes 0.0, 0.25->0.75). This is
equivalent to flipping the texture map (but a whole lot easier).
Caveats:
You can use the transformation capabilities of vtkImplicitFunction to
orient, translate, and scale the implicit functions. Also, the dimension of
the texture coordinates is implicitly defined by the number of implicit
functions defined.
See Also:
vtkImplicitFunction vtkTexture vtkBooleanTexture vtkTransformTexture
"""
class vtkInterpolateDataSetAttributes:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkInterpolateDataSetAttributes - interpolate scalars, vectors, etc. and other dataset attributes
Super Class:
vtkDataSetAlgorithm
vtkInterpolateDataSetAttributes is a filter that interpolates data set
attribute values between input data sets. The input to the filter
must be datasets of the same type, same number of cells, and same
number of points. The output of the filter is a data set of the same
type as the input dataset and whose attribute values have been
interpolated at the parametric value specified.
The filter is used by specifying two or more input data sets (total of N),
and a parametric value t (0 <= t <= N-1). The output will contain
interpolated data set attributes common to all input data sets. (For
example, if one input has scalars and vectors, and another has just
scalars, then only scalars will be interpolated and output.)
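A minimal sketch (not from the original documentation) using the VTK Python
bindings; "ds0" and "ds1" are placeholder datasets of the same type, cell
count, and point count, and the exact input-adding call (AddInputData vs.
the older AddInput) depends on the VTK version in use:
<pre>
import vtk
interp = vtk.vtkInterpolateDataSetAttributes()
interp.AddInputData(ds0)   # corresponds to t = 0
interp.AddInputData(ds1)   # corresponds to t = 1
interp.SetT(0.5)           # halfway between the two inputs
interp.Update()
blended = interp.GetOutput()
</pre>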
"""
class vtkJPEGReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkJPEGReader - read JPEG files
Super Class:
vtkImageReader2
vtkJPEGReader is a source object that reads JPEG files.
It should be able to read most JPEG files.
See Also:
vtkJPEGWriter
"""
class vtkJPEGWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkJPEGWriter - Writes JPEG files.
Super Class:
vtkImageWriter
vtkJPEGWriter writes JPEG files. It supports 1 and 3 component data of
unsigned char. It relies on the IJG's libjpeg. Thanks to IJG for
supplying a public jpeg IO library.
See Also:
vtkJPEGReader
"""
class vtkKdTreeSelector:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkKdTreeSelector - Selects point ids using a kd-tree.
Super Class:
vtkSelectionAlgorithm
If SetKdTree is used, the filter ignores the input and selects based on that
kd-tree. If SetKdTree is not used, the filter builds a kd-tree using the
input point set and uses that tree for selection. The output is a
vtkSelection containing the ids found in the kd-tree using the specified
bounds.
"""
class vtkLSDynaReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkLSDynaReader - Read LS-Dyna databases (d3plot)
Super Class:
vtkMultiBlockDataSetAlgorithm
This filter reads LS-Dyna databases.
The Set/GetFileName() routines are actually wrappers around the
Set/GetDatabaseDirectory() members; the actual filename you choose is
irrelevant -- only the directory name is used. This is done in order to
accommodate ParaView.
Note that this reader produces 7 output meshes.
These meshes are required as several attributes are defined on subsets
of the mesh. Below is a list of meshes in the order they are output and
an explanation of which attributes are unique to each mesh:
- solid (3D) elements: number of integration points is different than 2D
- thick shell elements: number of integration points is different than
planar 2D
- shell (2D) elements: number of integration points is different than 3D
- rigid surfaces: can't have deflection, only velocity, accel, etc.
- road surfaces: have only a "segment ID" (serves as material ID) and a
velocity.
- beam elements: have Frenet (TNB) frame and cross-section attributes
(shape and size)
- spherical particle hydrodynamics (SPH) elements: have a radius of
influence, internal energy, etc.
Because each mesh has its own cell attributes, the vtkLSDynaReader has a
rather large API. Instead of a single set of routines to query and set
cell array names and status, one exists for each possible output mesh.
Also, GetNumberOfCells() will return the sum of all the cells in all 7
meshes. If you want the number of cells in a specific mesh, there are
separate routines for each mesh type.
.SECTION "Developer Notes"
"""
class vtkLineSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkLineSource - create a line defined by two end points
Super Class:
vtkPolyDataAlgorithm
vtkLineSource is a source object that creates a polyline defined by
two endpoints. The number of segments composing the polyline is
controlled by setting the object resolution.
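For instance, a short sketch (not from the original documentation) with the
VTK Python bindings; the endpoint values are arbitrary:
<pre>
import vtk
line = vtk.vtkLineSource()
line.SetPoint1(0.0, 0.0, 0.0)
line.SetPoint2(1.0, 2.0, 0.0)
line.SetResolution(10)   # polyline made of 10 segments
line.Update()
polyline = line.GetOutput()
</pre>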
"""
class vtkLinearExtrusionFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkLinearExtrusionFilter - sweep polygonal data creating a "skirt" from free edges and lines, and lines from vertices
Super Class:
vtkPolyDataAlgorithm
vtkLinearExtrusionFilter is a modeling filter. It takes polygonal data as
input and generates polygonal data on output. The input dataset is swept
according to some extrusion function and creates new polygonal primitives.
These primitives form a "skirt" or swept surface. For example, sweeping a
line results in a quadrilateral, and sweeping a triangle creates a "wedge".
There are a number of control parameters for this filter. You can
control whether the sweep of a 2D object (i.e., polygon or triangle strip)
is capped with the generating geometry via the "Capping" ivar. Also, you
can extrude in the direction of a user specified vector, towards a point,
or in the direction of vertex normals (normals must be provided - use
vtkPolyDataNormals if necessary). The amount of extrusion is controlled by
the "ScaleFactor" instance variable.
The skirt is generated by locating certain topological features. Free
edges (edges of polygons or triangle strips only used by one polygon or
triangle strips) generate surfaces. This is true also of lines or
polylines. Vertices generate lines.
This filter can be used to create 3D fonts, 3D irregular bar charts,
or to model 2 1/2D objects like punched plates. It also can be used to
create solid objects from 2D polygonal meshes.
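A minimal sketch (not part of the original documentation) using the VTK
Python bindings; "text2d" stands for any vtkPolyData producer (e.g., the
output of vtkVectorText) and the vector and scale values are arbitrary:
<pre>
import vtk
extrude = vtk.vtkLinearExtrusionFilter()
extrude.SetInputConnection(text2d.GetOutputPort())
extrude.SetExtrusionTypeToVectorExtrusion()
extrude.SetVector(0.0, 0.0, 1.0)   # sweep along +z
extrude.SetScaleFactor(0.5)        # amount of extrusion
extrude.CappingOn()                # close the swept surface
extrude.Update()
</pre>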
Caveats:
Some polygonal objects have no free edges (e.g., sphere). When swept,
this will result in two separate surfaces if capping is on, or no surface
if capping is off.
See Also:
vtkRotationalExtrusionFilter
"""
class vtkLinearSubdivisionFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkLinearSubdivisionFilter - generate a subdivision surface using the Linear Scheme
Super Class:
vtkInterpolatingSubdivisionFilter
vtkLinearSubdivisionFilter is a filter that generates output by
subdividing its input polydata. Each subdivision iteration creates 4
new triangles for each triangle in the polydata.
See Also:
vtkInterpolatingSubdivisionFilter vtkButterflySubdivisionFilter
"""
class vtkLinkEdgels:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkLinkEdgels - links edgels together to form digital curves.
Super Class:
vtkPolyDataAlgorithm
vtkLinkEdgels links edgels into digital curves which are then stored
as polylines. The algorithm works one pixel at a time only looking at
its immediate neighbors. There is a GradientThreshold that can be set
that eliminates any pixels with a smaller gradient value. This can
be used as the lower threshold of a two value edgel thresholding.
For the remaining edgels, links are first tried for the four
connected neighbors. A successful neighbor will satisfy three
tests. First, both edgels must be above the gradient
threshold. Second, the difference between the orientation between
the two edgels (Alpha) and each edgel's orientation (Phi) must be
less than LinkThreshold. Third, the difference between the two
edgels' Phi values must be less than PhiThreshold.
The most successful link is selected. The measure is simply the
sum of the three angle differences (actually stored as the sum of
the cosines). If none of the four connected neighbors succeeds, then
the eight connected neighbors are examined using the same method.
This filter requires gradient information so you will need to use
a vtkImageGradient at some point prior to this filter. Typically
a vtkNonMaximumSuppression filter is also used. vtkThresholdEdgels
can be used to complete the two value edgel thresholding as used
in a Canny edge detector. The vtkSubpixelPositionEdgels filter
can also be used after this filter to adjust the edgel locations.
See Also:
vtkImageData vtkImageGradient vtkImageNonMaximumSuppression
"""
class vtkLoopSubdivisionFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkLoopSubdivisionFilter - generate a subdivision surface using the Loop Scheme
Super Class:
vtkApproximatingSubdivisionFilter
vtkLoopSubdivisionFilter is an approximating subdivision scheme that
creates four new triangles for each triangle in the mesh. The user can
specify the NumberOfSubdivisions. Loop's subdivision scheme is
described in: Loop, C., "Smooth Subdivision Surfaces Based on
Triangles," Master's Thesis, University of Utah, August 1987.
For a nice summary of the technique see Hoppe, H., et al.,
"Piecewise Smooth Surface Reconstruction," Proceedings of SIGGRAPH 94
(Orlando, Florida, July 24-29, 1994). In Computer Graphics
Proceedings, Annual Conference Series, 1994, ACM SIGGRAPH,
pp. 295-302.
<P>
The filter only operates on triangles. Users should use the
vtkTriangleFilter to triangulate meshes that contain polygons or
triangle strips.
<P>
The filter approximates point data using the same scheme. New
triangles created at a subdivision step will have the cell data of
their parent cell.
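A brief sketch (not from the original documentation) with the VTK Python
bindings, triangulating first as recommended above; "mesh" is a placeholder
vtkPolyData source:
<pre>
import vtk
tris = vtk.vtkTriangleFilter()
tris.SetInputConnection(mesh.GetOutputPort())
loop = vtk.vtkLoopSubdivisionFilter()
loop.SetInputConnection(tris.GetOutputPort())
loop.SetNumberOfSubdivisions(2)   # each pass makes 4 triangles per triangle
loop.Update()
</pre>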
See Also:
vtkApproximatingSubdivisionFilter
"""
class vtkMCubesReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkMCubesReader - read binary marching cubes file
Super Class:
vtkPolyDataAlgorithm
vtkMCubesReader is a source object that reads binary marching cubes
files. (Marching cubes is an isosurfacing technique that generates
many triangles.) The binary format is supported by W. Lorensen's
marching cubes program (and the vtkSliceCubes object). The format
repeats point coordinates, so this object will merge the points
with a vtkLocator object. You can choose to supply the vtkLocator
or use the default.
Caveats:
Binary files are assumed to be written in sun/hp/sgi (i.e., Big Endian) form.
Because points are merged when read, degenerate triangles may be removed.
Thus the number of triangles read may be fewer than the number of triangles
actually created.
The point merging does not take into account that the same point may have
different normals. For example, running vtkPolyDataNormals after
vtkContourFilter may split triangles because of the FeatureAngle
ivar. Subsequent rea ...
[Truncated]
See Also:
vtkContourFilter vtkMarchingCubes vtkSliceCubes vtkLocator
"""
class vtkMCubesWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkMCubesWriter - write binary marching cubes file
Super Class:
vtkPolyDataWriter
vtkMCubesWriter is a polydata writer that writes binary marching cubes
files. (Marching cubes is an isosurfacing technique that generates many
triangles.) The binary format is supported by W. Lorensen's marching cubes
program (and the vtkSliceCubes object). Each triangle is represented by
three records, with each record consisting of six single precision
floating point numbers representing a triangle vertex coordinate and
vertex normal.
Caveats:
Binary files are written in sun/hp/sgi (i.e., Big Endian) form.
See Also:
vtkMarchingCubes vtkSliceCubes vtkMCubesReader
"""
class vtkMFIXReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkMFIXReader - reads a dataset in MFIX file format
Super Class:
vtkUnstructuredGridAlgorithm
vtkMFIXReader creates an unstructured grid dataset. It reads a restart
file and a set of sp files. The restart file contains the mesh
information. MFIX meshes are either cylindrical or rectilinear, but
this reader will convert them to an unstructured grid. The sp files
contain transient data for the cells. Each sp file has one or more
variables stored inside it.
See Also:
vtkGAMBITReader
"""
class vtkMINCImageReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkMINCImageReader - A reader for MINC files.
Super Class:
vtkImageReader2
MINC is a NetCDF-based medical image file format that was developed
at the Montreal Neurological Institute in 1992.
This class will read a MINC file into VTK, rearranging the data to
match the VTK x, y, and z dimensions, and optionally rescaling
real-valued data to VTK_FLOAT if RescaleRealValuesOn() is set.
If RescaleRealValues is off, then the data will be stored in its
original data type and the GetRescaleSlope(), GetRescaleIntercept()
method can be used to retrieve global rescaling parameters.
If the original file had a time dimension, the SetTimeStep() method
can be used to specify a time step to read.
All of the original header information can be accessed through the
GetImageAttributes() method.
See Also:
vtkMINCImageWriter vtkMINCImageAttributes
.SECTION Thanks
Thanks to David Gobbi for writing this class and Atamai Inc. for
contributing it to VTK.
"""
class vtkMINCImageWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkMINCImageWriter - A writer for MINC files.
Super Class:
vtkImageWriter
MINC is a NetCDF-based medical image file format that was developed
at the Montreal Neurological Institute in 1992.
The data is written slice-by-slice, and this writer is therefore
suitable for streaming MINC data that is larger than the memory
size through VTK. This writer can also produce files with up to
4 dimensions, where the fourth dimension is provided by using
AddInput() to specify multiple input data sets. If you want to
set header information for the file, you must supply a
vtkMINCImageAttributes object.
See Also:
vtkMINCImageReader vtkMINCImageAttributes
.SECTION Thanks
Thanks to David Gobbi for writing this class and Atamai Inc. for
contributing it to VTK.
"""
class vtkMarchingContourFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMarchingContourFilter - generate isosurfaces/isolines from scalar values
Super Class:
vtkPolyDataAlgorithm
vtkMarchingContourFilter is a filter that takes as input any dataset and
generates on output isosurfaces and/or isolines. The exact form
of the output depends upon the dimensionality of the input data.
Data consisting of 3D cells will generate isosurfaces, data
consisting of 2D cells will generate isolines, and data with 1D
or 0D cells will generate isopoints. Combinations of output type
are possible if the input dimension is mixed.
This filter will identify special dataset types (e.g., structured
points) and use the appropriate specialized filter to process the
data. For example, if the input dataset type is a volume, this
filter will create an internal vtkMarchingCubes instance and use
it. This gives much better performance.
To use this filter you must specify one or more contour values.
You can either use the method SetValue() to specify each contour
value, or use GenerateValues() to generate a series of evenly
spaced contours. It is also possible to accelerate the operation of
this filter (at the cost of extra memory) by using a
vtkScalarTree. A scalar tree is used to quickly locate cells that
contain a contour surface. This is especially effective if multiple
contours are being extracted. If you want to use a scalar tree,
invoke the method UseScalarTreeOn().
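A minimal sketch (not part of the original documentation) with the VTK
Python bindings; "reader" is a placeholder volume source and the contour
values are arbitrary:
<pre>
import vtk
contour = vtk.vtkMarchingContourFilter()
contour.SetInputConnection(reader.GetOutputPort())
contour.SetValue(0, 500.0)   # one explicit contour value
# or: contour.GenerateValues(5, 100.0, 900.0)  # 5 evenly spaced values
contour.UseScalarTreeOn()    # trade memory for speed with many contours
contour.Update()
</pre>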
Caveats:
For unstructured data or structured grids, normals and gradients
are not computed. This calculation will be implemented in the
future. In the mean time, use vtkPolyDataNormals to compute the surface
normals.
See Also:
vtkMarchingCubes vtkSliceCubes vtkDividingCubes vtkMarchingSquares
vtkImageMarchingCubes
"""
class vtkMarchingCubes:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMarchingCubes - generate isosurface(s) from volume
Super Class:
vtkPolyDataAlgorithm
vtkMarchingCubes is a filter that takes as input a volume (e.g., 3D
structured point set) and generates on output one or more isosurfaces.
One or more contour values must be specified to generate the isosurfaces.
Alternatively, you can specify a min/max scalar range and the number of
contours to generate a series of evenly spaced contour values.
Caveats:
This filter is specialized to volumes. If you are interested in
contouring other types of data, use the general vtkContourFilter. If you
want to contour an image (i.e., a volume slice), use vtkMarchingSquares.
See Also:
vtkContourFilter vtkSliceCubes vtkMarchingSquares vtkDividingCubes
"""
class vtkMarchingSquares:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMarchingSquares - generate isoline(s) from structured points set
Super Class:
vtkPolyDataAlgorithm
vtkMarchingSquares is a filter that takes as input a structured points set
and generates on output one or more isolines. One or more contour values
must be specified to generate the isolines. Alternatively, you can specify
a min/max scalar range and the number of contours to generate a series of
evenly spaced contour values.
To generate contour lines the input data must be of topological dimension 2
(i.e., an image). If not, you can use the ImageRange ivar to select an
image plane from an input volume. This avoids having to extract a plane first
(using vtkExtractSubVolume). The filter deals with this by first
trying to use the input data directly; if it is not a 2D image, it then uses
the ImageRange ivar to reduce it to an image.
Caveats:
This filter is specialized to images. If you are interested in
contouring other types of data, use the general vtkContourFilter.
See Also:
vtkContourFilter vtkMarchingCubes vtkSliceCubes vtkDividingCubes
"""
class vtkMaskPoints:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMaskPoints - selectively filter points
Super Class:
vtkPolyDataAlgorithm
vtkMaskPoints is a filter that passes through points and point attributes
from the input dataset. (Other geometry is not passed through.) It is
possible to mask every nth point, and to specify an initial offset
to begin masking from. A special random mode feature enables random
selection of points. The filter can also generate vertices (topological
primitives) as well as points. This is useful because vertices are
rendered while points are not.
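A small sketch (not from the original documentation) using the VTK Python
bindings; "source" is a placeholder dataset producer and the ratios are
arbitrary:
<pre>
import vtk
mask = vtk.vtkMaskPoints()
mask.SetInputConnection(source.GetOutputPort())
mask.SetOnRatio(10)        # keep every 10th point
mask.SetOffset(0)          # start masking at the first point
mask.RandomModeOn()        # or select points at random instead
mask.GenerateVerticesOn()  # emit vertex cells so the points render
mask.Update()
</pre>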
"""
class vtkMaskPolyData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMaskPolyData - sample subset of input polygonal data cells
Super Class:
vtkPolyDataAlgorithm
vtkMaskPolyData is a filter that sub-samples the cells of input polygonal
data. The user specifies every nth item, with an initial offset to begin
sampling.
See Also:
vtkMaskPoints
"""
class vtkMassProperties:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMassProperties - estimate volume, area, shape index of triangle mesh
Super Class:
vtkPolyDataAlgorithm
vtkMassProperties estimates the volume, the surface area, and the
normalized shape index of a triangle mesh. The algorithm
implemented here is based on the discrete form of the divergence
theorem. The general assumption here is that the model is a
closed surface. For more details see the following reference
(Alyassin A.M. et al, "Evaluation of new algorithms for the
interactive measurement of surface area and volume", Med Phys 21(6)
1994.).
Caveats:
Currently only triangles are processed. Use vtkTriangleFilter to
convert any strips or polygons to triangles.
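A minimal sketch (not part of the original documentation) with the VTK
Python bindings, triangulating first per the caveat above; "surface" is a
placeholder closed vtkPolyData source:
<pre>
import vtk
tris = vtk.vtkTriangleFilter()
tris.SetInputConnection(surface.GetOutputPort())
props = vtk.vtkMassProperties()
props.SetInputConnection(tris.GetOutputPort())
props.Update()
print(props.GetVolume(), props.GetSurfaceArea(),
      props.GetNormalizedShapeIndex())
</pre>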
See Also:
vtkTriangleFilter
"""
class vtkMedicalImageReader2:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMedicalImageReader2 - vtkImageReader2 with medical meta data.
Super Class:
vtkImageReader2
vtkMedicalImageReader2 is a parent class for medical image readers.
It provides a place to store patient information that may be stored
in the image header.
See Also:
vtkImageReader2 vtkGESignaReader vtkMedicalImageProperties
"""
class vtkMergeColumns:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMergeColumns - merge two columns into a single column
Super Class:
vtkTableAlgorithm
vtkMergeColumns replaces two columns in a table with a single column
containing data in both columns. The columns are set using
SetInputArrayToProcess(0, 0, 0, vtkDataObject::FIELD_ASSOCIATION_NONE, "col1")
and
SetInputArrayToProcess(1, 0, 0, vtkDataObject::FIELD_ASSOCIATION_NONE, "col2")
where "col1" and "col2" are the names of the columns to merge.
The user may also specify the name of the merged column.
The arrays must be of the same type.
If the arrays are numeric, the values are summed in the merged column.
If the arrays are strings, the values are concatenated. The strings are
separated by a space if they are both nonempty.
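The same setup in the VTK Python bindings might look like the sketch below
(not from the original documentation); "table" is a placeholder vtkTable
producer, and the merged-column setter name (SetMergedColumnName) is an
assumption based on the description above:
<pre>
import vtk
merge = vtk.vtkMergeColumns()
merge.SetInputConnection(table.GetOutputPort())
merge.SetInputArrayToProcess(0, 0, 0,
    vtk.vtkDataObject.FIELD_ASSOCIATION_NONE, "col1")
merge.SetInputArrayToProcess(1, 0, 0,
    vtk.vtkDataObject.FIELD_ASSOCIATION_NONE, "col2")
merge.SetMergedColumnName("col1+col2")
merge.Update()
</pre>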
"""
class vtkMergeDataObjectFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMergeDataObjectFilter - merge dataset and data object field to create dataset with attribute data
Super Class:
vtkDataSetAlgorithm
vtkMergeDataObjectFilter is a filter that merges the field from a
vtkDataObject with a vtkDataSet. The resulting combined dataset can
then be processed by other filters (e.g.,
vtkFieldDataToAttributeDataFilter) to create attribute data like
scalars, vectors, etc.
The filter operates as follows. The field data from the
vtkDataObject is merged with the input's vtkDataSet and then placed
in the output. You can choose to place the field data into the cell
data field, the point data field, or the dataset's field (i.e., the
one inherited from vtkDataSet's superclass vtkDataObject). All this
data shuffling occurs via reference counting, therefore memory is
not copied.
One of the uses of this filter is to allow you to read/generate the
structure of a dataset independent of the attributes. So, for
example, you could store the dataset geometry/topology in one file,
and field data in another. Then use this filter in combination with
vtkFieldDataToAttributeData to create a dataset ready for
processing in the visualization pipeline.
"""
class vtkMergeFields:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMergeFields - Merge multiple fields into one.
Super Class:
vtkDataSetAlgorithm
vtkMergeFields is used to merge multiple fields into one.
The new field is put in the same field data as the original field.
For example
@verbatim
mf->SetOutputField("foo", vtkMergeFields::POINT_DATA);
mf->SetNumberOfComponents(2);
mf->Merge(0, "array1", 1);
mf->Merge(1, "array2", 0);
@endverbatim
will tell vtkMergeFields to use the 2nd component of array1 and
the 1st component of array2 to create a 2 component field called foo.
The same can be done using Tcl:
@verbatim
mf SetOutputField foo POINT_DATA
mf Merge 0 array1 1
mf Merge 1 array2 0
Field locations: DATA_OBJECT, POINT_DATA, CELL_DATA
@endverbatim
See Also:
vtkFieldData vtkDataSet vtkDataObjectToDataSetFilter
vtkDataSetAttributes vtkDataArray vtkRearrangeFields
vtkSplitField vtkAssignAttribute
"""
class vtkMergeFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMergeFilter - extract separate components of data from different datasets
Super Class:
vtkDataSetAlgorithm
vtkMergeFilter is a filter that extracts separate components of data from
different datasets and merges them into a single dataset. The output from
this filter is of the same type as the input (i.e., vtkDataSet.) It treats
both cell and point data set attributes.
"""
class vtkMergeTables:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMergeTables - combine two tables
Super Class:
vtkTableAlgorithm
Combines the columns of two tables into one larger table.
The number of rows in the resulting table is the sum of the number of
rows in each of the input tables.
The number of columns in the output is generally the sum of the number
of columns in each input table, except in the case where column names
are duplicated in both tables.
In this case, if MergeColumnsByName is on (the default), the two columns
will be merged into a single column of the same name.
If MergeColumnsByName is off, both columns will exist in the output.
You may set the FirstTablePrefix and SecondTablePrefix to define how
the column names are modified. One of these prefixes may be the empty
string, but they must be different.
"""
class vtkMeshQuality:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMeshQuality - Calculate measures of quality of a mesh
Super Class:
vtkDataSetAlgorithm
vtkMeshQuality computes one or more measures of (geometric)
quality for each 2-D and 3-D cell (triangle, quadrilateral, tetrahedron,
or hexahedron) of a mesh. These measures of quality are then averaged
over the entire mesh. The minimum, average, maximum, and variance
of quality for each type of cell is stored in the output mesh's FieldData.
The FieldData arrays are named "Mesh Triangle Quality,"
"Mesh Quadrilateral Quality," "Mesh Tetrahedron Quality,"
and "Mesh Hexahedron Quality." Each array has a single tuple
with 5 components. The first 4 components are the quality statistics
mentioned above; the final value is the number of cells of the given type.
This final component makes aggregation of statistics for distributed
mesh data possible.
By default, the per-cell quality is added to the mesh's cell data, in
an array named "Quality." Cell types not supported by
this filter will have an entry of 0. Use SaveCellQualityOff() to
store only the final statistics.
This version of the filter, written by Philippe Pebay and David Thompson,
supersedes an older version written by Leila Baghdadi, Hanif Ladak, and
David Steinman at the Imaging Research Labs, Robarts Research Institute.
That version only supported tetrahedral radius ratio. See the
CompatibilityModeOn() member for information on how to make this filter
behave like the previous implementation.
For more information on the triangle quality measures of this class, cf.
Pebay & Baker 2003, Analysis of triangle quality measures, Math Comp 72:244.
For more information on the quadrangle quality measures of this class, cf.
Pebay 2004, Planar Quadrangle Quality Measures, Eng Comp 20:2.
Caveats:
While more general than before, this class does not address many
cell types, including wedges and pyramids in 3D and triangle strips
and fans in 2D (among others).
Most quadrilateral quality measures are intended for planar quadrilaterals
only.
The minimal angle is not, strictly speaking, a quality measure, but it is
provided because of its usage by many authors.
"""
class vtkMetaImageReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkMetaImageReader - read binary UNC meta image data
Super Class:
vtkImageReader2
One of the formats for which a reader is already available in the toolkit is
the MetaImage file format. This is a fairly simple yet powerful format
consisting of a text header and a binary data section. The following
instructions describe how you can write a MetaImage header for the data that
you download from the BrainWeb page.
The minimal structure of the MetaImage header is the following:
NDims = 3
DimSize = 181 217 181
ElementType = MET_UCHAR
ElementSpacing = 1.0 1.0 1.0
ElementByteOrderMSB = False
ElementDataFile = brainweb1.raw
* NDims indicates that this is a 3D image. ITK can handle images of
arbitrary dimension.
* DimSize indicates the size of the volume in pixels along each
direction.
* ElementType indicates the primitive type used for pixels. In this case
it is "unsigned char", implying that the data is digitized in 8 bits /
pixel.
* ElementSpacing indicates the physical separation between the center of
one pixel and the center of the next pixel along each direction in space.
The units used are millimeters.
* ElementByteOrderMSB indicates whether the data is encoded in little or big
endian order. You might want to play with this value when moving data
between different computer platforms.
* ElementDataFile is the name of the file containing the raw binary data
of the image. This file must be in the same directory as the header.
MetaImage headers are expected to have extension: ".mha" or ".mhd"
Once you write this header text file, it should be possible to read the
image into your ITK based application using the itk::FileIOToImageFilter
class.
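Reading such a file from VTK itself might look like this minimal sketch
(not part of the original documentation; VTK Python bindings, with a
placeholder file name):
<pre>
import vtk
reader = vtk.vtkMetaImageReader()
reader.SetFileName("brainweb1.mhd")
reader.Update()
image = reader.GetOutput()
</pre>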
"""
class vtkMetaImageWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkMetaImageWriter - write a binary UNC meta image data
Super Class:
vtkImageWriter
One of the formats for which a reader is already available in the toolkit is
the MetaImage file format. This is a fairly simple yet powerful format
consisting of a text header and a binary data section. The following
instructions describe how you can write a MetaImage header for the data that
you download from the BrainWeb page.
The minimal structure of the MetaImage header is the following:
NDims = 3
DimSize = 181 217 181
ElementType = MET_UCHAR
ElementSpacing = 1.0 1.0 1.0
ElementByteOrderMSB = False
ElementDataFile = brainweb1.raw
* NDims indicates that this is a 3D image. ITK can handle images of
arbitrary dimension.
* DimSize indicates the size of the volume in pixels along each
direction.
* ElementType indicates the primitive type used for pixels. In this case
it is "unsigned char", implying that the data is digitized in 8 bits /
pixel.
* ElementSpacing indicates the physical separation between the center of
one pixel and the center of the next pixel along each direction in space.
The units used are millimeters.
* ElementByteOrderMSB indicates whether the data is encoded in little or big
endian order. You might want to play with this value when moving data
between different computer platforms.
* ElementDataFile is the name of the file containing the raw binary data
of the image. This file must be in the same directory as the header.
MetaImage headers are expected to have extension: ".mha" or ".mhd"
Once you write this header text file, it should be possible to read the
image into your ITK based application using the itk::FileIOToImageFilter
class.
See Also:
vtkImageWriter vtkMetaImageReader
"""
class vtkMultiBlockDataSetAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiBlockDataSetAlgorithm - Superclass for algorithms that produce only vtkMultiBlockDataSet as output
Super Class:
vtkAlgorithm
Algorithms that take any type of data object (including composite dataset)
and produce a vtkMultiBlockDataSet in the output can subclass from this
class.
"""
class vtkMultiBlockMergeFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiBlockMergeFilter - merges multiblock inputs into a single multiblock output
Super Class:
vtkMultiBlockDataSetAlgorithm
vtkMultiBlockMergeFilter is an M to 1 filter similar to
vtkMultiGroupDataGroupFilter. However, whereas that class creates N groups
in the output for N inputs, this creates 1 group in the output with N
datasets inside it. In actuality, if the inputs have M groups, this will
produce M groups, each of which has N datasets. Inside the merged group,
the i'th data set comes from the i'th data set in the i'th input.
"""
class vtkMultiBlockPLOT3DReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkMultiBlockPLOT3DReader - read PLOT3D data files
Super Class:
vtkMultiBlockDataSetAlgorithm
vtkMultiBlockPLOT3DReader is a reader object that reads PLOT3D formatted
files and generates structured grid(s) on output. PLOT3D is a computer
graphics program designed to visualize the grids and solutions of
computational fluid dynamics. Please see the "PLOT3D User's Manual"
available from NASA Ames Research Center, Moffett Field CA.
PLOT3D files consist of a grid file (also known as XYZ file), an
optional solution file (also known as a Q file), and an optional function
file that contains user created data (currently unsupported). The Q file
contains solution information as follows: the four parameters free stream
mach number (Fsmach), angle of attack (Alpha), Reynolds number (Re), and
total integration time (Time). This information is stored in an array
called Properties in the FieldData of each output (tuple 0: fsmach, tuple 1:
alpha, tuple 2: re, tuple 3: time). In addition, the solution file contains
the flow density (scalar), flow momentum (vector), and flow energy (scalar).
The reader can generate additional scalars and vectors (or "functions")
from this information. To use vtkMultiBlockPLOT3DReader, you must specify the
particular function number for the scalar and vector you want to visualize.
This implementation of the reader provides the following functions. The
scalar functions are:
-1 - don't read or compute any scalars
100 - density
110 - pressure
120 - temperature
130 - enthalpy
140 - internal energy
144 - kinetic energy
153 - velocity magnitude
163 - stagnation energy
170 - entropy
184 - swirl.
The vector functions are:
-1 - don't read or compute any vectors
200 - velocity
201 - vorticity
202 - momentum
210 - pressure gradient.
(Other functions are described in the PLOT3D spec, but only those listed are
implemented here.) Note that by default, this reader creates the density
scalar (100) and momentum vector (202) as output. (These are just read in
from the solution file.) Please note that the validity of computation is
a function of this class's gas constants (R, Gamma) and the equations used.
They may not be suitable for your computational domain.
Additionally, you can read other data and associate it as a vtkDataArray
into the output's point attribute data. Use the method AddFunction()
to list all the functions that you'd like to read. AddFunction() accepts
an integer parameter that defines the function number.
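A minimal sketch (not from the original documentation) with the VTK Python
bindings; the file names are placeholders:
<pre>
import vtk
r = vtk.vtkMultiBlockPLOT3DReader()
r.SetXYZFileName("combxyz.bin")   # grid (XYZ) file
r.SetQFileName("combq.bin")       # solution (Q) file
r.SetScalarFunctionNumber(110)    # pressure
r.SetVectorFunctionNumber(200)    # velocity
r.AddFunction(153)                # also read velocity magnitude
r.Update()
</pre>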
See Also:
vtkStructuredGridSource vtkStructuredGrid
"""
class vtkMultiGroupDataExtractDataSets:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiGroupDataExtractDataSets - extract a number of datasets
Super Class:
vtkMultiGroupDataSetAlgorithm
vtkMultiGroupDataExtractDataSets extracts the user specified list
of datasets from a multi-group dataset.
"""
class vtkMultiGroupDataExtractGroup:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiGroupDataExtractGroup - extact groups between min and max
Super Class:
vtkMultiGroupDataSetAlgorithm
vtkMultiGroupDataExtractGroup is a filter that extracts groups
between user specified min and max.
"""
class vtkMultiGroupDataGeometryFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiGroupDataGeometryFilter - extract geometry from multi-group data
Super Class:
vtkPolyDataAlgorithm
vtkMultiGroupDataGeometryFilter applies vtkGeometryFilter to all
groups in vtkMultiGroupData. Place this filter at the end of a
pipeline before a polydata consumer such as a polydata mapper to extract
geometry from all blocks and append them to one polydata object.
"""
class vtkMultiGroupDataGroupFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiGroupDataGroupFilter - collects multiple inputs into one multi-group dataset
Super Class:
vtkMultiGroupDataSetAlgorithm
vtkMultiGroupDataGroupFilter is an M to 1 filter that merges multiple
inputs into one multi-group dataset. It will assign each input to
one group of the multi-group dataset and will assign each update piece
as a sub-block. For example, if there are two inputs and four update
pieces, the output contains two groups with four datasets each.
"""
class vtkMultiGroupDataGroupIdScalars:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiGroupDataGroupIdScalars - generate scalars from groups
Super Class:
vtkMultiGroupDataSetAlgorithm
vtkMultiGroupDataGroupIdScalars is a filter that generates scalars
using multi-group data group information. For example, it will assign
a vtkUnsignedCharArray named GroupIdScalars with value 0 to all
datasets in group 0.
"""
class vtkMultiGroupDataSetAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiGroupDataSetAlgorithm - Superclass for algorithms that produce only vtkMultiGroupDataSet as output
Super Class:
vtkAlgorithm
Algorithms that take any type of data object (including composite dataset)
and produce a vtkMultiGroupDataSet in the output can subclass from this
class.
"""
class vtkMultiGroupProbeFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiGroupProbeFilter - subclass of vtkProbeFilter which supports
Super Class:
vtkProbeFilter
vtkMultiGroupProbeFilter supports probing into multi-group datasets.
It sequentially probes through each concrete dataset within the multigroup
dataset, probing only at those locations at which there were no hits when
probing earlier datasets. For hierarchical datasets, this traversal through
leaf datasets is done in reverse order of levels, i.e., highest level first.
"""
class vtkMultiThreshold:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkMultiThreshold - Threshold cells within multiple intervals
Super Class:
vtkMultiBlockDataSetAlgorithm
This filter can be substituted for a chain of several vtkThreshold filters
and can also perform more sophisticated subsetting operations.
It generates a vtkMultiBlockDataSet as its output.
This multiblock dataset contains a vtkUnstructuredGrid for each thresholded
subset you request.
A thresholded subset can be a set defined by an interval over a
point or cell attribute of the mesh; these subsets are called IntervalSets.
A thresholded subset can also be a boolean combination of one or more IntervalSets;
these subsets are called BooleanSets.
BooleanSets allow complex logic since their output
can depend on multiple intervals over multiple variables
defined on the input mesh.
This is useful because it eliminates the need for thresholding several
times and then appending the results, as can be required with vtkThreshold
when one wants to remove some range of values (e.g., a notch filter).
Cells are not repeated when they belong to more than one interval unless
those intervals have different output grids.
Another advantage this filter provides over vtkThreshold is the ability
to threshold on non-scalar (i.e., vector, tensor, etc.) attributes without
first computing an array containing some norm of the desired attribute.
vtkMultiThreshold provides \f$L_1\f$, \f$L_2\f$, and \f$L_{\infty}\f$ norms.
This filter makes a distinction between intermediate subsets and
subsets that will be output to a grid.
Each intermediate subset you create with AddIntervalSet or
AddBooleanSet is given a unique integer identifier (via the return
values of these member functions).
If you wish for a given set to be output, you must call
OutputSet and pass it one of these identifiers.
The return of OutputSet is the integer index of the output set
in the multiblock dataset created by this filter.
For example, if an input mesh defined three attributes T, P, and s, one might
wish to find cells that satisfy "T < 320 [K] && ( P > 101 [kPa] || s < 0.1 [kJ/kg/K] )".
To accomplish this with a vtkMultiThreshold filter,
<pre>
vtkMultiThreshold* thr;
int intervalSets[3];
intervalSets[0] = thr->AddIntervalSet( vtkMath::NegInf(), 320., vtkMultiThreshold::CLOSED, vtkMultiThreshold::OPEN,
vtkDataObject::FIELD_ASSOCIATION_POINTS, "T", 0, 1 );
intervalSets[1] = thr->AddIntervalSet( 101., vtkMath::Inf(), vtkMultiThreshold::OPEN, vtkMultiThreshold::CLOSED,
vtkDataObject::FIELD_ASSOCIATION_CELLS, "P", 0, 1 );
intervalSets[2] = thr->AddIntervalSet( vtkMath::NegInf(), 0.1, vtkMultiThreshold::CLOSED, vtkMultiThreshold::OPEN,
vtkDataObject::FIELD_ASSOCIATION_POINTS, "s", 0, 1 );
int intermediate = thr->AddBooleanSet( vtkMultiThreshold::OR, 2, &intervalSets[1] );
int intersection[2];
intersection[0] = intervalSets[0];
intersection[1] = intermediate;
int outputSet = thr->AddBooleanSet( vtkMultiThreshold::AND, 2, intersection );
int outputGridIndex = thr->OutputSet( outputSet );
thr->Update();
</pre>
The result of this filter will be a multiblock dataset that contains a single child with the desired cells.
If we had also called <code>thr->OutputSet( intervalSets[0] );</code>, there would be two child meshes and
one would contain all cells with T < 320 [K].
In that case, the output can be represented by this graph
\dot
digraph MultiThreshold {
set0 [shape=rect,style=filled,label="point T(0) in [-Inf,320["]
set1 [shape=rect,label="cell P(0) in ]101,Inf]"]
set2 [shape=rect,label="point s(0) in [-Inf,0.1["]
set3 [shape=rect,label="OR"]
set4 [shape=rect,style=filled,label="AND"]
set0 -> set4
set1 -> set3
set2 -> set3
set3 -> set4
}
\enddot
The filled rectangles represent sets that are output.
"""
class vtkOBBDicer:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkOBBDicer - divide dataset into spatially aggregated pieces
Super Class:
vtkDicer
vtkOBBDicer separates the cells of a dataset into spatially
aggregated pieces using an Oriented Bounding Box (OBB). These pieces
can then be operated on by other filters (e.g., vtkThreshold). One
application is to break very large polygonal models into pieces and
then perform viewing and occlusion culling on the pieces.
Refer to the superclass documentation (vtkDicer) for more information.
See Also:
vtkDicer vtkConnectedDicer
"""
class vtkOBJReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkOBJReader - read Wavefront .obj files
Super Class:
vtkPolyDataAlgorithm
vtkOBJReader is a source object that reads Wavefront .obj
files. The output of this source object is polygonal data.
See Also:
vtkOBJImporter
"""
class vtkOpenFOAMReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkOpenFOAMReader - reads a dataset in OpenFOAM format
Super Class:
vtkMultiBlockDataSetAlgorithm
vtkOpenFOAMReader creates a multiblock dataset. It reads a controlDict
file, mesh information, and time dependent data. The controlDict file
contains timestep information. The polyMesh folders contain mesh information.
The time folders contain transient data for the cells. Each folder can
contain any number of data files.
"""
class vtkOpenGLTexture:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkOpenGLTexture - OpenGL texture map
Super Class:
vtkTexture
vtkOpenGLTexture is a concrete implementation of the abstract class
vtkTexture. vtkOpenGLTexture interfaces to the OpenGL rendering library.
"""
class vtkOutlineCornerFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkOutlineCornerFilter - create wireframe outline corners for arbitrary data set
Super Class:
vtkPolyDataAlgorithm
vtkOutlineCornerFilter is a filter that generates wireframe outline corners of any
data set. The outline consists of the eight corners of the dataset
bounding box.
"""
class vtkOutlineCornerSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkOutlineCornerSource - create wireframe outline corners around bounding box
Super Class:
vtkOutlineSource
vtkOutlineCornerSource creates wireframe outline corners around a user-specified
bounding box.
"""
class vtkOutlineFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkOutlineFilter - create wireframe outline for arbitrary data set
Super Class:
vtkPolyDataAlgorithm
vtkOutlineFilter is a filter that generates a wireframe outline of any
data set. The outline consists of the twelve edges of the dataset
bounding box.
"""
class vtkOutlineSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkOutlineSource - create wireframe outline around bounding box
Super Class:
vtkPolyDataAlgorithm
vtkOutlineSource creates a wireframe outline around a
user-specified bounding box. The outline may be created aligned
with the {x,y,z} axis - in which case it is defined by the 6 bounds
{xmin,xmax,ymin,ymax,zmin,zmax} via SetBounds(). Alternatively, the
box may be arbitrarily aligned, in which case it should be set via
the SetCorners() member.
"""
class vtkPDBReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkPDBReader - read Molecular Data files
Super Class:
vtkMoleculeReaderBase
vtkPDBReader is a source object that reads Molecule files.
The FileName must be specified.
.SECTION Thanks
Thanks to Dr. Jean M. Favre, who developed and contributed this class.
"""
class vtkPExodusIIReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkPExodusIIReader - Read Exodus II files (.exii)
Super Class:
vtkExodusIIReader
vtkPExodusIIReader is an unstructured grid source object that reads
ExodusII files. Most of the meta data associated with the
file is loaded when UpdateInformation is called. This includes
information like Title, number of blocks, number and names of
arrays. This data can be retrieved from methods in this
reader. Separate arrays that are meant to be a single vector are
combined internally for convenience. To be combined, the array
names have to be identical except for a trailing X, Y, or Z (or
x, y, z). By default all cell and point arrays are loaded. However,
the user can flag arrays not to load with the methods
"SetPointDataArrayLoadFlag" and "SetCellDataArrayLoadFlag". The
reader responds to piece requests by loading only a range of the
possible blocks. Unused points are filtered out internally.
"""
class vtkPLOT3DReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkPLOT3DReader - read PLOT3D data files
Super Class:
vtkStructuredGridSource
vtkPLOT3DReader is a reader object that reads PLOT3D formatted files and
generates structured grid(s) on output. PLOT3D is a computer graphics
program designed to visualize the grids and solutions of computational
fluid dynamics. Please see the "PLOT3D User's Manual" available from
NASA Ames Research Center, Moffett Field CA.
PLOT3D files consist of a grid file (also known as XYZ file), an
optional solution file (also known as a Q file), and an optional function
file that contains user created data (currently unsupported). The Q file
contains solution information as follows: the four parameters free stream
mach number (Fsmach), angle of attack (Alpha), Reynolds number (Re), and
total integration time (Time). This information is stored in an array
called Properties in the FieldData of each output (tuple 0: fsmach, tuple 1:
alpha, tuple 2: re, tuple 3: time). In addition, the solution file contains
the flow density (scalar), flow momentum (vector), and flow energy (scalar).
The reader can generate additional scalars and vectors (or "functions")
from this information. To use vtkPLOT3DReader, you must specify the
particular function number for the scalar and vector you want to visualize.
This implementation of the reader provides the following functions. The
scalar functions are:
-1 - don't read or compute any scalars
100 - density
110 - pressure
120 - temperature
130 - enthalpy
140 - internal energy
144 - kinetic energy
153 - velocity magnitude
163 - stagnation energy
170 - entropy
184 - swirl.
The vector functions are:
-1 - don't read or compute any vectors
200 - velocity
201 - vorticity
202 - momentum
210 - pressure gradient.
(Other functions are described in the PLOT3D spec, but only those listed are
implemented here.) Note that by default, this reader creates the density
scalar (100) and momentum vector (202) as output. (These are just read in
from the solution file.) Please note that the validity of computation is
a function of this class's gas constants (R, Gamma) and the equations used.
They may not be suitable for your computational domain.
Additionally, you can read other data and associate it as a vtkDataArray
into the output's point attribute data. Use the method AddFunction()
to list all the functions that you'd like to read. AddFunction() accepts
an integer parameter that defines the function number.
See Also:
vtkStructuredGridSource vtkStructuredGrid
"""
class vtkPLYReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkPLYReader - read Stanford University PLY polygonal file format
Super Class:
vtkPolyDataAlgorithm
vtkPLYReader is a source object that reads polygonal data in
Stanford University PLY file format (see
http://graphics.stanford.edu/data/3Dscanrep). It requires that
the elements "vertex" and "face" are defined. The "vertex" element
must have the properties "x", "y", and "z". The "face" element must
have the property "vertex_indices" defined. Optionally, if the "face"
element has the properties "intensity" and/or the triplet "red",
"green", and "blue"; these are read and added as scalars to the
output data.
See Also:
vtkPLYWriter
"""
class vtkPLYWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkPLYWriter - write Stanford PLY file format
Super Class:
vtkPolyDataWriter
vtkPLYWriter writes polygonal data in Stanford University PLY format
(see http://graphics.stanford.edu/data/3Dscanrep/). The data can be
written in either binary (little or big endian) or ASCII representation.
As for PointData and CellData, vtkPLYWriter cannot handle normals or
vectors. It only handles RGB PointData and CellData. You need to set the
name of the array (using SetName for the array and SetArrayName for the
writer). If the array is not a vtkUnsignedCharArray with 3 components,
you need to specify a vtkLookupTable to map the scalars to RGB.
Caveats:
PLY does not handle big endian versus little endian correctly. Also,
this class is compiled into VTK only if the PLY library is found
during the make process (using CMake).
See Also:
vtkPLYReader
"""
class vtkPNGReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkPNGReader - read PNG files
Super Class:
vtkImageReader2
vtkPNGReader is a source object that reads PNG files.
It should be able to read most PNG files.
See Also:
vtkPNGWriter
"""
class vtkPNGWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkPNGWriter - Writes PNG files.
Super Class:
vtkImageWriter
vtkPNGWriter writes PNG files. It supports 1 to 4 component data of
unsigned char or unsigned short
See Also:
vtkPNGReader
"""
class vtkPNMReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkPNMReader - read pnm (i.e., portable anymap) files
Super Class:
vtkImageReader
vtkPNMReader is a source object that reads pnm (portable anymap) files.
This includes .pbm (bitmap), .pgm (grayscale), and .ppm (pixmap) files.
(Currently this object only reads binary versions of these files.)
PNMReader creates structured point datasets. The dimension of the
dataset depends upon the number of files read. Reading a single file
results in a 2D image, while reading more than one file results in a
3D volume.
To read a volume, files must be of the form "FileName.<number>" (e.g.,
foo.ppm.0, foo.ppm.1, ...). You must also specify the DataExtent. The
fifth and sixth values of the DataExtent specify the beginning and ending
files to read.
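A short sketch (not from the original documentation) with the VTK Python
bindings for the volume case; the file prefix and extents are placeholders,
and the fifth and sixth DataExtent values pick the first and last file
indices, per the description above:
<pre>
import vtk
reader = vtk.vtkPNMReader()
reader.SetFilePrefix("foo.ppm")              # files foo.ppm.0, foo.ppm.1, ...
reader.SetDataExtent(0, 255, 0, 255, 0, 9)   # read files 0 through 9
reader.Update()
volume = reader.GetOutput()
</pre>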
"""
class vtkPNMWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkPNMWriter - Writes PNM (portable any map) files.
Super Class:
vtkImageWriter
vtkPNMWriter writes PNM files. The data type
of the file is unsigned char regardless of the input type.
"""
class vtkParametricFunctionSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkParametricFunctionSource - tessellate parametric functions
Super Class:
vtkPolyDataAlgorithm
This class tessellates parametric functions. The user must specify how
many points in the parametric coordinate directions are required (i.e.,
the resolution), and the mode to use to generate scalars.
.SECTION Thanks
Andrew Maclean a.maclean@cas.edu.au for creating and contributing the
class.
See Also:
vtkParametricFunction
Implementation of parametrics for 1D lines:
vtkParametricSpline
Subclasses of vtkParametricFunction implementing non-orientable surfaces:
vtkParametricBoy vtkParametricCrossCap vtkParametricFigure8Klein
vtkParametricKlein vtkParametricMobius vtkParametricRoman
Subclasses of vtkParametricFunction implementing orientable surfaces:
vtkParametricConicSpiral vtkParametricDini vtkParametricEllipsoid
vtkParametricEnneper vtkParametricRandomHills vtkParametricSuperEllipsoid
vtkParametricSuperToroid vtkParametricTorus
"""
class vtkParticleReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkParticleReader - Read ASCII or binary particle
Super Class:
vtkPolyDataAlgorithm
vtkParticleReader reads either a binary or a text file of
particles. Each particle can have associated with it an optional
scalar value. So the format is: x, y, z, scalar
(all floats or doubles). The text file can consist of a comma
delimited set of values. In most cases vtkParticleReader can
automatically determine whether the file is text or binary.
The data can be either float or double.
Progress updates are provided.
With respect to binary files, random access into the file to read
pieces is supported.
"""
class vtkPiecewiseFunctionAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPiecewiseFunctionAlgorithm - Superclass for algorithms that produce only piecewise function as output
Super Class:
vtkAlgorithm
"""
class vtkPiecewiseFunctionShiftScale:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPiecewiseFunctionShiftScale -
Super Class:
vtkPiecewiseFunctionAlgorithm
"""
class vtkPlaneSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPlaneSource - create an array of quadrilaterals located in a plane
Super Class:
vtkPolyDataAlgorithm
vtkPlaneSource creates an m x n array of quadrilaterals arranged as
a regular tiling in a plane. The plane is defined by specifying an
origin point, and then two other points that, together with the
origin, define two axes for the plane. These axes do not have to be
orthogonal - so you can create a parallelogram. (The axes must not
be parallel.) By default, the plane is centered at the origin and
perpendicular to the z-axis, with width and height of length 1. The
resolution of the plane (i.e., number of subdivisions) is
controlled by the ivars XResolution and YResolution.
There are three convenience methods that allow you to easily move the
plane. The first, SetNormal(), allows you to specify the plane
normal. The effect of this method is to rotate the plane around the center
of the plane, aligning the plane normal with the specified normal. The
rotation is about the axis defined by the cross product of the current
normal with the new normal. The second, SetCenter(), translates the center
of the plane to the specified center point. The third method, Push(),
allows you to translate the plane along the plane normal by the distance
specified. (Negative Push values translate the plane in the negative
normal direction.) Note that the SetNormal(), SetCenter() and Push()
methods modify the Origin, Point1, and/or Point2 instance variables.
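A compact sketch (not part of the original documentation) with the VTK
Python bindings; all coordinate values are arbitrary:
<pre>
import vtk
plane = vtk.vtkPlaneSource()
plane.SetXResolution(10)
plane.SetYResolution(10)
plane.SetOrigin(0.0, 0.0, 0.0)
plane.SetPoint1(1.0, 0.0, 0.0)   # first axis
plane.SetPoint2(0.0, 1.0, 0.0)   # second axis
plane.SetCenter(0.5, 0.5, 2.0)   # translate the center
plane.SetNormal(0.0, 0.0, 1.0)   # rotate to align with this normal
plane.Push(0.25)                 # move along the plane normal
plane.Update()
</pre>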
Caveats:
The normal to the plane will point in the direction of the cross product
of the first axis (Origin->Point1) with the second (Origin->Point2). This
also affects the normals to the generated polygons.
"""
class vtkPlatonicSolidSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPlatonicSolidSource - produce polygonal Platonic solids
Super Class:
vtkPolyDataAlgorithm
vtkPlatonicSolidSource can generate each of the five Platonic solids:
tetrahedron, cube, octahedron, icosahedron, and dodecahedron. Each of the
solids is placed inside a sphere centered at the origin with radius 1.0.
To use this class, simply specify the solid to create. Note that this
source object creates cell scalars that are (integral value) face numbers.
"""
class vtkPointDataToCellData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPointDataToCellData - map point data to cell data
Super Class:
vtkDataSetAlgorithm
vtkPointDataToCellData is a filter that transforms point data (i.e., data
specified per point) into cell data (i.e., data specified per cell).
The method of transformation is based on averaging the data
values of all points defining a particular cell. Optionally, the input point
data can be passed through to the output as well.
Caveats:
This filter is an abstract filter, that is, the output is an abstract type
(i.e., vtkDataSet). Use the convenience methods (e.g.,
GetPolyDataOutput(), GetStructuredPointsOutput(), etc.) to get the type
of output you want.
See Also:
vtkPointData vtkCellData vtkCellDataToPointData
"""
class vtkPointLoad:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPointLoad - compute stress tensors given point load on semi-infinite domain
Super Class:
vtkImageAlgorithm
vtkPointLoad is a source object that computes stress tensors on a volume.
The tensors are computed from the application of a point load on a
semi-infinite domain. (The analytical results are adapted from Saada - see
text.) It also is possible to compute effective stress scalars if desired.
This object serves as a specialized data generator for some of the examples
in the text.
See Also:
vtkTensorGlyph, vtkHyperStreamline
"""
class vtkPointSetAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPointSetAlgorithm - Superclass for algorithms that produce output of the same type as input
Super Class:
vtkAlgorithm
vtkPointSetAlgorithm is a convenience class to make writing algorithms
easier. It is also designed to help transition old algorithms to the new
pipeline architecture. There are some assumptions and defaults made by this
class you should be aware of. This class defaults such that your filter
will have one input port and one output port. If that is not the case
simply change it with SetNumberOfInputPorts etc. See this class's
constructor for the default. This class also provides a FillInputPortInfo
method that by default says that all inputs will be PointSet. If that
isn't the case then please override this method in your subclass. This
class breaks out the downstream requests into separate functions such as
RequestDataObject, RequestData and ExecuteInformation. The default
implementation of RequestDataObject will create an output data of the
same type as the input.
"""
class vtkPointSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPointSource - create a random cloud of points
Super Class:
vtkPolyDataAlgorithm
vtkPointSource is a source object that creates a user-specified number
of points within a specified radius about a specified center point.
By default the location of the points is random within the sphere. It is
also possible to generate random points only on the surface of the
sphere.
"""
class vtkPolyDataAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPolyDataAlgorithm - Superclass for algorithms that produce only polydata as output
Super Class:
vtkAlgorithm
"""
class vtkPolyDataConnectivityFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPolyDataConnectivityFilter - extract polygonal data based on geometric connectivity
Super Class:
vtkPolyDataAlgorithm
vtkPolyDataConnectivityFilter is a filter that extracts cells that
share common points and/or satisfy a scalar threshold
criterion. (Such a group of cells is called a region.) The filter
works in one of six ways: 1) extract the largest connected region
in the dataset; 2) extract specified region numbers; 3) extract all
regions sharing specified point ids; 4) extract all regions sharing
specified cell ids; 5) extract the region closest to the specified
point; or 6) extract all regions (used to color regions).
This filter is specialized for polygonal data. This means it runs a bit
faster and it is easier to construct visualization networks that process
polygonal data.
The behavior of vtkPolyDataConnectivityFilter can be modified by turning
on the boolean ivar ScalarConnectivity. If this flag is on, the
connectivity algorithm is modified so that cells are considered connected
only if 1) they are geometrically connected (share a point) and 2) the
scalar values of one of the cell's points falls in the scalar range
specified. This use of ScalarConnectivity is particularly useful for
selecting cells for later processing.
See Also:
vtkConnectivityFilter
"""
class vtkPolyDataNormals:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPolyDataNormals - compute normals for polygonal mesh
Super Class:
vtkPolyDataAlgorithm
vtkPolyDataNormals is a filter that computes point normals for a polygonal
mesh. The filter can reorder polygons to insure consistent orientation
across polygon neighbors. Sharp edges can be split and points duplicated
with separate normals to give crisp (rendered) surface definition. It is
also possible to globally flip the normal orientation.
The algorithm works by determining normals for each polygon and then
averaging them at shared points. When sharp edges are present, the edges
are split and new points generated to prevent blurry edges (due to
Gouraud shading).
Caveats:
Normals are computed only for polygons and triangle strips. Normals are
not computed for lines or vertices.
Triangle strips are broken up into triangle polygons. You may want to
restrip the triangles.
"""
class vtkPolyDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkPolyDataReader - read vtk polygonal data file
Super Class:
vtkDataReader
vtkPolyDataReader is a source object that reads ASCII or binary
polygonal data files in vtk format (see text for format details).
The output of this reader is a single vtkPolyData data object.
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkPolyData vtkDataReader
"""
class vtkPolyDataStreamer:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPolyDataStreamer - Stream appends input pieces to the output.
Super Class:
vtkPolyDataAlgorithm
vtkPolyDataStreamer initiates streaming by requesting pieces from its
single input and appending these pieces to the requested output.
Note that since vtkPolyDataStreamer uses an append filter, all the
polygons generated have to be kept in memory before rendering. If
these do not fit in the memory, it is possible to make the vtkPolyDataMapper
stream. Since the mapper will render each piece separately, all the
polygons do not have to be stored in memory.
Note:
The output may be slightly different if the pipeline does not handle
ghost cells properly (i.e. you might see seams between the pieces).
See Also:
vtkAppendFilter
"""
class vtkPolyDataToImageStencil:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPolyDataToImageStencil - clip an image with polydata
Super Class:
vtkImageStencilSource
vtkPolyDataToImageStencil will convert a vtkPolyData into an image
that can be used with vtkImageStencil or other vtk classes that apply
a stencil to an image.
See Also:
vtkPolyData vtkImageStencil vtkImplicitFunctionToImageStencil
"""
class vtkPolyDataWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkPolyDataWriter - write vtk polygonal data
Super Class:
vtkDataWriter
vtkPolyDataWriter is a source object that writes ASCII or binary
polygonal data files in vtk format. See text for format details.
Caveats:
Binary files written on one system may not be readable on other systems.
"""
class vtkPostScriptWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkPostScriptWriter - Writes an image as a PostScript file.
Super Class:
vtkImageWriter
vtkPostScriptWriter writes an image as a PostScript file using some
reasonable scalings and centered on the page which is assumed to be
about 8.5 by 11 inches. This is based loosely on the code from
pnmtops.c. Right now there aren't any real options.
"""
class vtkProbeFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkProbeFilter - sample data values at specified point locations
Super Class:
vtkDataSetAlgorithm
vtkProbeFilter is a filter that computes point attributes (e.g., scalars,
vectors, etc.) at specified point positions. The filter has two inputs:
the Input and Source. The Input geometric structure is passed through the
filter. The point attributes are computed at the Input point positions
by interpolating into the source data. For example, we can compute data
values on a plane (plane specified as Input) from a volume (Source).
The cell data of the source is copied to the output based on which
source cell contains each input point. If an array of the same name exists
in both the source's point and cell data, only the one from the point data
is probed.
This filter can be used to resample data, or convert one dataset form into
another. For example, an unstructured grid (vtkUnstructuredGrid) can be
probed with a volume (three-dimensional vtkImageData), and then volume
rendering techniques can be used to visualize the results. Another example:
a line or curve can be used to probe data to produce x-y plots along
that line or curve.
"""
class vtkProgrammableAttributeDataFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkProgrammableAttributeDataFilter - manipulate attribute (cell and point) data via a user-specified function
Super Class:
vtkDataSetAlgorithm
vtkProgrammableAttributeDataFilter is a filter that allows you to write a
custom procedure to manipulate attribute data - either point or cell
data. For example, you could generate scalars based on a complex formula;
convert vectors to normals; compute scalar values as a function of
vectors, texture coords, and/or any other point data attribute; and so
on. The filter takes multiple inputs (input plus an auxiliary input list),
so you can write procedures that combine several dataset point
attributes. Note that the output of the filter is the same type
(topology/geometry) as the input.
The filter works as follows. It operates like any other filter (i.e.,
checking and managing modified and execution times, processing Update()
and Execute() methods, managing release of data, etc.), but the difference
is that the Execute() method simply invokes a user-specified function with
an optional (void *) argument (typically the "this" pointer in C++). It is
also possible to specify a function to delete the argument via
ExecuteMethodArgDelete().
To use the filter, you write a procedure to process the input datasets,
process the data, and generate output data. Typically, this means grabbing
the input point or cell data (using GetInput() and maybe GetInputList()),
operating on it (creating new point and cell attributes such as scalars,
vectors, etc.), and then setting the point and/or cell attributes in the
output dataset (you'll need to use GetOutput() to access the output).
(Note: besides C++, it is possible to do the same thing in Tcl, Java, or
other languages that wrap the C++ core.) Remember, proper filter protocol
requires that you don't modify the input data - you create new output data
from the input.
Caveats:
This filter operates on any combination of the filter input plus a list of
additional inputs (at a minimum you must set the filter input via
SetInput()). It is up to you to check whether the input is valid, and to
ensure that the output is valid. Also, you have to write the control
structure for the traversal and operation on the point and cell attribute
data.
By default the output point and cell data will be copied through from the
input point data (using ref ...
[Truncated]
"""
class vtkProgrammableDataObjectSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkProgrammableDataObjectSource - generate source data object via a user-specified function
Super Class:
vtkDataObjectAlgorithm
vtkProgrammableDataObjectSource is a source object that is programmable by
the user. The output of the filter is a data object (vtkDataObject) which
represents data via an instance of field data. To use this object, you
must specify a function that creates the output.
Example use of this filter includes reading tabular data and encoding it
as vtkFieldData. You can then use filters like vtkDataObjectToDataSetFilter
to convert the data object to a dataset and then visualize it. Another
important use of this class is that it allows users of interpreters (e.g.,
Tcl or Java) the ability to write source objects without having to
recompile C++ code or generate new libraries.
See Also:
vtkProgrammableFilter vtkProgrammableAttributeDataFilter
vtkProgrammableSource vtkDataObjectToDataSetFilter
"""
class vtkProgrammableFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkProgrammableFilter - a user-programmable filter
Super Class:
vtkDataSetAlgorithm
vtkProgrammableFilter is a filter that can be programmed by the user. To
use the filter you define a function that retrieves input of the correct
type, creates data, and then manipulates the output of the filter. Using
this filter avoids the need for subclassing - and the function can be
defined in an interpreter wrapper language such as Tcl or Java.
The trickiest part of using this filter is that the input and output
methods are unusual and cannot be compile-time type checked. Instead, as a
user of this filter it is your responsibility to set and get the correct
input and output types.
Caveats:
The filter correctly manages modified time and network execution in most
cases. However, if you change the definition of the filter function,
you'll want to send a manual Modified() method to the filter to force it
to reexecute.
See Also:
vtkProgrammablePointDataFilter vtkProgrammableSource
"""
class vtkProgrammableGlyphFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkProgrammableGlyphFilter - control the generation and placement of glyphs at input points
Super Class:
vtkPolyDataAlgorithm
vtkProgrammableGlyphFilter is a filter that allows you to place a glyph at
each input point in the dataset. In addition, the filter is programmable
which means the user has control over the generation of the glyph. The
glyphs can be controlled via the point data attributes (e.g., scalars,
vectors, etc.) or any other information in the input dataset.
This is the way the filter works. You must define an input dataset which
at a minimum contains points with associated attribute values. Also, the
Source instance variable must be set which is of type vtkPolyData. Then,
for each point in the input, the PointId is set to the current point id,
and a user-defined function is called (i.e., GlyphMethod). In this method
you can manipulate the Source data (including changing to a different
Source object). After the GlyphMethod is called,
vtkProgrammableGlyphFilter will invoke an Update() on its Source object,
and then copy its data to the output of the
vtkProgrammableGlyphFilter. Therefore the output of this filter is of type
vtkPolyData.
Another option to this filter is the way you color the glyphs. You can use
the scalar data from the input or the source. The instance variable
ColorMode controls this behavior.
Caveats:
This filter operates on point data attributes. If you want to use cell
data attributes, use a filter like vtkCellCenters to generate points at
the centers of cells, and then use these points.
Note that the data attributes (cell and point) are passed to the output of
this filter from the Source object. This works well as long as you are not
changing the class of the Source object during execution. However, if the
class of the Source object changes, then the pot ...
[Truncated]
See Also:
vtkGlyph3D vtkTensorGlyph vtkCellCenters
"""
class vtkProgrammableSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkProgrammableSource - generate source dataset via a user-specified function
Super Class:
vtkDataSetAlgorithm
vtkProgrammableSource is a source object that is programmable by the
user. To use this object, you must specify a function that creates the
output. It is possible to generate an output dataset of any (concrete)
type; it is up to the function to properly initialize and define the
output. Typically, you use one of the methods to get a concrete output
type (e.g., GetPolyDataOutput() or GetStructuredPointsOutput()), and
then manipulate the output in the user-specified function.
Example uses of this include writing a function to read a data file or
interfacing to another system. (You might want to do this in favor of
deriving a new class.) Another important use of this class is that it
allows users of interpreters (e.g., Tcl or Java) the ability to write
source objects without having to recompile C++ code or generate new
libraries.
See Also:
vtkProgrammableFilter vtkProgrammableAttributeDataFilter
vtkProgrammableDataObjectSource
"""
class vtkProjectedTerrainPath:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkProjectedTerrainPath - project a polyline onto a terrain
Super Class:
vtkPolyDataAlgorithm
vtkProjectedTerrainPath projects an input polyline onto a terrain. (The
terrain is defined by a 2D height image and is the second input to the
filter.) The polyline projection is controlled via several modes as
follows. 1) Simple mode projects the polyline points onto the terrain,
taking into account the height offset instance variable. 2) Non-occluded
mode ensures that no part of the polyline is occluded by the terrain
(e.g. a line passes through a mountain). This may require recursive
subdivision of the polyline. 3) Hug mode ensures that the polyline points
remain within a constant distance from the surface. This may also require
recursive subdivision of the polyline. Note that both non-occluded mode
and hug mode also take into account the height offset, so it is possible
to create paths that hug terrain a certain distance above it. To use this
filter, define two inputs: 1) a polyline, and 2) an image whose scalar
values represent a height field. Then specify the mode, and the height
offset to use.
A description of the algorithm is as follows. The filter begins by
projecting the polyline points to the image (offset by the specified
height offset). If the mode is non-occluded or hug, then the maximum
error along each line segment is computed and placed into a priority
queue. Each line segment is then split at the point of maximum error, and
the two new line segments are evaluated for maximum error. This process
continues until the line is not occluded by the terrain (non-occluded
mode) or satisfies the error on variation from the surface (hug
mode). (Note this process is repeated for each polyline in the
input. Also, the maximum error is computed in two parts: a maximum
positive error and maximum negative error. If the polyline is above the
terrain--i.e., the height offset is positive--in non-occluded or hug mode
all negative errors are eliminated. If the polyline is below the
terrain--i.e., the height offset is negative--in non-occluded or hug mode
all positive errors are eliminated.)
Caveats:
This algorithm requires the entire input image to be in memory, hence it
may not work for extremely large images.
The input height image is assumed to be positioned in the x-y plane so the
scalar value is the z-coordinate, height value.
A priority queue is used so that the 1) the total number of line segments
can be controlled, and 2) the algorithm can terminate when the errors in
the queue are less than the specified error tolerance.
See Also:
vtkGreedyTerrainDecimation
"""
class vtkProjectedTexture:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkProjectedTexture - assign texture coordinates for a projected texture
Super Class:
vtkDataSetAlgorithm
vtkProjectedTexture assigns texture coordinates to a dataset as if
the texture were projected from a slide projector located somewhere in the
scene. Methods are provided to position the projector and aim it at a
location, to set the width of the projector's frustum, and to set the
range of texture coordinates assigned to the dataset.
Objects in the scene that appear behind the projector are also assigned
texture coordinates; the projected image is left-right and top-bottom
flipped, much as a lens' focus flips the rays of light that pass through
it. A warning is issued if a point in the dataset falls at the focus
of the projector.
"""
class vtkPruneTreeFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkPruneTreeFilter - prune a subtree out of a vtkTree
Super Class:
vtkTreeAlgorithm
Removes a subtree rooted at a particular vertex in a vtkTree.
"""
class vtkQuadricClustering:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkQuadricClustering - reduce the number of triangles in a mesh
Super Class:
vtkPolyDataAlgorithm
vtkQuadricClustering is a filter to reduce the number of triangles in a
triangle mesh, forming a good approximation to the original geometry. The
input to vtkQuadricClustering is a vtkPolyData object, and all types of
polygonal data are handled.
The algorithm used is the one described by Peter Lindstrom in his Siggraph
2000 paper, "Out-of-Core Simplification of Large Polygonal Models." The
general approach of the algorithm is to cluster vertices in a uniform
binning of space, accumulating the quadric of each triangle (pushed out to
the triangle's vertices) within each bin, and then determining an optimal
position for a single vertex in a bin by using the accumulated quadric. In
more detail, the algorithm first gets the bounds of the input poly data.
It then breaks this bounding volume into a user-specified number of
spatial bins. It then reads each triangle from the input and hashes its
vertices into these bins. (If this is the first time a bin has been
visited, initialize its quadric to the 0 matrix.) The algorithm computes
the error quadric for this triangle and adds it to the existing quadric of
the bin in which each vertex is contained. Then, if 2 or more vertices of
the triangle fall in the same bin, the triangle is discarded. If the
triangle is not discarded, it adds the triangle to the list of output
triangles as a list of vertex identifiers. (There is one vertex id per
bin.) After all the triangles have been read, the representative vertex
for each bin is computed (an optimal location is found) using the quadric
for that bin. This determines the spatial location of the vertices of
each of the triangles in the output.
To use this filter, specify the divisions defining the spatial subdivision
in the x, y, and z directions. You must also specify an input vtkPolyData.
Then choose to either 1) use the original points that minimize the quadric
error to produce the output triangles or 2) compute an optimal position in
each bin to produce the output triangles (recommended and default behavior).
This filter can take multiple inputs. To do this, the user must explicitly
call StartAppend, Append (once for each input), and EndAppend. StartAppend
sets up the data structure to hold the quadric matrices. Append processes
each triangle in the input poly data it was called on, hashes its vertices
to the appropriate bins, determines whether to keep this triangle, and
updates the appropriate quadric matrices. EndAppend determines the spatial
location of each of the representative vertices for the visited bins. While
this approach does not fit into the visualization architecture and requires
manual control, it has the advantage that extremely large data can be
processed in pieces and appended to the filter piece-by-piece.
Caveats:
This filter can drastically affect topology, i.e., topology is not
preserved.
The filter handles input triangle strips and arbitrary polygons. Arbitrary
polygons are assumed convex: during insertion they are triangulated using
a fan of triangles from the first point in the polygons. If the polygon is
concave, this can produce bad results. In this case, use vtkTriangleFilter
to triangulate the polygons first.
See Also:
vtkQuadricDecimation vtkDecimatePro vtkDecimate
"""
class vtkQuadricDecimation:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkQuadricDecimation - reduce the number of triangles in a mesh
Super Class:
vtkPolyDataAlgorithm
vtkQuadricDecimation is a filter to reduce the number of triangles in
a triangle mesh, forming a good approximation to the original geometry.
The input to vtkQuadricDecimation is a vtkPolyData object, and only
triangles are treated. If you desire to decimate polygonal meshes, first
triangulate the polygons with vtkTriangleFilter.
The algorithm is based on repeated edge collapses until the requested mesh
reduction is achieved. Edges are placed in a priority queue based on the
"cost" to delete the edge. The cost is an approximate measure of error
(distance to the original surface)--described by the so-called quadric
error measure. The quadric error measure is associated with each vertex of
the mesh and represents a matrix of planes incident on that vertex. The
distance of the planes to the vertex is the error in the position of the
vertex (originally the vertex error is zero). As edges are deleted, the
quadric error measure associated with the two end points of the edge are
summed (this combines the plane equations) and an optimal collapse point
can be computed. Edges connected to the collapse point are then reinserted
into the queue after computing the new cost to delete them. The process
continues until the desired reduction level is reached or topological
constraints prevent further reduction. Note that this basic algorithm can
be extended to higher dimensions by
taking into account variation in attributes (i.e., scalars, vectors, and
so on).
This filter is based on the work of Garland and Heckbert who first
presented the quadric error measure at Siggraph '97 in "Surface
Simplification Using Quadric Error Metrics". For details of the algorithm
Michael Garland's Ph.D. thesis is also recommended. Hugues Hoppe's Vis
'99 paper, "New Quadric Metric for Simplifying Meshes with Appearance
Attributes" is also a good take on the subject, especially as it pertains
to the error metric applied to attributes.
Thanks:
Thanks to Bradley Lowekamp of the National Library of Medicine/NIH for
contributing this class.
"""
class vtkQuantizePolyDataPoints:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkQuantizePolyDataPoints - quantizes x,y,z coordinates of points
Super Class:
vtkCleanPolyData
vtkQuantizePolyDataPoints is a subclass of vtkCleanPolyData and
inherits the functionality of vtkCleanPolyData with the addition that
it quantizes the point coordinates before inserting into the point list.
The user should set QFactor to a positive value (0.25 by default) and all
{x,y,z} coordinates will be quantized to that grain size.
A tolerance of zero is expected, though positive values may be used; the
quantization will take place before the tolerance is applied.
Caveats:
Merging points can alter topology, including introducing non-manifold
forms. Handling of degenerate cells is controlled by switches in
vtkCleanPolyData.
See Also:
vtkCleanPolyData
"""
class vtkRTXMLPolyDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkRTXMLPolyDataReader - Read RealTime VTK XML PolyData files.
Super Class:
vtkXMLPolyDataReader
vtkRTXMLPolyDataReader reads the VTK XML PolyData file format in real time.
"""
class vtkRandomGraphSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRandomGraphSource - a graph with random edges
Super Class:
vtkGraphAlgorithm
Generates a graph with a specified number of vertices, with the density of
edges specified by either an exact number of edges or the probability of
an edge. You may additionally specify whether to begin with a random
tree (which enforces graph connectivity).
"""
class vtkRearrangeFields:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRearrangeFields - Move/copy fields between field data, point data and cell data
Super Class:
vtkDataSetAlgorithm
vtkRearrangeFields is used to copy/move fields (vtkDataArrays) between
data object's field data, point data and cell data. To specify which
fields are copied/moved, the user adds operations. There are two types
of operations: 1. the type which copies/moves an attribute's data
(i.e. the field will be copied but will not be an attribute in the
target), 2. the type which copies/moves fields by name. For example:
@verbatim
rf->AddOperation(vtkRearrangeFields::COPY, "foo",
vtkRearrangeFields::DATA_OBJECT,
vtkRearrangeFields::POINT_DATA);
@endverbatim
adds an operation which copies a field (data array) called foo from
the data object's field data to point data.
From Tcl, the same operation can be added as follows:
@verbatim
rf AddOperation COPY foo DATA_OBJECT POINT_DATA
@endverbatim
The same can be done using Python and Java bindings by passing
strings as arguments.
@verbatim
Operation types: COPY, MOVE
AttributeTypes: SCALARS, VECTORS, NORMALS, TCOORDS, TENSORS
Field data locations: DATA_OBJECT, POINT_DATA, CELL_DATA
@endverbatim
Caveats:
When using Tcl, Java, Python or Visual Basic bindings, the array name
can not be one of the AttributeTypes when calling AddOperation() which
takes strings as arguments. The Tcl (Java etc.) command will
always assume the string corresponds to an attribute type when
the argument is one of the AttributeTypes. In this situation,
use the AddOperation() which takes enums.
See Also:
vtkFieldData vtkDataSet vtkDataObjectToDataSetFilter
vtkDataSetAttributes vtkDataArray vtkAssignAttribute
vtkSplitField vtkMergeFields
"""
class vtkRectangularButtonSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRectangularButtonSource - create a rectangular button
Super Class:
vtkButtonSource
vtkRectangularButtonSource creates a rectangular shaped button with
texture coordinates suitable for application of a texture map. This
provides a way to make nice looking 3D buttons. The buttons are
represented as vtkPolyData that includes texture coordinates and
normals. The button lies in the x-y plane.
To use this class you must define its width, height and length. These
measurements are all taken with respect to the shoulder of the button.
The shoulder is defined as follows. Imagine a box sitting on the floor.
The distance from the floor to the top of the box is the depth; the other
directions are the length (x-direction) and height (y-direction). In
this particular widget the box can have a smaller bottom than top. The
ratio in size between bottom and top is called the box ratio (by
default=1.0). The ratio of the texture region to the shoulder region
is the texture ratio. And finally the texture region may be out of plane
compared to the shoulder. The texture height ratio controls this.
Caveats:
The button is defined in the x-y plane. Use vtkTransformPolyDataFilter
or vtkGlyph3D to orient the button in a different direction.
See Also:
vtkButtonSource vtkEllipticalButtonSource
"""
class vtkRectilinearGridAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRectilinearGridAlgorithm - Superclass for algorithms that produce only rectilinear grid as output
Super Class:
vtkAlgorithm
"""
class vtkRectilinearGridClip:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRectilinearGridClip - Reduces the image extent of the input.
Super Class:
vtkRectilinearGridAlgorithm
vtkRectilinearGridClip will make an image smaller. The output must have
an image extent which is the subset of the input. The filter has two
modes of operation:
1: By default, the data is not copied in this filter.
Only the whole extent is modified.
2: If ClipDataOn is set, then you will get no more than the clipped
extent.
"""
class vtkRectilinearGridGeometryFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRectilinearGridGeometryFilter - extract geometry for a rectilinear grid
Super Class:
vtkPolyDataAlgorithm
vtkRectilinearGridGeometryFilter is a filter that extracts geometry from a
rectilinear grid. By specifying appropriate i-j-k indices, it is possible
to extract a point, a curve, a surface, or a "volume". The volume
is actually a (n x m x o) region of points.
The extent specification is zero-offset. That is, the first k-plane in
a 50x50x50 rectilinear grid is given by (0,49, 0,49, 0,0).
Caveats:
If you don't know the dimensions of the input dataset, you can use a large
number to specify extent (the number will be clamped appropriately). For
example, if the dataset dimensions are 50x50x50, and you want the fifth
k-plane, you can use the extents (0,100, 0,100, 4,4). The 100 will
automatically be clamped to 49.
See Also:
vtkGeometryFilter vtkExtractGrid
"""
class vtkRectilinearGridReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkRectilinearGridReader - read vtk rectilinear grid data file
Super Class:
vtkDataReader
vtkRectilinearGridReader is a source object that reads ASCII or binary
rectilinear grid data files in vtk format (see text for format details).
The output of this reader is a single vtkRectilinearGrid data object.
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkRectilinearGrid vtkDataReader
"""
class vtkRectilinearGridWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkRectilinearGridWriter - write vtk rectilinear grid data file
Super Class:
vtkDataWriter
vtkRectilinearGridWriter is a source object that writes ASCII or binary
rectilinear grid data files in vtk format. See text for format details.
Caveats:
Binary files written on one system may not be readable on other systems.
"""
class vtkRectilinearSynchronizedTemplates:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRectilinearSynchronizedTemplates - generate isosurface from rectilinear grid
Super Class:
vtkPolyDataAlgorithm
vtkRectilinearSynchronizedTemplates is a 3D implementation (for rectilinear
grids) of the synchronized template algorithm. Note that vtkContourFilter
will automatically use this class when appropriate.
Caveats:
This filter is specialized to rectilinear grids.
See Also:
vtkContourFilter vtkSynchronizedTemplates2D vtkSynchronizedTemplates3D
"""
class vtkRecursiveDividingCubes:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRecursiveDividingCubes - create points lying on isosurface (using recursive approach)
Super Class:
vtkPolyDataAlgorithm
vtkRecursiveDividingCubes is a filter that generates points lying on a
surface of constant scalar value (i.e., an isosurface). Dense point
clouds (i.e., at screen resolution) will appear as a surface. Less dense
clouds can be used as a source to generate streamlines or to generate
"transparent" surfaces.
This implementation differs from vtkDividingCubes in that it uses a
recursive procedure. In many cases this can result in generating
more points than the procedural implementation of vtkDividingCubes. This is
because the recursive procedure divides voxels by multiples of powers of
two. This can over-constrain subdivision. One of the advantages of the
recursive technique is that the recursion is terminated earlier, which in
some cases can be more efficient.
See Also:
vtkDividingCubes vtkContourFilter vtkMarchingCubes
"""
class vtkReflectionFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkReflectionFilter - reflects a data set across a plane
Super Class:
vtkUnstructuredGridAlgorithm
The vtkReflectionFilter reflects a data set across one of the
planes formed by the data set's bounding box.
Since it converts data sets into unstructured grids, it is not efficient
for structured data sets.
"""
class vtkRegularPolygonSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRegularPolygonSource - create a regular, n-sided polygon and/or polyline
Super Class:
vtkPolyDataAlgorithm
vtkRegularPolygonSource is a source object that creates a single n-sided polygon and/or
polyline. The polygon is centered at a specified point, orthogonal to
a specified normal, and with a circumscribing radius set by the user. The user can
also specify the number of sides of the polygon ranging from [3,N].
This object can be used for seeding streamlines or defining regions for clipping/cutting.
"""
class vtkRenderLargeImage:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRenderLargeImage - Use tiling to generate a large rendering
Super Class:
vtkAlgorithm
vtkRenderLargeImage provides methods needed to read a region from a file.
"""
class vtkRendererSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRendererSource - take a renderer into the pipeline
Super Class:
vtkAlgorithm
vtkRendererSource is a source object that gets its input from a
renderer and converts it to structured points. This can then be
used in a visualization pipeline. You must explicitly send a
Modify() to this object to get it to reload its data from the
renderer. Consider using vtkWindowToImageFilter instead of this
class.
The data placed into the output are the renderer's image RGB values.
Optionally, you can also grab the image depth (e.g., z-buffer) values and
place them into the output (point) field data.
See Also:
vtkWindowToImageFilter vtkRenderer vtkStructuredPoints
"""
class vtkReverseSense:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkReverseSense - reverse the ordering of polygonal cells and/or vertex normals
Super Class:
vtkPolyDataAlgorithm
vtkReverseSense is a filter that reverses the order of polygonal cells
and/or reverses the direction of point and cell normals. Two flags are
used to control these operations. Cell reversal means reversing the order
of indices in the cell connectivity list. Normal reversal means
multiplying the normal vector by -1 (both point and cell normals,
if present).
Caveats:
Normals can be operated on only if they are present in the data.
"""
class vtkRibbonFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRibbonFilter - create oriented ribbons from lines defined in polygonal dataset
Super Class:
vtkPolyDataAlgorithm
vtkRibbonFilter is a filter to create oriented ribbons from lines defined
in polygonal dataset. The orientation of the ribbon is along the line
segments and perpendicular to "projected" line normals. Projected line
normals are the original line normals projected to be perpendicular to
the local line segment. An offset angle can be specified to rotate the
ribbon with respect to the normal.
Caveats:
The input line must not have duplicate points, or normals at points that
are parallel to the incoming/outgoing line segments. (Duplicate points
can be removed with vtkCleanPolyData.) If a line does not meet these
criteria, then no ribbon is generated for that line.
See Also:
vtkTubeFilter
"""
class vtkRotationFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRotationFilter - Duplicates a data set by rotation about an axis
Super Class:
vtkUnstructuredGridAlgorithm
The vtkRotationFilter duplicates a data set by rotation about one of the
3 axes of the dataset's reference frame.
Since it converts data sets into unstructured grids, it is not efficient
for structured data sets.
Thanks:
Theophane Foggia of The Swiss National Supercomputing Centre (CSCS)
for creating and contributing this filter.
"""
class vtkRotationalExtrusionFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRotationalExtrusionFilter - sweep polygonal data creating "skirt" from free edges and lines, and lines from vertices
Super Class:
vtkPolyDataAlgorithm
vtkRotationalExtrusionFilter is a modeling filter. It takes polygonal
data as input and generates polygonal data on output. The input dataset
is swept around the z-axis to create new polygonal primitives. These
primitives form a "skirt" or swept surface. For example, sweeping a
line results in a cylindrical shell, and sweeping a circle creates a
torus.
There are a number of control parameters for this filter. You can
control whether the sweep of a 2D object (i.e., polygon or triangle
strip) is capped with the generating geometry via the "Capping" instance
variable. Also, you can control the angle of rotation, and whether
translation along the z-axis is performed along with the rotation.
(Translation is useful for creating "springs".) You also can adjust
the radius of the generating geometry using the "DeltaRotation" instance
variable.
The skirt is generated by locating certain topological features. Free
edges (edges of polygons or triangle strips used by only one polygon or
triangle strip) generate surfaces. This is true also of lines or
polylines. Vertices generate lines.
This filter can be used to model axisymmetric objects like cylinders,
bottles, and wine glasses; or translational/rotational symmetric objects
like springs or corkscrews.
Caveats:
If the object sweeps 360 degrees, radius does not vary, and the object
does not translate, capping is not performed. This is because the cap
is unnecessary.
Some polygonal objects have no free edges (e.g., sphere). When swept,
this will result in two separate surfaces if capping is on, or no surface
if capping is off.
See Also:
vtkLinearExtrusionFilter
"""
class vtkRuledSurfaceFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkRuledSurfaceFilter - generates a surface from a set of lines
Super Class:
vtkPolyDataAlgorithm
vtkRuledSurfaceFilter is a filter that generates a surface from a set of
lines. The lines are assumed to be "parallel" in the sense that they do
not intersect and remain somewhat close to one another. A surface is
generated by connecting the points defining each pair of lines with
straight lines. This creates a strip for each pair of lines (i.e., a
triangulation is created from two generating lines). The filter can handle
an arbitrary number of lines, with lines i and i+1 assumed connected.
Note that there are several different approaches for creating the ruled
surface; the surface can be created either from the input points or by
resampling the polylines (using a user-specified resolution).
This filter offers some other important features. A DistanceFactor ivar is
used to decide when two lines are too far apart to connect. (The factor is
a multiple of the distance between the first two points of the two lines
defining the strip.) If the distance between the two generating lines
becomes too great, then the surface is not generated in that
region. (Note: if the lines separate and then merge, then a hole can be
generated in the surface.) In addition, the Offset and OnRatio ivars can
be used to create nifty striped surfaces. Closed surfaces (e.g., tubes) can
be created by setting the CloseSurface ivar. (The surface can be closed
in the other direction by repeating the first and last point in the
polylines defining the surface.)
An important use of this filter is to combine it with vtkStreamLine to
generate stream surfaces. It can also be used to create surfaces from
contours.
Caveats:
The number of lines must be greater than two if a surface is to be
generated. To create a surface with two sides from a single line (i.e., a
ribbon), use vtkRibbonFilter.
See Also:
vtkRibbonFilter vtkStreamLine
"""
class vtkSESAMEReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkSESAMEReader - read SESAME files
Super Class:
vtkRectilinearGridSource
vtkSESAMEReader is a source object that reads SESAME files.
Currently supported tables include 301, 304, 502, 503, 504, 505, and 602.
vtkSESAMEReader creates rectilinear grid datasets. The dimension of the
dataset depends upon the number of densities and temperatures in the table.
Values at certain temperatures and densities are stored as scalars.
"""
class vtkSLCReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkSLCReader - read an SLC volume file.
Super Class:
vtkImageReader2
vtkSLCReader reads an SLC file and creates a structured point dataset.
The size of the volume and the data spacing is set from the SLC file
header.
"""
class vtkSQLTableReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkSQLTableReader - executes an sql query and retrieves results into a table
Super Class:
vtkTableAlgorithm
vtkSQLTableReader creates a vtkTable with the results of an arbitrary SQL
query. To use this filter, you first need an instance of a vtkSQLDatabase
subclass. You may use the database class to obtain a vtkSQLQuery instance.
Set that query on this filter to extract the query as a table.
Thanks:
Thanks to Andrew Wilson from Sandia National Laboratories for his work
on the database classes.
See Also:
vtkSQLDatabase vtkSQLQuery
"""
class vtkSTLReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkSTLReader - read ASCII or binary stereo lithography files
Super Class:
vtkPolyDataAlgorithm
vtkSTLReader is a source object that reads ASCII or binary stereo
lithography files (.stl files). The FileName must be specified to
vtkSTLReader. The object automatically detects whether the file is
ASCII or binary.
.stl files are quite inefficient since they duplicate vertex
definitions. By setting the Merging boolean you can control whether the
point data is merged after reading. Merging is performed by default,
however, merging requires a large amount of temporary storage since a
3D hash table must be constructed.
Caveats:
Binary files written on one system may not be readable on other systems.
vtkSTLWriter uses VAX or PC byte ordering and swaps bytes on other systems.
"""
class vtkSTLWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkSTLWriter - write stereo lithography files
Super Class:
vtkPolyDataWriter
vtkSTLWriter writes stereo lithography (.stl) files in either ASCII or
binary form. Stereo lithography files only contain triangles. If polygons
with more than 3 vertices are present, only the first 3 vertices are
written. Use vtkTriangleFilter to convert polygons to triangles.
Caveats:
Binary files written on one system may not be readable on other systems.
vtkSTLWriter uses VAX or PC byte ordering and swaps bytes on other systems.
"""
class vtkSampleFunction:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSampleFunction - sample an implicit function over a structured point set
Super Class:
vtkImageAlgorithm
vtkSampleFunction is a source object that evaluates an implicit function
and normals at each point in a vtkStructuredPoints. The user can specify
the sample dimensions and location in space to perform the sampling. To
create closed surfaces (in conjunction with the vtkContourFilter), capping
can be turned on to set a particular value on the boundaries of the sample
space.
See Also:
vtkImplicitModeller
"""
class vtkSelectPolyData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSelectPolyData - select portion of polygonal mesh; generate selection scalars
Super Class:
vtkPolyDataAlgorithm
vtkSelectPolyData is a filter that selects polygonal data based on
defining a "loop" and indicating the region inside of the loop. The
mesh within the loop consists of complete cells (the cells are not
cut). Alternatively, this filter can be used to generate scalars.
These scalar values, which are a distance measure to the loop, can
be used to clip, contour, or extract data (i.e., anything that an
implicit function can do).
The loop is defined by an array of x-y-z point coordinates.
(Coordinates should be in the same coordinate space as the input
polygonal data.) The loop can be concave and non-planar, but not
self-intersecting. The input to the filter is a polygonal mesh
(only surface primitives such as triangle strips and polygons); the
output is either a) a portion of the original mesh lying within
the selection loop (GenerateSelectionScalarsOff); or b) the same
polygonal mesh with the addition of scalar values
(GenerateSelectionScalarsOn).
The algorithm works as follows. For each point coordinate in the
loop, the closest point in the mesh is found. The result is a loop
of closest point ids from the mesh. Then, the edges in the mesh
connecting the closest points (and lying along the lines forming
the loop) are found. A greedy edge tracking procedure is used as
follows. At the current point, the mesh edge oriented in the
direction of and whose end point is closest to the line is
chosen. The edge is followed to the new end point, and the
procedure is repeated. This process continues until the entire loop
has been created.
To determine what portion of the mesh is inside and outside of the
loop, three options are possible. 1) the smallest connected region,
2) the largest connected region, and 3) the connected region
closest to a user specified point. (Set the ivar SelectionMode.)
Once the loop is computed as above, the GenerateSelectionScalars
controls the output of the filter. If on, then scalar values are
generated based on distance to the loop lines. Otherwise, the cells
lying inside the selection loop are output. By default, the mesh
lying within the loop is output; however, if InsideOut is on, then
the portion of the mesh lying outside of the loop is output.
The filter can be configured to generate the unselected portions of
the mesh as output by setting GenerateUnselectedOutput. Use the
method GetUnselectedOutput to access this output. (Note: this flag
is pertinent only when GenerateSelectionScalars is off.)
Caveats:
Make sure that the points you pick are on a connected surface. If
not, then the filter will generate an empty or partial result. Also,
self-intersecting loops will generate unpredictable results.
During processing of the data, non-triangular cells are converted to
triangles if GenerateSelectionScalars is off.
See Also:
vtkImplicitSelectionLoop
"""
class vtkSelectVisiblePoints:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSelectVisiblePoints - extract points that are visible (based on z-buffer calculation)
Super Class:
vtkPolyDataAlgorithm
vtkSelectVisiblePoints is a filter that selects points based on
whether they are visible or not. Visibility is determined by
accessing the z-buffer of a rendering window. (The position of each
input point is converted into display coordinates, and then the
z-value at that point is obtained. If within the user-specified
tolerance, the point is considered visible.)
Points that are visible (or if the ivar SelectInvisible is on,
invisible points) are passed to the output. Associated data
attributes are passed to the output as well.
This filter also allows you to specify a rectangular window in display
(pixel) coordinates in which the visible points must lie. This can be
used as a sort of local "brushing" operation to select just data within
a window.
Caveats:
You must carefully synchronize the execution of this filter. The
filter refers to a renderer, which is modified every time a render
occurs. Therefore, the filter is always out of date, and always
executes. You may have to perform two rendering passes, or if you
are using this filter in conjunction with vtkLabeledDataMapper,
things work out because 2D rendering occurs after the 3D rendering.
"""
class vtkSelectionAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSelectionAlgorithm - Superclass for algorithms that produce only Selection as output
Super Class:
vtkAlgorithm
vtkSelectionAlgorithm is a convenience class to make writing algorithms
easier. It is also designed to help transition old algorithms to the new
pipeline architecture. There are some assumptions and defaults made by this
class you should be aware of. This class defaults such that your filter
will have one input port and one output port. If that is not the case
simply change it with SetNumberOfInputPorts etc. See this class's
constructor for the default. This class also provides a FillInputPortInfo
method that by default says that all inputs will be Selection. If that
isn't the case then please override this method in your subclass. This
class breaks out the downstream requests into separate functions such as
ExecuteData and ExecuteInformation. For new algorithms you should
implement RequestData( request, inputVec, outputVec) but for older filters
there is a default implementation that calls the old ExecuteData(output)
signature. For even older filters that don't implement ExecuteData the
default implementation calls the even older Execute() signature.
Thanks:
Thanks to Patricia Crossno, Ken Moreland, Andrew Wilson and Brian Wylie from
Sandia National Laboratories for their help in developing this class.
"""
class vtkSelectionLink:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSelectionLink - An algorithm for linking selections among objects
Super Class:
vtkSelectionAlgorithm
vtkSelectionLink is a simple source filter which outputs the selection
object stored internally. Multiple objects may share
the same selection link filter and connect it to an internal pipeline so
that if one object changes the selection, it will be pulled into all
the other objects when their pipelines update.
"""
class vtkSelectionSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSelectionSource - Generate selection from given set of ids
Super Class:
vtkSelectionAlgorithm
None provided.
"""
class vtkShepardMethod:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkShepardMethod - sample unstructured points onto structured points using the method of Shepard
Super Class:
vtkImageAlgorithm
vtkShepardMethod is a filter used to visualize unstructured point data using
Shepard's method. The method works by resampling the unstructured points
onto a structured points set. The influence functions are described as
"inverse distance weighted". Once the structured points are computed, the
usual visualization techniques (e.g., iso-contouring or volume rendering)
can be used to visualize the structured points.
Caveats:
The input to this filter is any dataset type. This filter can be used
to resample any form of data, i.e., the input data need not be
unstructured.
The bounds of the data (i.e., the sample space) are automatically computed
if not set by the user.
If you use a maximum distance less than 1.0, some output points may
never receive a contribution. The final value of these points can be
specified with the "NullValue" instance variable.
"""
class vtkShrinkFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkShrinkFilter - shrink cells composing an arbitrary data set
Super Class:
vtkUnstructuredGridAlgorithm
vtkShrinkFilter shrinks cells composing an arbitrary data set
towards their centroid. The centroid of a cell is computed as the
average position of the cell points. Shrinking results in
disconnecting the cells from one another. The output of this filter
is of general dataset type vtkUnstructuredGrid.
Caveats:
It is possible to turn cells inside out or cause self intersection
in special cases.
See Also:
vtkShrinkPolyData
"""
class vtkShrinkPolyData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkShrinkPolyData - shrink cells composing PolyData
Super Class:
vtkPolyDataAlgorithm
vtkShrinkPolyData shrinks cells composing a polygonal dataset (e.g.,
vertices, lines, polygons, and triangle strips) towards their centroid.
The centroid of a cell is computed as the average position of the
cell points. Shrinking results in disconnecting the cells from
one another. The output dataset type of this filter is polygonal data.
During execution the filter passes its input cell data to its
output. Point data attributes are copied to the points created during the
shrinking process.
Caveats:
It is possible to turn cells inside out or cause self intersection
in special cases.
Users should use the vtkTriangleFilter to triangulate meshes that
contain triangle strips.
See Also:
vtkShrinkFilter
"""
class vtkSimpleElevationFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSimpleElevationFilter - generate scalars along a specified direction
Super Class:
vtkDataSetAlgorithm
vtkSimpleElevationFilter is a filter to generate scalar values from a
dataset. The scalar values are generated by dotting a user-specified
vector against a vector defined from the input dataset points to the
origin.
See Also:
vtkElevationFilter
"""
class vtkSimpleImageFilterExample:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSimpleImageFilterExample - Simple example of an image-image filter.
Super Class:
vtkSimpleImageToImageFilter
This is an example of a simple image-image filter. It copies its input
to its output (point by point). It shows how templates can be used
to support various data types.
See Also:
vtkSimpleImageToImageFilter
"""
class vtkSimplePointsReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkSimplePointsReader - Read a list of points from a file.
Super Class:
vtkPolyDataAlgorithm
vtkSimplePointsReader is a source object that reads a list of
points from a file. Each point is specified by three
floating-point values in ASCII format. There is one point per line
of the file. A vertex cell is created for each point in the
output. This reader is meant as an example of how to write a
reader in VTK.
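Example:
An illustrative sketch, assuming the standard vtk Python bindings; the file
name is a placeholder.
  import vtk

  reader = vtk.vtkSimplePointsReader()
  reader.SetFileName("points.txt")   # one "x y z" triple per line
  reader.Update()
  polydata = reader.GetOutput()      # vtkPolyData with one vertex cell per point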
"""
class vtkSmoothPolyDataFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSmoothPolyDataFilter - adjust point positions using Laplacian smoothing
Super Class:
vtkPolyDataAlgorithm
vtkSmoothPolyDataFilter is a filter that adjusts point coordinates using
Laplacian smoothing. The effect is to "relax" the mesh, making the cells
better shaped and the vertices more evenly distributed. Note that this
filter operates on the lines, polygons, and triangle strips composing an
instance of vtkPolyData. Vertex or poly-vertex cells are never modified.
The algorithm proceeds as follows. For each vertex v, a topological and
geometric analysis is performed to determine which vertices are connected
to v, and which cells are connected to v. Then, a connectivity array is
constructed for each vertex. (The connectivity array is a list of lists
of vertices that directly attach to each vertex.) Next, an iteration
phase begins over all vertices. For each vertex v, the coordinates of v
are modified according to an average of the connected vertices. (A
relaxation factor is available to control the amount of displacement of
v). The process repeats for each vertex. This pass over the list of
vertices is a single iteration. Many iterations (generally around 20 or
so) are repeated until the desired result is obtained.
There are some special instance variables used to control the execution
of this filter. (These ivars basically control what vertices can be
smoothed, and the creation of the connectivity array.) The
BoundarySmoothing ivar enables/disables the smoothing operation on
vertices that are on the "boundary" of the mesh. A boundary vertex is one
that is surrounded by a semi-cycle of polygons (or used by a single
line).
Another important ivar is FeatureEdgeSmoothing. If this ivar is
enabled, then interior vertices are classified as either "simple",
"interior edge", or "fixed", and smoothed differently. (Interior
vertices are manifold vertices surrounded by a cycle of polygons; or used
by two line cells.) The classification is based on the number of feature
edges attached to v. A feature edge occurs when the angle between the two
surface normals of a polygon sharing an edge is greater than the
FeatureAngle ivar. Then, vertices used by no feature edges are classified
"simple", vertices used by exactly two feature edges are classified
"interior edge", and all others are "fixed" vertices.
Once the classification is known, the vertices are smoothed
differently. Corner (i.e., fixed) vertices are not smoothed at all.
Simple vertices are smoothed as before (i.e., average of connected
vertex coordinates). Interior edge vertices are smoothed only along
their two connected edges, and only if the angle between the edges
is less than the EdgeAngle ivar.
The total smoothing can be controlled by using two ivars. The
NumberOfIterations is a cap on the maximum number of smoothing passes.
The Convergence ivar is a limit on the maximum point motion. If the
maximum motion during an iteration is less than Convergence, then the
smoothing process terminates. (Convergence is expressed as a fraction of
the diagonal of the bounding box.)
There are two instance variables that control the generation of error
data. If the ivar GenerateErrorScalars is on, then a scalar value indicating
the distance of each vertex from its original position is computed. If the
ivar GenerateErrorVectors is on, then a vector representing change in
position is computed.
Optionally you can further control the smoothing process by defining a
second input: the Source. If defined, the input mesh is constrained to
lie on the surface defined by the Source ivar.
Caveats:
The Laplacian operation reduces high frequency information in the geometry
of the mesh. With excessive smoothing important details may be lost, and
the surface may shrink towards the centroid. Enabling FeatureEdgeSmoothing
helps reduce this effect, but cannot entirely eliminate it. You may also
wish to try vtkWindowedSincPolyDataFilter. It does a better job of
minimizing shrinkage.
See Also:
vtkWindowedSincPolyDataFilter vtkDecimate vtkDecimatePro
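Example:
An illustrative sketch, assuming the standard vtk Python bindings;
"mesh_source" stands for any upstream algorithm producing polygonal data.
  import vtk

  smoother = vtk.vtkSmoothPolyDataFilter()
  smoother.SetInputConnection(mesh_source.GetOutputPort())
  smoother.SetNumberOfIterations(20)   # cap on the number of smoothing passes
  smoother.SetRelaxationFactor(0.1)    # displacement applied per pass
  smoother.SetConvergence(0.0)         # 0.0 means always run NumberOfIterations passes
  smoother.FeatureEdgeSmoothingOff()   # keep feature edges fixed
  smoother.BoundarySmoothingOn()
  smoother.Update()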
"""
class vtkSpatialRepresentationFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSpatialRepresentationFilter - generate polygonal model of spatial search object (i.e., a vtkLocator)
Super Class:
vtkPolyDataSource
vtkSpatialRepresentationFilter generates a polygonal representation of a
spatial search (vtkLocator) object. The representation varies depending
upon the nature of the spatial search object. For example, the
representation for vtkOBBTree is a collection of oriented bounding
boxes. The input to this filter is a dataset of any type, and the output
is polygonal data. You must also specify the spatial search object to
use.
Generally spatial search objects are used for collision detection and
other geometric operations, but in this filter one or more levels of
spatial searchers can be generated to form a geometric approximation to
the input data. This is a form of data simplification, generally used to
accelerate the rendering process. Or, this filter can be used as a
debugging/visualization aid for spatial search objects.
This filter can generate one or more output vtkPolyData corresponding to
different levels in the spatial search tree. The output data is retrieved
using the GetOutput(id) method, where id ranges from 0 (root level)
to Level. Note that the output for level "id" is not computed unless a
GetOutput(id) method is issued. Thus, if you desire three levels of output
(say 2,4,7), you would have to invoke GetOutput(2), GetOutput(4), and
GetOutput(7). (Also note that the Level ivar is computed automatically
depending on the size and nature of the input data.) There is also
another GetOutput() method that takes no parameters. This method returns
the leafs of the spatial search tree, which may be at different levels.
Caveats:
You can specify the number of levels to generate with the MaxLevels
ivar. However, when the spatial search tree is built, this number of levels
may not actually be generated. The actual number available can be found in
the Levels ivar. Note that the value of Levels may change after filter
execution.
See Also:
vtkLocator vtkPointLocator vtkCellLocator vtkOBBTree
"""
class vtkSpherePuzzle:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSpherePuzzle - create a polygonal sphere centered at the origin
Super Class:
vtkPolyDataAlgorithm
vtkSpherePuzzle creates
"""
class vtkSpherePuzzleArrows:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSpherePuzzleArrows - Visualize permutation of the sphere puzzle.
Super Class:
vtkPolyDataAlgorithm
vtkSpherePuzzleArrows creates
"""
class vtkSphereSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSphereSource - create a polygonal sphere centered at the origin
Super Class:
vtkPolyDataAlgorithm
vtkSphereSource creates a sphere (represented by polygons) of specified
radius centered at the origin. The resolution (polygonal discretization)
in both the latitude (phi) and longitude (theta) directions can be
specified. It also is possible to create partial spheres by specifying
maximum phi and theta angles. By default, the surface tessellation of
the sphere uses triangles; however you can set LatLongTessellation to
produce a tessellation using quadrilaterals.
Caveats:
Resolution means the number of latitude or longitude lines for a complete
sphere. If you create partial spheres the number of latitude/longitude
lines may be off by one.
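Example:
An illustrative sketch, assuming the standard vtk Python bindings; the
parameter values are arbitrary.
  import vtk

  sphere = vtk.vtkSphereSource()
  sphere.SetRadius(2.0)
  sphere.SetCenter(0.0, 0.0, 0.0)
  sphere.SetThetaResolution(32)   # longitude lines
  sphere.SetPhiResolution(16)     # latitude lines
  sphere.SetEndTheta(180.0)       # optional: produce only half the sphere
  sphere.Update()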
"""
class vtkSplineFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSplineFilter - generate uniformly subdivided polylines from a set of input polyline using a vtkSpline
Super Class:
vtkPolyDataAlgorithm
vtkSplineFilter is a filter that generates output polylines from an
input set of polylines. The polylines are uniformly subdivided and produced
with the help of a vtkSpline class that the user can specify (by default a
vtkCardinalSpline is used). The number of subdivisions of the line can be
controlled in several ways. The user can either specify the number of
subdivisions or a length of each subdivision can be provided (and the
class will figure out how many subdivisions are required over the whole
polyline). The maximum number of subdivisions can also be set.
The output of this filter is a polyline per input polyline (or line). New
points and texture coordinates are created. Point data is interpolated and
cell data passed on. Any polylines with fewer than two points, or that have
coincident points, are ignored.
See Also:
vtkRibbonFilter vtkTubeFilter
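Example:
An illustrative sketch, assuming the standard vtk Python bindings;
"lines_source" stands for any upstream algorithm producing polylines.
  import vtk

  spline_filter = vtk.vtkSplineFilter()
  spline_filter.SetInputConnection(lines_source.GetOutputPort())
  spline_filter.SetSubdivideToSpecified()            # use a fixed number of subdivisions
  spline_filter.SetNumberOfSubdivisions(50)
  spline_filter.SetSpline(vtk.vtkCardinalSpline())   # the default spline; shown for completeness
  spline_filter.Update()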
"""
class vtkSplitField:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSplitField - Split a field into single component fields
Super Class:
vtkDataSetAlgorithm
vtkSplitField is used to split a multi-component field (vtkDataArray)
into multiple single component fields. The new fields are put in
the same field data as the original field. The output arrays
are of the same type as the input array. Example:
@verbatim
sf->SetInputField("gradient", vtkSplitField::POINT_DATA);
sf->Split(0, "firstcomponent");
@endverbatim
tells vtkSplitField to extract the first component of the field
called gradient and create an array called firstcomponent (the
new field will be in the output's point data).
The same can be done from Tcl:
@verbatim
sf SetInputField gradient POINT_DATA
sf Split 0 firstcomponent
AttributeTypes: SCALARS, VECTORS, NORMALS, TCOORDS, TENSORS
Field locations: DATA_OBJECT, POINT_DATA, CELL_DATA
@endverbatim
Note that, by default, the original array is also passed through.
Caveats:
When using Tcl, Java, Python or Visual Basic bindings, the array name
can not be one of the AttributeTypes when calling Split() which takes
strings as arguments. The Tcl (Java etc.) command will
always assume the string corresponds to an attribute type when
the argument is one of the AttributeTypes. In this situation,
use the Split() which takes enums.
See Also:
vtkFieldData vtkDataSet vtkDataObjectToDataSetFilter
vtkDataSetAttributes vtkDataArray vtkRearrangeFields
vtkAssignAttribute vtkMergeFields
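Example:
The example above, translated to Python as an illustrative sketch assuming the
standard vtk Python bindings; "source" is a placeholder upstream algorithm, and
the string form of the location argument mirrors the Tcl usage shown above.
  import vtk

  sf = vtk.vtkSplitField()
  sf.SetInputConnection(source.GetOutputPort())
  sf.SetInputField("gradient", "POINT_DATA")   # field name, field location
  sf.Split(0, "firstcomponent")                # extract component 0 into a new array
  sf.Update()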
"""
class vtkStreamLine:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStreamLine - generate streamline in arbitrary dataset
Super Class:
vtkStreamer
vtkStreamLine is a filter that generates a streamline for an arbitrary
dataset. A streamline is a line that is everywhere tangent to the vector
field. Scalar values also are calculated along the streamline and can be
used to color the line. Streamlines are calculated by integrating from
a starting point through the vector field. Integration can be performed
forward in time (see where the line goes), backward in time (see where the
line came from), or in both directions. It also is possible to compute
vorticity along the streamline. Vorticity is the projection (i.e., dot
product) of the flow rotation on the velocity vector, i.e., the rotation
of flow around the streamline.
vtkStreamLine defines the instance variable StepLength. This parameter
controls the time increment used to generate individual points along
the streamline(s). Smaller values result in more line
primitives but smoother streamlines. The StepLength instance variable is
defined in terms of time (i.e., the distance that the particle travels in
the specified time period). Thus, the line segments will be smaller in areas
of low velocity and larger in regions of high velocity. (NOTE: This is
different than the IntegrationStepLength defined by the superclass
vtkStreamer. IntegrationStepLength is used to control integration step
size and is expressed as a fraction of the cell length.) The StepLength
instance variable is important because subclasses of vtkStreamLine (e.g.,
vtkDashedStreamLine) depend on this value to build their representation.
See Also:
vtkStreamer vtkDashedStreamLine vtkStreamPoints
"""
class vtkStreamPoints:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStreamPoints - generate points along streamer separated by constant time increment
Super Class:
vtkStreamer
vtkStreamPoints is a filter that generates points along a streamer.
The points are separated by a constant time increment. The resulting visual
effect (especially when coupled with vtkGlyph3D) is an indication of
particle speed.
See Also:
vtkStreamer vtkStreamLine vtkDashedStreamLine
"""
class vtkStreamTracer:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStreamTracer - Streamline generator
Super Class:
vtkPolyDataAlgorithm
vtkStreamTracer is a filter that integrates a vector field to generate
streamlines. The integration is performed using the provided integrator.
The default is second order Runge-Kutta.
vtkStreamTracer generates polylines as output. Each cell (polyline)
corresponds to one streamline. The values associated with each streamline
are stored in the cell data whereas the values associated with points
are stored in point data.
Note that vtkStreamTracer can integrate both forward and backward.
The length of the streamline is controlled by specifying either
a maximum value in the units of length, cell length or elapsed time
(the elapsed time is the time each particle would have traveled if
flow were steady). Otherwise, the integration terminates after exiting
the dataset or if the particle speed is reduced to a value less than
the terminal speed or when a maximum number of steps is reached.
The reason for the termination is stored in a cell array named
ReasonForTermination.
The quality of integration can be controlled by setting integration
step (InitialIntegrationStep) and in the case of adaptive solvers
the maximum error, the minimum integration step and the maximum
integration step. All of these can have units of length, cell length
or elapsed time.
The integration time, vorticity, rotation and angular velocity
are stored in point arrays named "IntegrationTime", "Vorticity",
"Rotation" and "AngularVelocity" respectively (vorticity, rotation
and angular velocity are computed only when ComputeVorticity is on).
All point attributes in the source data set are interpolated on the
new streamline points.
vtkStreamTracer integrates through any type of dataset. As a result, if the
dataset contains 2D cells such as polygons or triangles, the integration is
constrained to lie on the surface defined by the 2D cells.
The starting point of traces may be defined in two different ways.
Starting from global x-y-z "position" allows you to start a single trace
at a specified x-y-z coordinate. If you specify a source object,
a trace will be generated for each point in the source that is
inside the dataset.
See Also:
vtkRibbonFilter vtkRuledSurfaceFilter vtkInitialValueProblemSolver
vtkRungeKutta2 vtkRungeKutta4 vtkRungeKutta45
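Example:
An illustrative sketch, assuming the standard vtk Python bindings;
"vector_field" and "seeds" stand for upstream algorithms producing the dataset
with a vector field and the seed points, respectively.
  import vtk

  tracer = vtk.vtkStreamTracer()
  tracer.SetInputConnection(vector_field.GetOutputPort())
  tracer.SetSourceConnection(seeds.GetOutputPort())   # one trace per seed point
  tracer.SetIntegratorTypeToRungeKutta45()
  tracer.SetIntegrationDirectionToBoth()
  tracer.SetMaximumPropagation(100)                   # in units of length
  tracer.SetComputeVorticity(1)
  tracer.Update()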
"""
class vtkStringToNumeric:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStringToNumeric - Converts string arrays to numeric arrays
Super Class:
vtkDataObjectAlgorithm
vtkStringToNumeric is a filter for converting a string array
into numeric arrays.
"""
class vtkStringToTimePoint:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStringToTimePoint - Converts a string array to a integral time array
Super Class:
vtkDataObjectAlgorithm
vtkStringToTimePoint is a filter for converting a string array
into a datetime, time or date array. The input strings must
conform to one of the ISO8601 formats defined in vtkTimePointUtility.
The input array specified by SetInputArrayToProcess(...)
indicates the array to process. This array must be of type
vtkStringArray.
The output array will be of type vtkTypeUInt64Array.
"""
class vtkStripper:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStripper - create triangle strips and/or poly-lines
Super Class:
vtkPolyDataAlgorithm
Caveats:
If triangle strips or poly-lines exist in the input data they will
be passed through to the output data. This filter will only construct
triangle strips if triangle polygons are available; and will only
construct poly-lines if lines are available.
See Also:
vtkTriangleFilter
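Example:
An illustrative sketch, assuming the standard vtk Python bindings; "surface"
stands for any upstream algorithm producing polygonal data.
  import vtk

  triangles = vtk.vtkTriangleFilter()          # ensure triangle polygons are available
  triangles.SetInputConnection(surface.GetOutputPort())

  stripper = vtk.vtkStripper()
  stripper.SetInputConnection(triangles.GetOutputPort())
  stripper.SetMaximumLength(1000)              # upper bound on triangles per strip
  stripper.Update()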
"""
class vtkStructuredGridAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStructuredGridAlgorithm - Superclass for algorithms that produce only structured grid as output
Super Class:
vtkAlgorithm
"""
class vtkStructuredGridClip:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStructuredGridClip - Reduces the image extent of the input.
Super Class:
vtkStructuredGridAlgorithm
vtkStructuredGridClip will make an image smaller. The output must have
an image extent which is the subset of the input. The filter has two
modes of operation:
1: By default, the data is not copied in this filter.
Only the whole extent is modified.
2: If ClipDataOn is set, then you will get no more than the clipped
extent.
"""
class vtkStructuredGridGeometryFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStructuredGridGeometryFilter - extract geometry for structured grid
Super Class:
vtkPolyDataAlgorithm
vtkStructuredGridGeometryFilter is a filter that extracts geometry from a
structured grid. By specifying appropriate i-j-k indices, it is possible
to extract a point, a curve, a surface, or a "volume". Depending upon the
type of data, the curve and surface may be curved or planar. (The volume
is actually a (n x m x o) region of points.)
The extent specification is zero-offset. That is, the first k-plane in
a 50x50x50 structured grid is given by (0,49, 0,49, 0,0).
The output of this filter is affected by the structured grid blanking.
If blanking is on, and a blanking array defined, then those cells
attached to blanked points are not output. (Blanking is a property of
the input vtkStructuredGrid.)
Caveats:
If you don't know the dimensions of the input dataset, you can use a large
number to specify extent (the number will be clamped appropriately). For
example, if the dataset dimensions are 50x50x50, and you want the fifth
k-plane, you can use the extents (0,100, 0,100, 4,4). The 100 will
automatically be clamped to 49.
See Also:
vtkGeometryFilter vtkExtractGrid vtkStructuredGrid
"""
class vtkStructuredGridOutlineFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkStructuredGridOutlineFilter - create wireframe outline for structured grid
Super Class:
vtkPolyDataAlgorithm
vtkStructuredGridOutlineFilter is a filter that generates a wireframe
outline of a structured grid (vtkStructuredGrid). Structured data is
topologically a cube, so the outline will have 12 "edges".
"""
class vtkStructuredGridReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkStructuredGridReader - read vtk structured grid data file
Super Class:
vtkDataReader
vtkStructuredGridReader is a source object that reads ASCII or binary
structured grid data files in vtk format. (see text for format details).
The output of this reader is a single vtkStructuredGrid data object.
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkStructuredGrid vtkDataReader
"""
class vtkStructuredGridWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkStructuredGridWriter - write vtk structured grid data file
Super Class:
vtkDataWriter
vtkStructuredGridWriter is a source object that writes ASCII or binary
structured grid data files in vtk format. See text for format details.
Caveats:
Binary files written on one system may not be readable on other systems.
"""
class vtkStructuredPointsReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkStructuredPointsReader - read vtk structured points data file
Super Class:
vtkDataReader
vtkStructuredPointsReader is a source object that reads ASCII or binary
structured points data files in vtk format (see text for format details).
The output of this reader is a single vtkStructuredPoints data object.
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkStructuredPoints vtkDataReader
"""
class vtkStructuredPointsWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkStructuredPointsWriter - write vtk structured points data file
Super Class:
vtkDataWriter
vtkStructuredPointsWriter is a source object that writes ASCII or binary
structured points data in vtk file format. See text for format details.
Caveats:
Binary files written on one system may not be readable on other systems.
"""
class vtkSubPixelPositionEdgels:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSubPixelPositionEdgels - adjust edgel locations based on gradients.
Super Class:
vtkPolyDataAlgorithm
vtkSubPixelPositionEdgels is a filter that takes a series of linked
edgels (digital curves) and gradient maps as input. It then adjusts
the edgel locations based on the gradient data. Specifically, the
algorithm first determines the neighboring gradient magnitudes of
an edgel using simple interpolation of its neighbors. It then fits
the following three data points: negative gradient direction
gradient magnitude, edgel gradient magnitude and positive gradient
direction gradient magnitude to a quadratic function. It then
solves this quadratic to find the maximum gradient location along
the gradient orientation. It then modifies the edgels location
along the gradient orientation to the calculated maximum
location. This algorithm does not adjust an edgel in the direction
orthogonal to its gradient vector.
See Also:
vtkImageData vtkImageGradient vtkLinkEdgels
"""
class vtkSubdivideTetra:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSubdivideTetra - subdivide one tetrahedron into twelve for every tetra
Super Class:
vtkUnstructuredGridAlgorithm
This filter subdivides tetrahedra in an unstructured grid into twelve tetrahedra.
"""
class vtkSuperquadricSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSuperquadricSource - create a polygonal superquadric centered
Super Class:
vtkPolyDataAlgorithm
vtkSuperquadricSource creates a superquadric (represented by polygons)
of specified
size centered at the origin. The resolution (polygonal discretization)
in both the latitude (phi) and longitude (theta) directions can be
specified. Roundness parameters (PhiRoundness and ThetaRoundness) control
the shape of the superquadric. The Toroidal boolean controls whether
a toroidal superquadric is produced. If so, the Thickness parameter
controls the thickness of the toroid: 0 is the thinnest allowable
toroid, and 1 has a minimum sized hole. The Scale parameters allow
the superquadric to be scaled in x, y, and z (normal vectors are correctly
generated in any case). The Size parameter controls size of the
superquadric.
This code is based on "Rigid physically based superquadrics", A. H. Barr,
in "Graphics Gems III", David Kirk, ed., Academic Press, 1992.
Caveats:
Resolution means the number of latitude or longitude lines for a complete
superquadric. The resolution parameters are rounded to the nearest 4
in phi and 8 in theta.
Texture coordinates are not equally distributed around all superquadrics.
The Size and Thickness parameters control coefficients of superquadric
generation, and may not exactly describe the size of the superquadric.
"""
class vtkSurfaceReconstructionFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSurfaceReconstructionFilter - reconstructs a surface from unorganized points
Super Class:
vtkImageAlgorithm
vtkSurfaceReconstructionFilter takes a list of points assumed to lie on
the surface of a solid 3D object. A signed measure of the distance to the
surface is computed and sampled on a regular grid. The grid can then be
contoured at zero to extract the surface. The default values for
neighborhood size and sample spacing should give reasonable results for
most uses but can be set if desired. This procedure is based on the PhD
work of Hugues Hoppe: http://www.research.microsoft.com/~hoppe
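Example:
An illustrative sketch, assuming the standard vtk Python bindings;
"points_source" stands for an upstream algorithm producing the unorganized
points, and the parameter values are arbitrary.
  import vtk

  recon = vtk.vtkSurfaceReconstructionFilter()
  recon.SetInputConnection(points_source.GetOutputPort())
  recon.SetNeighborhoodSize(20)    # the defaults are usually fine; shown for completeness
  recon.SetSampleSpacing(0.05)

  # Contour the signed distance volume at zero to extract the surface.
  contour = vtk.vtkContourFilter()
  contour.SetInputConnection(recon.GetOutputPort())
  contour.SetValue(0, 0.0)
  contour.Update()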
"""
class vtkSynchronizedTemplates2D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSynchronizedTemplates2D - generate isoline(s) from a structured points set
Super Class:
vtkPolyDataAlgorithm
vtkSynchronizedTemplates2D is a 2D implementation of the synchronized
template algorithm. Note that vtkContourFilter will automatically
use this class when appropriate.
Caveats:
This filter is specialized to 2D images.
See Also:
vtkContourFilter vtkSynchronizedTemplates3D
"""
class vtkSynchronizedTemplates3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSynchronizedTemplates3D - generate isosurface from structured points
Super Class:
vtkPolyDataAlgorithm
vtkSynchronizedTemplates3D is a 3D implementation of the synchronized
template algorithm. Note that vtkContourFilter will automatically
use this class when appropriate.
Caveats:
This filter is specialized to 3D images (aka volumes).
See Also:
vtkContourFilter vtkSynchronizedTemplates2D
"""
class vtkSynchronizedTemplatesCutter3D:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkSynchronizedTemplatesCutter3D - generate cut surface from structured points
Super Class:
vtkSynchronizedTemplates3D
vtkSynchronizedTemplatesCutter3D is an implementation of the synchronized
template algorithm. Note that vtkCutFilter will automatically
use this class when appropriate.
See Also:
vtkContourFilter vtkSynchronizedTemplates3D
"""
class vtkTIFFReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkTIFFReader - read TIFF files
Super Class:
vtkImageReader2
vtkTIFFReader is a source object that reads TIFF files.
It should be able to read almost any TIFF file.
See Also:
vtkTIFFWriter
"""
class vtkTIFFWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkTIFFWriter - write out image data as a TIFF file
Super Class:
vtkImageWriter
vtkTIFFWriter writes image data as a TIFF data file. Data can be written
uncompressed or compressed. Several forms of compression are supported
including packed bits, JPEG, deflation, and LZW. (Note: LZW compression
is currently under patent in the US and is disabled until the patent
expires. However, the mechanism for supporting this compression is available
for those with a valid license or to whom the patent does not apply.)
"""
class vtkTableAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTableAlgorithm - Superclass for algorithms that produce only vtkTables as output
Super Class:
vtkAlgorithm
vtkTableAlgorithm is a convenience class to make writing algorithms
easier. It is also designed to help transition old algorithms to the new
pipeline architecture. There are some assumptions and defaults made by this
class you should be aware of. This class defaults such that your filter
will have one input port and one output port. If that is not the case
simply change it with SetNumberOfInputPorts etc. See this class
constructor for the default. This class also provides a FillInputPortInformation
method that by default says that all inputs will be Table. If that
isn't the case then please override this method in your subclass. This
class breaks out the downstream requests into separate functions such as
ExecuteData and ExecuteInformation. For new algorithms you should
implement RequestData( request, inputVec, outputVec) but for older filters
there is a default implementation that calls the old ExecuteData(output)
signature. For even older filters that don't implement ExecuteData the
default implementation calls the even older Execute() signature.
.SECTION Thanks
Thanks to Brian Wylie for creating this class.
"""
class vtkTableReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkTableReader - read vtkTable data file
Super Class:
vtkDataReader
vtkTableReader is a source object that reads ASCII or binary
vtkTable data files in vtk format. (see text for format details).
The output of this reader is a single vtkTable data object.
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkTable vtkDataReader vtkTableWriter
"""
class vtkTableToGraphFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTableToGraphFilter - convert a vtkTable into a vtkGraph
Super Class:
vtkGraphAlgorithm
Creates a vtkGraph using one or two vtkTables. The first (required)
input table must have one row for each arc in the graph.
The table must have two columns which represent the source and target
node ids. Use
SetInputArrayToProcess(i,0,0,vtkDataObject::FIELD_ASSOCIATION_NONE,"name")
to specify these fields, where i=0 is the source field, and i=1 is the
target field.
The second (optional) vtkTable has one row for each node in the graph.
The table must have a field whose values match those in the arc table.
Use
SetInputArrayToProcess(2,1,0,vtkDataObject::FIELD_ASSOCIATION_NONE,"name")
to specify the node index field. If the node table is not given,
a node will be created for each unique source or target identifier
in the arc table.
Input arrays 0, 1 and 2 must be of the same type, and must be either
vtkStringArray or a subclass of vtkDataArray.
All columns in the tables, including the source, target, and node index
fields, are copied into the arc data and node data of the resulting
vtkGraph. If the node table is not given, the node data will contain
a single "id" column with the same type as the source/target id arrays.
If parallel arcs are collected, not all of the arc data is copied into
the output. Only the source and target id arrays will be transferred.
An additional vtkIdTypeArray column called "weight" is created which
contains the number of times each arc appeared in the input.
If the node table contains positional data, the user may specify these
with input arrays 3, 4 and 5 for x, y, and z-coordinates, respectively.
These arrays must be data arrays. The z-coordinate array is optional,
and if not given the z-coordinates are set to zero.
"""
class vtkTableToTreeFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTableToTreeFilter - Filter that converts a vtkTable to a vtkTree
Super Class:
vtkTreeAlgorithm
vtkTableToTreeFilter is a filter for converting a vtkTable data structure
into a vtkTree data structure. Currently, this will convert the table into
a star, with each row of the table as a child of a new root node.
The columns of the table are passed as node fields of the tree.
"""
class vtkTableWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkTableWriter - write vtkTable to a file
Super Class:
vtkDataWriter
vtkTableWriter is a sink object that writes ASCII or binary
vtkTable data files in vtk format. See text for format details.
Caveats:
Binary files written on one system may not be readable on other systems.
"""
class vtkTemporalDataSetAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTemporalDataSetAlgorithm - Superclass for algorithms that produce only vtkTemporalDataSet as output
Super Class:
vtkAlgorithm
Algorithms that take any type of data object (including composite dataset)
and produce a vtkTemporalDataSet in the output can subclass from this
class.
"""
class vtkTemporalDataSetCache:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTemporalDataSetCache - cache time steps
Super Class:
vtkTemporalDataSetAlgorithm
vtkTemporalDataSetCache caches time step requests of a temporal dataset;
when cached data is requested, it is returned using a shallow copy.
"""
class vtkTemporalInterpolator:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTemporalInterpolator - interpolate temporal datasets
Super Class:
vtkTemporalDataSetAlgorithm
vtkTemporalInterpolator interpolates between two time steps to
produce new data for an arbitrary T.
vtkTemporalInterpolator has two modes of operation. The default
mode is to produce a continuous range of time values as output
which enables a filter downstream to request any value of T within
the range. The interpolator will produce the requested T.
The second mode of operation is enabled by setting
DiscreteTimeStepInterval to a non zero value. When this mode is
activated, the filter will report a finite number of time steps
separated by deltaT across the original range of values.
This mode is useful when a dataset of N time steps has one (or more)
missing datasets for certain T values and you simply wish to smooth
over the missing steps but otherwise use the original data.
"""
class vtkTemporalShiftScale:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTemporalShiftScale - modify the time range/steps of temporal data
Super Class:
vtkTemporalDataSetAlgorithm
vtkTemporalShiftScale modifies the time range or time steps of
the data without changing the data itself. The data is not resampled
by this filter, only the information accompanying the data is modified.
"""
class vtkTemporalSnapToTimeStep:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTemporalSnapToTimeStep - modify the time range/steps of temporal data
Super Class:
vtkTemporalDataSetAlgorithm
vtkTemporalSnapToTimeStep modifies the time range or time steps of
the data without changing the data itself. The data is not resampled
by this filter, only the information accompanying the data is modified.
"""
class vtkTensorGlyph:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTensorGlyph - scale and orient glyph(s) according to tensor eigenvalues and eigenvectors
Super Class:
vtkPolyDataAlgorithm
vtkTensorGlyph is a filter that copies a geometric representation
(specified as polygonal data) to every input point. The geometric
representation, or glyph, can be scaled and/or rotated according to
the tensor at the input point. Scaling and rotation is controlled
by the eigenvalues/eigenvectors of the tensor as follows. For each
tensor, the eigenvalues (and associated eigenvectors) are sorted to
determine the major, medium, and minor eigenvalues/eigenvectors.
If the boolean variable ThreeGlyphs is not set the major eigenvalue
scales the glyph in the x-direction, the medium in the y-direction,
and the minor in the z-direction. Then, the glyph is rotated so
that the glyph's local x-axis lies along the major eigenvector,
y-axis along the medium eigenvector, and z-axis along the minor.
If the boolean variable ThreeGlyphs is set three glyphs are produced,
each of them oriented along an eigenvector and scaled according to the
corresponding eigenvector.
If the boolean variable Symmetric is set each glyph is mirrored (2 or 6
glyphs will be produced)
The x-axis of the source glyph will correspond to the eigenvector
on output. Point (0,0,0) in the source will be placed in the data point.
Variable Length will normally correspond to the distance from the
origin to the tip of the source glyph along the x-axis,
but can be changed to produce other results when Symmetric is on,
e.g. glyphs that do not touch or that overlap.
Please note that when Symmetric is false it will generally be better
to place the source glyph from (-0.5,0,0) to (0.5,0,0), i.e. centred
at the origin. When symmetric is true the placement from (0,0,0) to
(1,0,0) will generally be more convenient.
A scale factor is provided to control the amount of scaling. Also, you
can turn off scaling completely if desired. The boolean variable
ClampScaling controls the maximum scaling (in conjunction with
MaxScaleFactor.) This is useful in certain applications where
singularities or large order of magnitude differences exist in
the eigenvalues.
If the boolean variable ColorGlyphs is set to true the glyphs are
colored. The glyphs can be colored using the input scalars
(SetColorModeToScalars), which is the default, or colored using the
eigenvalues (SetColorModeToEigenvalues).
Another instance variable, ExtractEigenvalues, has been provided to
control extraction of eigenvalues/eigenvectors. If this boolean is
false, then eigenvalues/eigenvectors are not extracted, and the
columns of the tensor are taken as the eigenvectors (the norm of
column, always positive, is the eigenvalue). This allows
additional capability over the vtkGlyph3D object. That is, the
glyph can be oriented in three directions instead of one.
See Also:
vtkGlyph3D vtkPointLoad vtkHyperStreamline
"""
class vtkTessellatorFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTessellatorFilter - approximate nonlinear FEM elements with simplices
Super Class:
vtkUnstructuredGridAlgorithm
This class approximates nonlinear FEM elements with linear simplices.
<b>Warning</b>: This class is temporary and will go away at some point
after ParaView 1.4.0.
This filter rifles through all the cells in an input vtkDataSet. It
tessellates each cell and uses the vtkStreamingTessellator and
vtkDataSetEdgeSubdivisionCriterion classes to generate simplices that
approximate the nonlinear mesh using some approximation metric (encoded
in the particular vtkDataSetEdgeSubdivisionCriterion::EvaluateEdge
implementation). The simplices are placed into the filter's output
vtkDataSet object by the callback routines AddATetrahedron,
AddATriangle, and AddALine, which are registered with the triangulator.
The output mesh will have geometry and any fields specified as
attributes in the input mesh's point data. The attribute's copy flags
are honored, except for normals.
.SECTION Internals
The filter's main member function is RequestData(). This function first
calls SetupOutput() which allocates arrays and some temporary variables
for the primitive callbacks (OutputTriangle and OutputLine which are
called by AddATriangle and AddALine, respectively). Each cell is given
an initial tessellation, which results in one or more calls to
OutputTetrahedron, OutputTriangle or OutputLine to add elements to the
OutputMesh. Finally, Teardown() is called to free the filter's working
space.
See Also:
vtkDataSetToUnstructuredGridFilter vtkDataSet vtkStreamingTessellator
vtkDataSetEdgeSubdivisionCriterion
"""
class vtkTextSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTextSource - create polygonal text
Super Class:
vtkPolyDataAlgorithm
vtkTextSource converts a text string into polygons. This way you can
insert text into your renderings. It uses the 9x15 font from X Windows.
You can specify if you want the background to be drawn or not. The
characters are formed by scan converting the raster font into
quadrilaterals. Colors are assigned to the letters using scalar data.
To set the color of the characters with the source's actor property, set
BackingOff on the text source and ScalarVisibilityOff on the associated
vtkPolyDataMapper. Then, the color can be set using the associated actor's
property.
vtkVectorText generates higher quality polygonal representations of
characters.
See Also:
vtkVectorText
"""
class vtkTexture:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkOpenGLTexture - OpenGL texture map
Super Class:
vtkTexture
vtkOpenGLTexture is a concrete implementation of the abstract class
vtkTexture. vtkOpenGLTexture interfaces to the OpenGL rendering library.
"""
class vtkTextureMapToCylinder:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTextureMapToCylinder - generate texture coordinates by mapping points to cylinder
Super Class:
vtkDataSetAlgorithm
vtkTextureMapToCylinder is a filter that generates 2D texture coordinates
by mapping input dataset points onto a cylinder. The cylinder can either be
user specified or generated automatically. (The cylinder is generated
automatically by computing the axis of the cylinder.) Note that the
generated s-coordinate ranges from 0 to 1
(corresponding to an angle of 0->360 around the axis), while the mapping of
the t-coordinate is controlled by the projection of points along the axis.
To specify a cylinder manually, you must provide two points that
define the axis of the cylinder. The length of the axis will affect the
t-coordinates.
A special ivar controls how the s-coordinate is generated. If PreventSeam
is set to true, the s-texture varies from 0->1 and then 1->0 (corresponding
to angles of 0->180 and 180->360).
Caveats:
Since the resulting texture s-coordinate will lie between (0,1), and the
origin of the texture coordinates is not user-controllable, you may want
to use the class vtkTransformTexture to linearly scale and shift the origin
of the texture coordinates.
See Also:
vtkTextureMapToPlane vtkTextureMapToSphere vtkTextureMapToBox
vtkTransformTexture vtkThresholdTextureCoords
"""
class vtkTextureMapToPlane:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTextureMapToPlane - generate texture coordinates by mapping points to plane
Super Class:
vtkDataSetAlgorithm
vtkTextureMapToPlane is a filter that generates 2D texture coordinates
by mapping input dataset points onto a plane. The plane can either be
user specified or generated automatically. (A least squares method is
used to generate the plane automatically.)
There are two ways you can specify the plane. The first is to provide a
plane normal. In this case the points are projected to a plane, and the
points are then mapped into the user specified s-t coordinate range. For
more control, you can specify a plane with three points: an origin and two
points defining the two axes of the plane. (This is compatible with the
vtkPlaneSource.) Using the second method, the SRange and TRange vectors
are ignored, since the presumption is that the user does not want to scale
the texture coordinates; and you can adjust the origin and axes points to
achieve the texture coordinate scaling you need. Note also that using the
three point method the axes do not have to be orthogonal.
See Also:
vtkTextureMapToBox vtkPlaneSource vtkTextureMapToCylinder
vtkTextureMapToSphere vtkThresholdTextureCoords
"""
class vtkTextureMapToSphere:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTextureMapToSphere - generate texture coordinates by mapping points to sphere
Super Class:
vtkDataSetAlgorithm
vtkTextureMapToSphere is a filter that generates 2D texture coordinates by
mapping input dataset points onto a sphere. The sphere can either be user
specified or generated automatically. (The sphere is generated
automatically by computing the center (i.e., averaged coordinates) of the
sphere.) Note that the generated texture coordinates range between
(0,1). The s-coordinate lies in the angular direction around the z-axis,
measured counter-clockwise from the x-axis. The t-coordinate lies in the
angular direction measured down from the north pole towards the south
pole.
A special ivar controls how the s-coordinate is generated. If PreventSeam
is set to true, the s-texture varies from 0->1 and then 1->0 (corresponding
to angles of 0->180 and 180->360).
Caveats:
The resulting texture coordinates will lie between (0,1), and the texture
coordinates are determined with respect to the modeler's x-y-z coordinate
system. Use the class vtkTransformTextureCoords to linearly scale and
shift the origin of the texture coordinates (if necessary).
See Also:
vtkTextureMapToPlane vtkTextureMapToCylinder vtkTextureMapToBox
vtkTransformTexture vtkThresholdTextureCoords
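Example:
An illustrative sketch, assuming the standard vtk Python bindings; "surface"
stands for any upstream algorithm producing a dataset, and the optional
vtkTransformTextureCoords step is only shown to illustrate rescaling.
  import vtk

  tmapper = vtk.vtkTextureMapToSphere()
  tmapper.SetInputConnection(surface.GetOutputPort())
  tmapper.PreventSeamOn()                  # s runs 0->1->0 to avoid a visible seam
  tmapper.AutomaticSphereGenerationOn()    # compute the sphere center from the data

  xform = vtk.vtkTransformTextureCoords()  # optional scale/shift of the coordinates
  xform.SetInputConnection(tmapper.GetOutputPort())
  xform.SetScale(4.0, 4.0, 1.0)            # repeat the texture four times in r and s
  xform.Update()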
"""
class vtkTexturedSphereSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTexturedSphereSource - create a sphere centered at the origin
Super Class:
vtkPolyDataAlgorithm
vtkTexturedSphereSource creates a polygonal sphere of specified radius
centered at the origin. The resolution (polygonal discretization) in both
the latitude (phi) and longitude (theta) directions can be specified.
It also is possible to create a partial sphere by specifying maximum phi and
theta angles.
"""
class vtkThreshold:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkThreshold - extracts cells where scalar value in cell satisfies threshold criterion
Super Class:
vtkUnstructuredGridAlgorithm
vtkThreshold is a filter that extracts cells from any dataset type that
satisfy a threshold criterion. A cell satisfies the criterion if the
scalar value of (every or any) point satisfies the criterion. The
criterion can take three forms: 1) greater than a particular value; 2)
less than a particular value; or 3) between two values. The output of this
filter is an unstructured grid.
Note that scalar values are available from the point and cell attribute
data. By default, point data is used to obtain scalars, but you can
control this behavior. See the AttributeMode ivar below.
By default only the first scalar value is used in the decision. Use the ComponentMode
and SelectedComponent ivars to control this behavior.
See Also:
vtkThresholdPoints vtkThresholdTextureCoords
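Example:
An illustrative sketch, assuming the standard vtk Python bindings; "source"
stands for any upstream algorithm producing a dataset with point scalars.
  import vtk

  thresh = vtk.vtkThreshold()
  thresh.SetInputConnection(source.GetOutputPort())
  thresh.ThresholdByUpper(100.0)            # keep cells whose scalars are >= 100
  thresh.SetAttributeModeToUsePointData()   # scalars come from point data (the default)
  thresh.AllScalarsOn()                     # every point of the cell must satisfy the criterion
  thresh.Update()
  ugrid = thresh.GetOutput()                # vtkUnstructuredGrid of the surviving cells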
"""
class vtkThresholdPoints:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkThresholdPoints - extracts points whose scalar value satisfies threshold criterion
Super Class:
vtkPolyDataAlgorithm
vtkThresholdPoints is a filter that extracts points from a dataset that
satisfy a threshold criterion. The criterion can take three forms:
1) greater than a particular value; 2) less than a particular value; or
3) between two particular values. The output of the filter is polygonal data.
See Also:
vtkThreshold
"""
class vtkThresholdTable:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkThresholdTable - Thresholds table rows.
Super Class:
vtkTableAlgorithm
vtkThresholdTable uses minimum and/or maximum values to threshold
table rows based on the values in a particular column.
The column to threshold is specified using SetInputArrayToProcess(0, ...).
"""
class vtkThresholdTextureCoords:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkThresholdTextureCoords - compute 1D, 2D, or 3D texture coordinates based on scalar threshold
Super Class:
vtkDataSetAlgorithm
vtkThresholdTextureCoords is a filter that generates texture coordinates for
any input dataset type given a threshold criterion. The criterion can take
three forms: 1) greater than a particular value (ThresholdByUpper());
2) less than a particular value (ThresholdByLower()); or 3) between two
values (ThresholdBetween()). If the threshold criterion is satisfied,
the "in" texture coordinate will be set (this can be specified by the
user). If the threshold criterion is not satisfied the "out" is set.
Caveats:
There is a texture map - texThres.vtk - that can be used in conjunction
with this filter. This map defines a "transparent" region for texture
coordinates 0<=r<0.5, and an opaque full intensity map for texture
coordinates 0.5<r<=1.0. There is a small transition region for r=0.5.
See Also:
vtkThreshold vtkThresholdPoints vtkTextureMapToPlane vtkTextureMapToSphere
vtkTextureMapToCylinder vtkTextureMapToBox
"""
class vtkTimePointToString:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTimePointToString - Converts a timestamp array to a string array
Super Class:
vtkDataObjectAlgorithm
vtkTimePointToString is a filter for converting a timestamp array
into a string array using one of the formats defined in vtkTimePointUtility.h.
Use SetInputArrayToProcess to indicate the array to process.
This array must be an unsigned 64-bit integer array for
DATETIME formats, and may be either an unsigned 32-bit or
unsigned 64-bit array for DATE and TIME formats.
If the new array name is not specified, the array name will be
the old name appended by " [to string]".
"""
class vtkTimeSourceExample:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTimeSource - creates a simple time varying data set.
Super Class:
vtkImageAlgorithm
Creates a small easily understood time varying data set for testing.
The output is a vtkImageData in which the point and cell values vary
over time in a sine wave. The analytic ivar controls whether the output
corresponds to a step function over time or is continuous.
The X and Y Amplitude ivars make the output move in the X and Y directions
over time. The Growing ivar makes the number of cells in the output grow
and then shrink over time.
"""
class vtkTransformFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTransformFilter - transform points and associated normals and vectors
Super Class:
vtkPointSetAlgorithm
vtkTransformFilter is a filter to transform point coordinates, and
associated point normals and vectors. Other point data is passed
through the filter.
An alternative method of transformation is to use vtkActor's methods
to scale, rotate, and translate objects. The difference between the
two methods is that vtkActor's transformation simply affects where
objects are rendered (via the graphics pipeline), whereas
vtkTransformFilter actually modifies point coordinates in the
visualization pipeline. This is necessary for some objects
(e.g., vtkProbeFilter) that require point coordinates as input.
See Also:
vtkAbstractTransform vtkTransformPolyDataFilter vtkActor
"""
class vtkTransformPolyDataFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTransformPolyDataFilter - transform points and associated normals and vectors for polygonal dataset
Super Class:
vtkPolyDataAlgorithm
vtkTransformPolyDataFilter is a filter to transform point
coordinates and associated point and cell normals and
vectors. Other point and cell data is passed through the filter
unchanged. This filter is specialized for polygonal data. See
vtkTransformFilter for more general data.
An alternative method of transformation is to use vtkActor's methods
to scale, rotate, and translate objects. The difference between the
two methods is that vtkActor's transformation simply affects where
objects are rendered (via the graphics pipeline), whereas
vtkTransformPolyDataFilter actually modifies point coordinates in the
visualization pipeline. This is necessary for some objects
(e.g., vtkProbeFilter) that require point coordinates as input.
See Also:
vtkTransform vtkTransformFilter vtkActor
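Example:
An illustrative sketch, assuming the standard vtk Python bindings; "surface"
stands for any upstream algorithm producing polygonal data, and the transform
values are arbitrary.
  import vtk

  transform = vtk.vtkTransform()
  transform.Translate(1.0, 0.0, 0.0)
  transform.RotateZ(45.0)
  transform.Scale(2.0, 2.0, 2.0)

  tfilter = vtk.vtkTransformPolyDataFilter()
  tfilter.SetInputConnection(surface.GetOutputPort())
  tfilter.SetTransform(transform)
  tfilter.Update()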
"""
class vtkTransformTextureCoords:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTransformTextureCoords - transform (scale, rotate, translate) texture coordinates
Super Class:
vtkDataSetAlgorithm
vtkTransformTextureCoords is a filter that operates on texture
coordinates. It ingests any type of dataset, and outputs a dataset of the
same type. The filter lets you scale, translate, and rotate texture
coordinates. For example, by using the Scale ivar, you can shift
texture coordinates that range from (0->1) to range from (0->10) (useful
for repeated patterns).
The filter operates on texture coordinates of dimension 1->3. The texture
coordinates are referred to as r-s-t. If the texture map is two dimensional,
the t-coordinate (and operations on the t-coordinate) are ignored.
See Also:
vtkTextureMapToPlane vtkTextureMapToBox vtkTextureMapToCylinder
vtkTextureMapToSphere vtkThresholdTextureCoords vtkTexture
"""
class vtkTransformToGrid:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTransformToGrid - create a grid for a vtkGridTransform
Super Class:
vtkAlgorithm
vtkTransformToGrid takes any transform as input and produces a grid
for use by a vtkGridTransform. This can be used, for example, to
invert a grid transform, concatenate two grid transforms, or to
convert a thin plate spline transform into a grid transform.
See Also:
vtkGridTransform vtkThinPlateSplineTransform vtkAbstractTransform
"""
class vtkTreeAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTreeAlgorithm - Superclass for algorithms that produce only Tree as output
Super Class:
vtkAlgorithm
vtkTreeAlgorithm is a convenience class to make writing algorithms
easier. It is also designed to help transition old algorithms to the new
pipeline architecture. There are some assumptions and defaults made by this
class you should be aware of. This class defaults such that your filter
will have one input port and one output port. If that is not the case
simply change it with SetNumberOfInputPorts etc. See this class
constructor for the default. This class also provides a FillInputPortInformation
method that by default says that all inputs will be Tree. If that
isn't the case then please override this method in your subclass. This
class breaks out the downstream requests into separate functions such as
ExecuteData and ExecuteInformation. For new algorithms you should
implement RequestData( request, inputVec, outputVec) but for older filters
there is a default implementation that calls the old ExecuteData(output)
signature. For even older filters that don't implement ExecuteData the
default implementation calls the even older Execute() signature.
"""
class vtkTreeFieldAggregator:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTreeFieldAggregator - aggregate field values from the leaves up the tree
Super Class:
vtkTreeAlgorithm
vtkTreeFieldAggregator may be used to assign sizes to all the vertices in the
tree, based on the sizes of the leaves. The size of a vertex will equal
the sum of the sizes of the child vertices. If you have a data array with
values for all leaves, you may specify that array, and the values will
be filled in for interior tree vertices. If you do not yet have an array,
you may tell the filter to create a new array, assuming that the size
of each leaf vertex is 1. You may optionally set a flag to first take the
log of all leaf values before aggregating.
"""
class vtkTreeLevelsFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTreeLevelsFilter - adds level and leaf fields to a vtkTree
Super Class:
vtkTreeAlgorithm
The filter currently adds two arrays to the incoming vtkTree data structure:
1) "levels" - the distance of the vertex from the root (the root is 0,
and 1 is added for each level down from the root);
2) "leaf" - an array that simply indicates whether the vertex is a leaf or not.
.SECTION Thanks
Thanks to Brian Wylie from Sandia National Laboratories for creating this
class.
"""
class vtkTreeMapLayout:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTreeMapLayout - layout a vtkTree into a tree map
Super Class:
vtkTreeAlgorithm
vtkTreeMapLayout assigns rectangular regions to each vertex in the tree,
creating a tree map. The data is added as a data array with four
components per tuple representing the location and size of the
rectangle using the format (Xmin, Xmax, Ymin, Ymax).
This algorithm relies on a helper class to perform the actual layout.
This helper class is a subclass of vtkTreeMapLayoutStrategy.
.SECTION Thanks
Thanks to Brian Wylie and Ken Moreland from Sandia National Laboratories
for help developing this class.
Tree map concept comes from:
Shneiderman, B. 1992. Tree visualization with tree-maps: 2-d space-filling approach.
ACM Trans. Graph. 11, 1 (Jan. 1992), 92-99.
"""
class vtkTreeMapToPolyData:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTreeMapToPolyData - converts a tree to a polygonal data representing a tree map
Super Class:
vtkPolyDataAlgorithm
This algorithm requires that the vtkTreeMapLayout filter has already been applied to the
data in order to create the quadruple array (min x, max x, min y, max y) of
bounds for each vertex of the tree.
"""
class vtkTreeReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkTreeReader - read vtkTree data file
Super Class:
vtkDataReader
vtkTreeReader is a source object that reads ASCII or binary
vtkTree data files in vtk format. (see text for format details).
The output of this reader is a single vtkTree data object.
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkTree vtkDataReader vtkTreeWriter
"""
class vtkTreeWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkTreeWriter - write vtkTree data to a file
Super Class:
vtkDataWriter
vtkTreeWriter is a sink object that writes ASCII or binary
vtkTree data files in vtk format. See text for format details.
Caveats:
Binary files written on one system may not be readable on other systems.
"""
class vtkTriangleFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTriangleFilter - create triangle polygons from input polygons and triangle strips
Super Class:
vtkPolyDataAlgorithm
vtkTriangleFilter generates triangles from input polygons and triangle
strips. The filter also will pass through vertices and lines, if
requested.
"""
class vtkTriangularTCoords:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTriangularTCoords - 2D texture coordinates based for triangles.
Super Class:
vtkPolyDataAlgorithm
vtkTriangularTCoords is a filter that generates texture coordinates
for triangles. Texture coordinates for each triangle are:
(0,0), (1,0) and (.5,sqrt(3)/2). This filter assumes that the triangle
texture map is symmetric about the center of the triangle. Thus the order
of the texture coordinates is not important. The procedural texture
in vtkTriangularTexture is designed with this symmetry. For more information
see the paper "Opacity-modulating Triangular Textures for Irregular
Surfaces," by Penny Rheingans, IEEE Visualization '96, pp. 219-225.
See Also:
vtkTriangularTexture vtkThresholdPoints vtkTextureMapToPlane
vtkTextureMapToSphere vtkTextureMapToCylinder vtkTextureMapToBox
"""
class vtkTriangularTexture:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTriangularTexture - generate 2D triangular texture map
Super Class:
vtkImageAlgorithm
vtkTriangularTexture is a filter that generates a 2D texture map based on
the paper "Opacity-modulating Triangular Textures for Irregular Surfaces,"
by Penny Rheingans, IEEE Visualization '96, pp. 219-225.
The textures assume texture coordinates of (0,0), (1,0) and
(.5, sqrt(3)/2). The sequence of texture values is the same along each
edge of the triangular texture map. So, the assignment order of texture
coordinates is arbitrary.
See Also:
vtkTriangularTCoords
"""
class vtkTrivialProducer:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTrivialProducer - Producer for stand-alone data objects.
Super Class:
vtkAlgorithm
vtkTrivialProducer allows stand-alone data objects to be connected
as inputs in a pipeline. All data objects that are connected to a
pipeline involving vtkAlgorithm must have a producer. This trivial
producer allows data objects that are hand-constructed in a program
without another vtk producer to be connected.
"""
class vtkTubeFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkTubeFilter - filter that generates tubes around lines
Super Class:
vtkPolyDataAlgorithm
vtkTubeFilter is a filter that generates a tube around each input line.
The tubes are made up of triangle strips and rotate around the tube with
the rotation of the line normals. (If no normals are present, they are
computed automatically.) The radius of the tube can be set to vary with
scalar or vector value. If the radius varies with scalar value the radius
is linearly adjusted. If the radius varies with vector value, a mass
flux preserving variation is used. The number of sides for the tube also
can be specified. You can also specify which of the sides are visible. This
is useful for generating interesting striping effects. Other options
include the ability to cap the tube and generate texture coordinates.
Texture coordinates can be used with an associated texture map to create
interesting effects such as marking the tube with stripes corresponding
to length or time.
This filter is typically used to create thick or dramatic lines. Another
common use is to combine this filter with vtkStreamLine to generate
streamtubes.
Caveats:
The number of tube sides must be greater than 3. If you wish to use fewer
sides (i.e., a ribbon), use vtkRibbonFilter.
The input line must not have duplicate points, or normals at points that
are parallel to the incoming/outgoing line segments. (Duplicate points
can be removed with vtkCleanPolyData.) If a line does not meet these
criteria, then that line is not tubed.
See Also:
vtkRibbonFilter vtkStreamLine
"""
class vtkUGFacetReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkUGFacetReader - read EDS Unigraphics facet files
Super Class:
vtkPolyDataAlgorithm
vtkUGFacetReader is a source object that reads Unigraphics facet files.
Unigraphics is a solid modeling system; facet files are the polygonal
plot files it uses to create 3D plots.
"""
class vtkUnstructuredGridAlgorithm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkUnstructuredGridAlgorithm - Superclass for algorithms that produce only unstructured grid as output
Super Class:
vtkAlgorithm
"""
class vtkUnstructuredGridGeometryFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkUnstructuredGridGeometryFilter - extract geometry from an unstructured grid
Super Class:
vtkUnstructuredGridAlgorithm
vtkUnstructuredGridGeometryFilter is a filter to extract
geometry (and associated data) from an unstructured grid. It differs from
vtkGeometryFilter by not tessellating higher order faces: 2D faces of
quadratic 3D cells will be quadratic. A quadratic edge is extracted as a
quadratic edge. For that purpose, the output of this filter is an
unstructured grid, not a polydata.
Also, the face of a voxel is a pixel, not a quad.
Geometry is obtained as follows: all 0D, 1D, and 2D cells are extracted.
All 2D faces that are used by only one 3D cell (i.e., boundary faces) are
extracted. It also is possible to specify conditions on point ids, cell ids,
and on bounding box (referred to as "Extent") to control the extraction
process.
Caveats:
When vtkUnstructuredGridGeometryFilter extracts cells (or boundaries of
cells) it will (by default) merge duplicate vertices. This may cause
problems in some cases. Turn merging off to prevent this from occurring.
See Also:
vtkGeometryFilter
"""
class vtkUnstructuredGridReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkUnstructuredGridReader - read vtk unstructured grid data file
Super Class:
vtkDataReader
vtkUnstructuredGridReader is a source object that reads ASCII or binary
unstructured grid data files in vtk format. (see text for format details).
The output of this reader is a single vtkUnstructuredGrid data object.
The superclass of this class, vtkDataReader, provides many methods for
controlling the reading of the data file, see vtkDataReader for more
information.
Caveats:
Binary files written on one system may not be readable on other systems.
See Also:
vtkUnstructuredGrid vtkDataReader
"""
class vtkUnstructuredGridWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkUnstructuredGridWriter - write vtk unstructured grid data file
Super Class:
vtkDataWriter
vtkUnstructuredGridWriter is a source object that writes ASCII or binary
unstructured grid data files in vtk format. See text for format details.
Caveats:
Binary files written on one system may not be readable on other systems.
"""
class vtkVectorDot:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkVectorDot - generate scalars from dot product of vectors and normals (e.g., show displacement plot)
Super Class:
vtkDataSetAlgorithm
vtkVectorDot is a filter to generate scalar values from a dataset.
The scalar value at a point is created by computing the dot product
between the normal and vector at that point. Combined with the appropriate
color map, this can show nodal lines/mode shapes of vibration, or a
displacement plot.
"""
class vtkVectorNorm:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkVectorNorm - generate scalars from Euclidean norm of vectors
Super Class:
vtkDataSetAlgorithm
vtkVectorNorm is a filter that generates scalar values by computing
Euclidean norm of vector triplets. Scalars can be normalized
0<=s<=1 if desired.
Note that this filter operates on point or cell attribute data, or
both. By default, the filter operates on both point and cell data
if vector point and cell data, respectively, are available from the
input. Alternatively, you can choose to generate scalar norm values
for just cell or point data.
"""
class vtkVectorText:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkVectorText - create polygonal text
Super Class:
vtkPolyDataAlgorithm
See Also:
vtkTextMapper vtkCaptionActor2D
"""
class vtkVertexDegree:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkVertexDegree - Adds an attribute array with the degree of each vertex
Super Class:
vtkAbstractGraphAlgorithm
Adds an attribute array with the degree of each vertex.
"""
class vtkVertexGlyphFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkVertexGlyphFilter - Make a vtkPolyData with a vertex on each point.
Super Class:
vtkPolyDataAlgorithm
This filter throws away all of the cells in the input and replaces them with
a vertex on each point. The intended use of this filter is roughly
equivalent to the vtkGlyph3D filter, except this filter is specifically for
data that has many vertices, making the rendered result faster and less
cluttered than the glyph filter.
"""
class vtkVideoSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkVideoSource - Superclass of video input devices for VTK
Super Class:
vtkImageAlgorithm
vtkVideoSource is a superclass for video input interfaces for VTK.
The goal is to provide an interface which is very similar to the
interface of a VCR, where the 'tape' is an internal frame buffer
capable of holding a preset number of video frames. Specialized
versions of this class record input from various video input sources.
This base class records input from a noise source.
Caveats:
You must call the ReleaseSystemResources() method before the application
exits. Otherwise the application might hang while trying to exit.
See Also:
vtkWin32VideoSource vtkMILVideoSource
"""
class vtkVolume16Reader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkVolume16Reader - read 16 bit image files
Super Class:
vtkVolumeReader
vtkVolume16Reader is a source object that reads 16 bit image files.
Volume16Reader creates structured point datasets. The dimension of the
dataset depends upon the number of files read. Reading a single file
results in a 2D image, while reading more than one file results in a
3D volume.
File names are created using FilePattern and FilePrefix as follows:
sprintf (filename, FilePattern, FilePrefix, number);
where number is in the range ImageRange[0] to ImageRange[1]. If
ImageRange[1] <= ImageRange[0], then slice number ImageRange[0] is
read. Thus to read an image set ImageRange[0] = ImageRange[1] = slice
number. The default behavior is to read a single file (i.e., image slice 1).
The DataMask instance variable is used to read data files with embedded
connectivity or segmentation information. For example, some data has
the high order bit set to indicate connected surface. The DataMask allows
you to select this data. Other important ivars include HeaderSize, which
allows you to skip over initial info, and SwapBytes, which turns on/off
byte swapping.
The Transform instance variable specifies a permutation transformation
to map slice space into world space. vtkImageReader has replaced the
functionality of this class and should be used instead.
See Also:
vtkSliceCubes vtkMarchingCubes vtkImageReader
"""
class vtkVoxelContoursToSurfaceFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkVoxelContoursToSurfaceFilter - create surface from contours
Super Class:
vtkPolyDataAlgorithm
vtkVoxelContoursToSurfaceFilter is a filter that takes contours and
produces surfaces. There are some restrictions for the contours:
- The contours are input as vtkPolyData, with the contours being
polys in the vtkPolyData.
- The contours lie on XY planes - each contour has a constant Z
- The contours are ordered in the polys of the vtkPolyData such
that all contours on the first (lowest) XY plane are first, then
continuing in order of increasing Z value.
- The X, Y and Z coordinates are all integer values.
- The desired sampling of the contour data is 1x1x1 - Aspect can
be used to control the aspect ratio in the output polygonal
dataset.
This filter takes the contours and produces a structured points
dataset of signed floating point numbers indicating the distance from
a contour. A contouring filter is then applied to generate 3D
surfaces from a stack of 2D contour distance slices. This is
done in a streaming fashion so as not to use too much memory.
See Also:
vtkPolyDataAlgorithm
"""
class vtkVoxelModeller:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkVoxelModeller - convert an arbitrary dataset to a voxel representation
Super Class:
vtkImageAlgorithm
vtkVoxelModeller is a filter that converts an arbitrary data set to a
structured point (i.e., voxel) representation. It is very similar to
vtkImplicitModeller, except that it doesn't record distance; instead it
records occupancy. As such, it stores its results in the more compact
form of 0/1 bits.
See Also:
vtkImplicitModeller
"""
class vtkWarpLens:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkWarpLens - deform geometry by applying lens distortion
Super Class:
vtkPointSetAlgorithm
vtkWarpLens is a filter that modifies point coordinates by moving
in accord with a lens distortion model.
"""
class vtkWarpScalar:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkWarpScalar - deform geometry with scalar data
Super Class:
vtkPointSetAlgorithm
vtkWarpScalar is a filter that modifies point coordinates by moving
points along point normals by the scalar amount times the scale factor.
Useful for creating carpet or x-y-z plots.
If normals are not present in data, the Normal instance variable will
be used as the direction along which to warp the geometry. If normals are
present but you would like to use the Normal instance variable, set the
UseNormal boolean to true.
If XYPlane boolean is set true, then the z-value is considered to be
a scalar value (still scaled by scale factor), and the displacement is
along the z-axis. If scalars are also present, these are copied through
and can be used to color the surface.
Note that the filter passes both its point data and cell data to
its output, except for normals, since these are distorted by the
warping.
"""
class vtkWarpTo:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkWarpTo - deform geometry by warping towards a point
Super Class:
vtkPointSetAlgorithm
vtkWarpTo is a filter that modifies point coordinates by moving the
points towards a user specified position.
"""
class vtkWarpVector:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkWarpVector - deform geometry with vector data
Super Class:
vtkPointSetAlgorithm
vtkWarpVector is a filter that modifies point coordinates by moving
points along vector times the scale factor. Useful for showing flow
profiles or mechanical deformation.
The filter passes both its point data and cell data to its output.
"""
class vtkWeightedTransformFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkWeightedTransformFilter - transform based on per-point or per-cell weighting functions.
Super Class:
vtkPointSetAlgorithm
Caveats:
Weighted combinations of normals and vectors are probably not appropriate
in many cases. Surface normals are treated somewhat specially, but
in many cases you may need to regenerate the surface normals.
Cell data can only be transformed if all transforms are linear.
See Also:
vtkAbstractTransform vtkLinearTransform vtkTransformPolyDataFilter vtkActor
"""
class vtkWin32VideoSource:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkWin32VideoSource - Video-for-Windows video digitizer
Super Class:
vtkVideoSource
vtkWin32VideoSource grabs frames or streaming video from a
Video for Windows compatible device on the Win32 platform.
Caveats:
With some capture cards, if this class is leaked and ReleaseSystemResources
is not called, you may have to reboot before you can capture again.
vtkVideoSource used to keep a global list and delete the video sources
if your program leaked; due to crashes on exit, that behavior was removed.
See Also:
vtkVideoSource vtkMILVideoSource
"""
class vtkWindowToImageFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkWindowToImageFilter - Use a vtkWindow as input to image pipeline
Super Class:
vtkAlgorithm
vtkWindowToImageFilter provides methods needed to read the data in
a vtkWindow and use it as input to the imaging pipeline. This is
useful for saving an image to a file for example. The window can
be read as either RGB or RGBA pixels; in addition, the depth buffer
can also be read. RGB and RGBA pixels are of type unsigned char,
while Z-Buffer data is returned as floats. Use this filter
to convert RenderWindows or ImageWindows to an image format.
Caveats:
A vtkWindow doesn't behave like other parts of the VTK pipeline: its
modification time doesn't get updated when an image is rendered. As a
result, naive use of vtkWindowToImageFilter will produce an image of
the first image that the window rendered, but which is never updated
on subsequent window updates. This behavior is unexpected and in
general undesirable.
To force an update of the output image, call vtkWindowToImageFilter's
Modified method after re ...
[Truncated]
See Also:
vtkWindow vtkRenderLargeImage
"""
class vtkWindowedSincPolyDataFilter:
kits = ['vtk_kit']
cats = ['VTK basic filters']
help = \
"""vtkWindowedSincPolyDataFilter - adjust point positions using a windowed sinc function interpolation kernel
Super Class:
vtkPolyDataAlgorithm
vtkWindowedSincPolyDataFilter adjusts point coordinates using a windowed
sinc function interpolation kernel. The effect is to "relax" the mesh,
making the cells better shaped and the vertices more evenly distributed.
Note that this filter operates on the lines, polygons, and triangle strips
composing an instance of vtkPolyData. Vertex or poly-vertex cells are
never modified.
The algorithm proceeds as follows. For each vertex v, a topological and
geometric analysis is performed to determine which vertices are connected
to v, and which cells are connected to v. Then, a connectivity array is
constructed for each vertex. (The connectivity array is a list of lists
of vertices that directly attach to each vertex.) Next, an iteration
phase begins over all vertices. For each vertex v, the coordinates of v
are modified using a windowed sinc function interpolation kernel.
Taubin describes this methodology in the IBM tech report RC-20404
(#90237, dated 3/12/96) "Optimal Surface Smoothing as Filter Design"
G. Taubin, T. Zhang and G. Golub. (Zhang and Golub are at Stanford
University).
This report discusses using standard signal processing low-pass filters
(in particular windowed sinc functions) to smooth polyhedra. The
transfer functions of the low-pass filters are approximated by
Chebyshev polynomials. This facilitates applying the filters in an
iterative diffusion process (as opposed to a kernel convolution). The
more smoothing iterations applied, the higher the degree of polynomial
approximating the low-pass filter transfer function. Each smoothing
iteration, therefore, applies the next higher term of the Chebyshev
filter approximation to the polyhedron. This decoupling of the filter
into an iteratively applied polynomial is possible since the Chebyshev
polynomials are orthogonal, i.e. increasing the order of the
approximation to the filter transfer function does not alter the
previously calculated coefficients for the low order terms.
Note: Care must be taken to avoid smoothing with too few iterations.
A Chebyshev approximation with too few terms is a poor approximation.
The first few smoothing iterations represent a severe scaling and
translation of the data. Subsequent iterations cause the smoothed
polyhedron to converge to the true location and scale of the object.
We have attempted to protect against this by automatically adjusting
the filter, effectively widening the pass band. This adjustment is only
possible if the number of iterations is greater than 1. Note that this
sacrifices some degree of smoothing for model integrity. For those
interested, the filter is adjusted by searching for a value sigma
such that the actual pass band is k_pb + sigma and such that the
filter transfer function evaluates to unity at k_pb, i.e. f(k_pb) = 1.
To improve the numerical stability of the solution and minimize the
scaling and translation effects, the algorithm can translate and
scale the position coordinates to within the unit cube [-1, 1],
perform the smoothing, and translate and scale the position
coordinates back to the original coordinate frame. This mode is
controlled with the NormalizeCoordinatesOn() /
NormalizeCoordinatesOff() methods. For legacy reasons, the default
is NormalizeCoordinatesOff.
This implementation is currently limited to using an interpolation
kernel based on Hamming windows. Other windows (such as Hann, Blackman,
Kaiser, Lanczos, Gaussian, and exponential windows) could be used
instead.
There are some special instance variables used to control the execution
of this filter. (These ivars basically control what vertices can be
smoothed, and the creation of the connectivity array.) The
BoundarySmoothing ivar enables/disables the smoothing operation on
vertices that are on the "boundary" of the mesh. A boundary vertex is one
that is surrounded by a semi-cycle of polygons (or used by a single
line).
Another important ivar is FeatureEdgeSmoothing. If this ivar is
enabled, then interior vertices are classified as either "simple",
"interior edge", or "fixed", and smoothed differently. (Interior
vertices are manifold vertices surrounded by a cycle of polygons; or used
by two line cells.) The classification is based on the number of feature
edges attached to v. A feature edge occurs when the angle between the two
surface normals of a polygon sharing an edge is greater than the
FeatureAngle ivar. Then, vertices used by no feature edges are classified
"simple", vertices used by exactly two feature edges are classified
"interior edge", and all others are "fixed" vertices.
Once the classification is known, the vertices are smoothed
differently. Corner (i.e., fixed) vertices are not smoothed at all.
Simple vertices are smoothed as before. Interior edge vertices are
smoothed only along their two connected edges, and only if the angle
between the edges is less than the EdgeAngle ivar.
The total smoothing can be controlled by using two ivars. The
NumberOfIterations determines the maximum number of smoothing passes.
The NumberOfIterations corresponds to the degree of the polynomial that
is used to approximate the windowed sinc function. Ten or twenty
iterations is all that is usually necessary. Contrast this with
vtkSmoothPolyDataFilter which usually requires 100 to 200 smoothing
iterations. vtkSmoothPolyDataFilter is also not an approximation to
an ideal low-pass filter, which can cause the geometry to shrink as the
amount of smoothing increases.
The second ivar is the specification of the PassBand for the windowed
sinc filter. By design, the PassBand is specified as a floating point
number between 0 and 2. Lower PassBand values produce more smoothing.
A good default value for the PassBand is 0.1 (for those interested, the
PassBand (and frequencies) for PolyData are based on the valence of the
vertices; this limits all the frequency modes in a polyhedral mesh to
between 0 and 2.)
There are two instance variables that control the generation of error
data. If the ivar GenerateErrorScalars is on, then a scalar value indicating
the distance of each vertex from its original position is computed. If the
ivar GenerateErrorVectors is on, then a vector representing change in
position is computed.
Caveats:
The smoothing operation reduces high frequency information in the
geometry of the mesh. With excessive smoothing important details may be
lost. Enabling FeatureEdgeSmoothing helps reduce this effect, but cannot
entirely eliminate it.
See Also:
vtkSmoothPolyDataFilter vtkDecimate vtkDecimatePro
"""
class vtkXMLDataSetWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLDataSetWriter - Write any type of VTK XML file.
Super Class:
vtkXMLWriter
vtkXMLDataSetWriter is a wrapper around the VTK XML file format
writers. Given an input vtkDataSet, the correct writer is
automatically selected based on the type of input.
See Also:
vtkXMLImageDataWriter vtkXMLStructuredGridWriter
vtkXMLRectilinearGridWriter vtkXMLPolyDataWriter
vtkXMLUnstructuredGridWriter
"""
class vtkXMLHierarchicalBoxDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLHierarchicalBoxDataReader - Reader for hierarchical datasets
Super Class:
vtkXMLHierarchicalDataReader
vtkXMLHierarchicalBoxDataReader reads the VTK XML hierarchical data file
format. XML hierarchical data files are meta-files that point to a list
of serial VTK XML files. When reading in parallel, it will distribute
sub-blocks among processors. If the number of sub-blocks is less than
the number of processors, some processors will not have any sub-blocks
for that level. If the number of sub-blocks is larger than the
number of processors, each processor will possibly have more than
1 sub-block.
"""
class vtkXMLHierarchicalDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLHierarchicalDataReader - Reader for hierarchical datasets
Super Class:
vtkXMLMultiGroupDataReader
vtkXMLHierarchicalDataReader reads the VTK XML hierarchical data file
format. XML hierarchical data files are meta-files that point to a list
of serial VTK XML files. When reading in parallel, it will distribute
sub-blocks among processors. If the number of sub-blocks is less than
the number of processors, some processors will not have any sub-blocks
for that level. If the number of sub-blocks is larger than the
number of processors, each processor will possibly have more than
1 sub-block.
"""
class vtkXMLHyperOctreeReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLHyperOctreeReader - Read VTK XML HyperOctree files.
Super Class:
vtkXMLDataReader
vtkXMLHyperOctreeReader reads the VTK XML HyperOctree file
format. One rectilinear grid file can be read to produce one
output. Streaming is supported. The standard extension for this
reader's file format is "vto". This reader is also used to read a
single piece of the parallel file format.
See Also:
vtkXMLPHyperOctreeReader
"""
class vtkXMLHyperOctreeWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLHyperOctreeWriter - Write VTK XML HyperOctree files.
Super Class:
vtkXMLWriter
vtkXMLHyperOctreeWriter writes the VTK XML HyperOctree file
format. One HyperOctree input can be written into one file in
any number of streamed pieces. The standard extension for this
writer's file format is "vto". This writer is also used to write a
single piece of the parallel file format.
See Also:
vtkXMLPHyperOctreeWriter
"""
class vtkXMLImageDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLImageDataReader - Read VTK XML ImageData files.
Super Class:
vtkXMLStructuredDataReader
vtkXMLImageDataReader reads the VTK XML ImageData file format. One
image data file can be read to produce one output. Streaming is
supported. The standard extension for this reader's file format is
"vti". This reader is also used to read a single piece of the
parallel file format.
See Also:
vtkXMLPImageDataReader
"""
class vtkXMLImageDataWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLImageDataWriter - Write VTK XML ImageData files.
Super Class:
vtkXMLStructuredDataWriter
vtkXMLImageDataWriter writes the VTK XML ImageData file format.
One image data input can be written into one file in any number of
streamed pieces. The standard extension for this writer's file
format is "vti". This writer is also used to write a single piece
of the parallel file format.
See Also:
vtkXMLPImageDataWriter
"""
class vtkXMLMultiBlockDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLMultiBlockDataReader - Reader for multi-block datasets
Super Class:
vtkXMLMultiGroupDataReader
vtkXMLMultiBlockDataReader reads the VTK XML multi-block data file
format. XML multi-block data files are meta-files that point to a list
of serial VTK XML files. When reading in parallel, it will distribute
sub-blocks among processors. If the number of sub-blocks is less than
the number of processors, some processors will not have any sub-blocks
for that block. If the number of sub-blocks is larger than the
number of processors, each processor will possibly have more than
1 sub-block.
"""
class vtkXMLMultiGroupDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLMultiGroupDataReader - Reader for multi-group datasets
Super Class:
vtkXMLReader
vtkXMLMultiGroupDataReader reads the VTK XML multi-group data file
format. XML multi-group data files are meta-files that point to a list
of serial VTK XML files. When reading in parallel, it will distribute
sub-blocks among processors. If the number of sub-blocks is less than
the number of processors, some processors will not have any sub-blocks
for that group. If the number of sub-blocks is larger than the
number of processors, each processor will possibly have more than
1 sub-block.
"""
class vtkXMLMultiGroupDataWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLMultiGroupDataWriter - Writer for multi-group datasets
Super Class:
vtkXMLWriter
vtkXMLMultiGroupDataWriter writes (serially) the VTK XML multi-group,
multi-block hierarchical and hierarchical box files. XML multi-group
data files are meta-files that point to a list of serial VTK XML files.
See Also:
vtkXMLPMultiGroupDataWriter
"""
class vtkXMLPDataSetWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLPDataSetWriter - Write any type of PVTK XML file.
Super Class:
vtkXMLPDataWriter
vtkXMLPDataSetWriter is a wrapper around the PVTK XML file format
writers. Given an input vtkDataSet, the correct writer is
automatically selected based on the type of input.
See Also:
vtkXMLPImageDataWriter vtkXMLPStructuredGridWriter
vtkXMLPRectilinearGridWriter vtkXMLPPolyDataWriter
vtkXMLPUnstructuredGridWriter
"""
class vtkXMLPImageDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLPImageDataReader - Read PVTK XML ImageData files.
Super Class:
vtkXMLPStructuredDataReader
vtkXMLPImageDataReader reads the PVTK XML ImageData file format.
This reads the parallel format's summary file and then uses
vtkXMLImageDataReader to read data from the individual ImageData
piece files. Streaming is supported. The standard extension for
this reader's file format is "pvti".
See Also:
vtkXMLImageDataReader
"""
class vtkXMLPImageDataWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLPImageDataWriter - Write PVTK XML ImageData files.
Super Class:
vtkXMLPStructuredDataWriter
vtkXMLPImageDataWriter writes the PVTK XML ImageData file format.
One image data input can be written into a parallel file format
with any number of pieces spread across files. The standard
extension for this writer's file format is "pvti". This writer
uses vtkXMLImageDataWriter to write the individual piece files.
See Also:
vtkXMLImageDataWriter
"""
class vtkXMLPPolyDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLPPolyDataReader - Read PVTK XML PolyData files.
Super Class:
vtkXMLPUnstructuredDataReader
vtkXMLPPolyDataReader reads the PVTK XML PolyData file format.
This reads the parallel format's summary file and then uses
vtkXMLPolyDataReader to read data from the individual PolyData
piece files. Streaming is supported. The standard extension for
this reader's file format is "pvtp".
See Also:
vtkXMLPolyDataReader
"""
class vtkXMLPPolyDataWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLPPolyDataWriter - Write PVTK XML PolyData files.
Super Class:
vtkXMLPUnstructuredDataWriter
vtkXMLPPolyDataWriter writes the PVTK XML PolyData file format.
One poly data input can be written into a parallel file format with
any number of pieces spread across files. The standard extension
for this writer's file format is "pvtp". This writer uses
vtkXMLPolyDataWriter to write the individual piece files.
See Also:
vtkXMLPolyDataWriter
"""
class vtkXMLPRectilinearGridReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLPRectilinearGridReader - Read PVTK XML RectilinearGrid files.
Super Class:
vtkXMLPStructuredDataReader
vtkXMLPRectilinearGridReader reads the PVTK XML RectilinearGrid
file format. This reads the parallel format's summary file and
then uses vtkXMLRectilinearGridReader to read data from the
individual RectilinearGrid piece files. Streaming is supported.
The standard extension for this reader's file format is "pvtr".
See Also:
vtkXMLRectilinearGridReader
"""
class vtkXMLPRectilinearGridWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLPRectilinearGridWriter - Write PVTK XML RectilinearGrid files.
Super Class:
vtkXMLPStructuredDataWriter
vtkXMLPRectilinearGridWriter writes the PVTK XML RectilinearGrid
file format. One rectilinear grid input can be written into a
parallel file format with any number of pieces spread across files.
The standard extension for this writer's file format is "pvtr".
This writer uses vtkXMLRectilinearGridWriter to write the
individual piece files.
See Also:
vtkXMLRectilinearGridWriter
"""
class vtkXMLPStructuredGridReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLPStructuredGridReader - Read PVTK XML StructuredGrid files.
Super Class:
vtkXMLPStructuredDataReader
vtkXMLPStructuredGridReader reads the PVTK XML StructuredGrid file
format. This reads the parallel format's summary file and then
uses vtkXMLStructuredGridReader to read data from the individual
StructuredGrid piece files. Streaming is supported. The standard
extension for this reader's file format is "pvts".
See Also:
vtkXMLStructuredGridReader
"""
class vtkXMLPStructuredGridWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLPStructuredGridWriter - Write PVTK XML StructuredGrid files.
Super Class:
vtkXMLPStructuredDataWriter
vtkXMLPStructuredGridWriter writes the PVTK XML StructuredGrid
file format. One structured grid input can be written into a
parallel file format with any number of pieces spread across files.
The standard extension for this writer's file format is "pvts".
This writer uses vtkXMLStructuredGridWriter to write the individual
piece files.
See Also:
vtkXMLStructuredGridWriter
"""
class vtkXMLPUnstructuredGridReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLPUnstructuredGridReader - Read PVTK XML UnstructuredGrid files.
Super Class:
vtkXMLPUnstructuredDataReader
vtkXMLPUnstructuredGridReader reads the PVTK XML UnstructuredGrid
file format. This reads the parallel format's summary file and
then uses vtkXMLUnstructuredGridReader to read data from the
individual UnstructuredGrid piece files. Streaming is supported.
The standard extension for this reader's file format is "pvtu".
See Also:
vtkXMLUnstructuredGridReader
"""
class vtkXMLPUnstructuredGridWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLPUnstructuredGridWriter - Write PVTK XML UnstructuredGrid files.
Super Class:
vtkXMLPUnstructuredDataWriter
vtkXMLPUnstructuredGridWriter writes the PVTK XML UnstructuredGrid
file format. One unstructured grid input can be written into a
parallel file format with any number of pieces spread across files.
The standard extension for this writer's file format is "pvtu".
This writer uses vtkXMLUnstructuredGridWriter to write the
individual piece files.
See Also:
vtkXMLUnstructuredGridWriter
"""
class vtkXMLPolyDataReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLPolyDataReader - Read VTK XML PolyData files.
Super Class:
vtkXMLUnstructuredDataReader
vtkXMLPolyDataReader reads the VTK XML PolyData file format. One
polygonal data file can be read to produce one output. Streaming
is supported. The standard extension for this reader's file format
is "vtp". This reader is also used to read a single piece of the
parallel file format.
See Also:
vtkXMLPPolyDataReader
"""
class vtkXMLPolyDataWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLPolyDataWriter - Write VTK XML PolyData files.
Super Class:
vtkXMLUnstructuredDataWriter
vtkXMLPolyDataWriter writes the VTK XML PolyData file format. One
polygonal data input can be written into one file in any number of
streamed pieces (if supported by the rest of the pipeline). The
standard extension for this writer's file format is "vtp". This
writer is also used to write a single piece of the parallel file
format.
See Also:
vtkXMLPPolyDataWriter
"""
class vtkXMLRectilinearGridReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLRectilinearGridReader - Read VTK XML RectilinearGrid files.
Super Class:
vtkXMLStructuredDataReader
vtkXMLRectilinearGridReader reads the VTK XML RectilinearGrid file
format. One rectilinear grid file can be read to produce one
output. Streaming is supported. The standard extension for this
reader's file format is "vtr". This reader is also used to read a
single piece of the parallel file format.
See Also:
vtkXMLPRectilinearGridReader
"""
class vtkXMLRectilinearGridWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLRectilinearGridWriter - Write VTK XML RectilinearGrid files.
Super Class:
vtkXMLStructuredDataWriter
vtkXMLRectilinearGridWriter writes the VTK XML RectilinearGrid
file format. One rectilinear grid input can be written into one
file in any number of streamed pieces. The standard extension for
this writer's file format is "vtr". This writer is also used to
write a single piece of the parallel file format.
See Also:
vtkXMLPRectilinearGridWriter
"""
class vtkXMLStructuredGridReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLStructuredGridReader - Read VTK XML StructuredGrid files.
Super Class:
vtkXMLStructuredDataReader
vtkXMLStructuredGridReader reads the VTK XML StructuredGrid file
format. One structured grid file can be read to produce one
output. Streaming is supported. The standard extension for this
reader's file format is "vts". This reader is also used to read a
single piece of the parallel file format.
See Also:
vtkXMLPStructuredGridReader
"""
class vtkXMLStructuredGridWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLStructuredGridWriter - Write VTK XML StructuredGrid files.
Super Class:
vtkXMLStructuredDataWriter
vtkXMLStructuredGridWriter writes the VTK XML StructuredGrid file
format. One structured grid input can be written into one file in
any number of streamed pieces. The standard extension for this
writer's file format is "vts". This writer is also used to write a
single piece of the parallel file format.
See Also:
vtkXMLPStructuredGridWriter
"""
class vtkXMLTreeReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLTreeReader - reads an XML file into a vtkTree
Super Class:
vtkTreeAlgorithm
vtkXMLTreeReader parses an XML file and uses the nesting structure of the
XML tags to generate a tree. Node attributes are assigned to node arrays,
and the special arrays .tagname and .chardata contain the tag type and the
text internal to the tag, respectively. The arrays are of type
vtkStringArray. There is an array for each attribute type in the XML file,
even if it appears in only one tag. If an attribute is missing from a tag,
its value is the empty string.
If MaskArrays is on (the default is off), the filter will additionally make bit
arrays whose names are prepended with ".valid." which are 1 if the element
contains that attribute, and 0 otherwise.
For example, the XML file containing the text:
<node name="jeff" age="26">
this is text in jeff's node
<node name="joe">
<node name="al" initials="amb" other="something"/>
<node name="dave" age="30"/>
</node>
<node name="lisa">this is text in lisa's node</node>
<node name="darlene" age="29"/>
</node>
would be parsed into a tree with the following node IDs and structure:
0 (jeff) - children: 1 (joe), 4 (lisa), 5 (darlene)
1 (joe) - children: 2 (al), 3 (dave)
2 (al)
3 (dave)
4 (lisa)
5 (darlene)
and the node data arrays would be as follows:
name initials other age .tagname .chardata
------------------------------------------------------------------------------------------------
jeff (empty) (empty) 26 node " this is text in jeff's node\n \n \n \n"
joe (empty) (empty) (empty) node "\n \n \n "
al amb something (empty) node (empty)
dave (empty) (empty) 30 node (empty)
lisa (empty) (empty) (empty) node "this is text in lisa's node"
darlene (empty) (empty) 29 node (empty)
There would also be the following bit arrays if MaskArrays is on:
.valid.name .valid.initials .valid.other .valid.age
---------------------------------------------------------
1 0 0 1
1 0 0 0
1 1 1 0
1 0 0 1
1 0 0 0
1 0 0 1
"""
class vtkXMLUnstructuredGridReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXMLUnstructuredGridReader - Read VTK XML UnstructuredGrid files.
Super Class:
vtkXMLUnstructuredDataReader
vtkXMLUnstructuredGridReader reads the VTK XML UnstructuredGrid
file format. One unstructured grid file can be read to produce one
output. Streaming is supported. The standard extension for this
reader's file format is "vtu". This reader is also used to read a
single piece of the parallel file format.
See Also:
vtkXMLPUnstructuredGridReader
"""
class vtkXMLUnstructuredGridWriter:
kits = ['vtk_kit']
cats = ['VTK basic writer']
help = \
"""vtkXMLUnstructuredGridWriter - Write VTK XML UnstructuredGrid files.
Super Class:
vtkXMLUnstructuredDataWriter
vtkXMLUnstructuredGridWriter writes the VTK XML UnstructuredGrid
file format. One unstructured grid input can be written into one
file in any number of streamed pieces (if supported by the rest of
the pipeline). The standard extension for this writer's file
format is "vtu". This writer is also used to write a single piece
of the parallel file format.
See Also:
vtkXMLPUnstructuredGridWriter
"""
class vtkXYZMolReader:
kits = ['vtk_kit']
cats = ['VTK basic readers']
help = \
"""vtkXYZMolReader - read Molecular Data files
Super Class:
vtkMoleculeReaderBase
vtkXYZMolReader is a source object that reads Molecule files.
The FileName must be specified.
.SECTION Thanks
Dr. Jean M. Favre who developed and contributed this class
""" | {
"content_hash": "cc830b49037d933559eebfb489f73859",
"timestamp": "",
"source": "github",
"line_count": 14116,
"max_line_length": 143,
"avg_line_length": 31.663502408614338,
"alnum_prop": 0.7565945203395367,
"repo_name": "fvpolpeta/devide",
"id": "c8ed61634a58816b19d19fd6eb9f53ec43341d76",
"size": "447018",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/vtk_basic/module_index.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3104368"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
} |
"""Typing helpers."""
from enum import IntEnum
from datetime import datetime
from typing import NamedTuple, Optional
__all__ = ('FilePriority', 'FileDownloadStrategy', 'HashingState', 'State',
'TorrentInfo', 'TorrentTrackedFile')
class HashingState(IntEnum):
"""Hashing state of the torrent."""
#:
NOT_HASHING = 0
#:
FIRST_HASH_CHECK = 1
#:
HASHING = 2
#:
REHASHING = 3
class State(IntEnum):
"""State of the torrent."""
#:
STOPPED = 0
#:
STARTED_OR_PAUSED = 1
class TorrentInfo(NamedTuple):
"""Torrent information."""
hash: str
is_open: bool
is_hash_checking: bool
is_hash_checked: bool
state: State
name: str
size_bytes: int
completed_chunks: int
size_chunks: int
bytes_done: int
up_total: int
ratio: float
up_rate: int
down_rate: int
#: Chunk size (usually a power of 2).
chunk_size: int
#: Usually contains the assigned label.
custom1: str
peers_accounted: int
peers_not_connected: int
peers_connected: int
peers_complete: int
left_bytes: int
priority: int
state_changed: Optional[datetime]
skip_total: int
hashing: HashingState
chunks_hashed: int
#: Path before the torrent directory or file.
base_path: str
#: Date torrent was added to the client. Can be ``None`` if this was not
#: captured, or possibly due to a crash.
creation_date: Optional[datetime]
tracker_focus: int
is_active: bool
#: Message from the server.
message: str
custom2: str
free_diskspace: int
is_private: bool
is_multi_file: bool
# unk1: str
finished: Optional[datetime]
class FilePriority(IntEnum):
"""
Single file priority. These are based on ruTorrent's code, not
rTorrent's.
"""
#:
DO_NOT_DOWNLOAD = 0
#:
NORMAL = 1
#:
HIGH = 2
class FileDownloadStrategy(IntEnum):
"""Single file download strategy."""
#: Also known as 'trailing chunk first'.
NORMAL = 0
#:
LEADING_CHUNK_FIRST = 1
class TorrentTrackedFile(NamedTuple):
"""Contains information about a single file within a torrent."""
#: File name without path.
name: str
number_of_pieces: int
downloaded_pieces: int
size_bytes: int
#: Download priority.
priority_id: FilePriority
#: Download strategy.
download_strategy_id: FileDownloadStrategy
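# A minimal usage sketch (not part of the module): constructing a
# TorrentTrackedFile by hand, e.g. in a test. All field values here are
# invented for illustration.
#
#   example_file = TorrentTrackedFile(
#       name='ubuntu.iso',
#       number_of_pieces=1200,
#       downloaded_pieces=1200,
#       size_bytes=1258291200,
#       priority_id=FilePriority.NORMAL,
#       download_strategy_id=FileDownloadStrategy.NORMAL)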
| {
"content_hash": "04798c7d91ffbf96397f8ccfee12bab1",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 76,
"avg_line_length": 22.50925925925926,
"alnum_prop": 0.6347182229535171,
"repo_name": "Tatsh/xirvik-tools",
"id": "779ac205052493a8c85a256f6f3ad9ef557f6dd4",
"size": "2431",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "xirvik/typing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118908"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
import collections
import itertools
import json
import os
import posixpath
import re
import time
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.files.storage import default_storage as storage
from django.db import models, transaction
from django.dispatch import receiver
from django.db.models import Max, Q, signals as dbsignals
from django.utils.translation import trans_real
import caching.base as caching
import commonware.log
import json_field
from django_statsd.clients import statsd
from jinja2.filters import do_dictsort
from tower import ugettext_lazy as _
from olympia import amo
from olympia.amo.models import (
SlugField, OnChangeMixin, ModelBase, ManagerBase, manual_order)
from olympia.access import acl
from olympia.addons.utils import get_creatured_ids, get_featured_ids
from olympia.amo import helpers
from olympia.amo.decorators import use_master, write
from olympia.amo.utils import (
attach_trans_dict, cache_ns_key, chunked, find_language, JSONEncoder,
no_translation, send_mail, slugify, sorted_groupby, timer, to_language,
urlparams)
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.files.models import File
from olympia.reviews.models import Review
from olympia.tags.models import Tag
from olympia.translations.fields import (
LinkifiedField, PurifiedField, save_signal, TranslatedField, Translation)
from olympia.translations.query import order_by_translation
from olympia.users.models import UserForeignKey, UserProfile
from olympia.versions.compare import version_int
from olympia.versions.models import inherit_nomination, Version
from . import query, signals
log = commonware.log.getLogger('z.addons')
def clean_slug(instance, slug_field='slug'):
"""Cleans a model instance slug.
This strives to be as generic as possible as it's used by Addons
and Collections, and maybe more in the future.
"""
slug = getattr(instance, slug_field, None) or instance.name
if not slug:
# Initialize the slug with what we have available: a name translation,
# or the id of the instance, or as a last resort the model name.
translations = Translation.objects.filter(id=instance.name_id)
if translations.exists():
slug = translations[0]
elif instance.id:
slug = str(instance.id)
else:
slug = instance.__class__.__name__
max_length = instance._meta.get_field_by_name(slug_field)[0].max_length
slug = slugify(slug)[:max_length]
if BlacklistedSlug.blocked(slug):
slug = slug[:max_length - 1] + '~'
# The following trick makes sure we are using a manager that returns
# all the objects, as otherwise we could have a slug clash on our hands.
# Eg with the "Addon.objects" manager, which doesn't list deleted addons,
# we could have a "clean" slug which is in fact already assigned to an
# already existing (deleted) addon. Also, make sure we use the base class.
manager = models.Manager()
manager.model = instance._meta.proxy_for_model or instance.__class__
qs = manager.values_list(slug_field, flat=True) # Get list of all slugs.
if instance.id:
qs = qs.exclude(pk=instance.id) # Can't clash with itself.
# We first need to make sure there's a clash, before trying to find a
# suffix that is available. Eg, if there's a "foo-bar" slug, "foo" is still
# available.
clash = qs.filter(**{slug_field: slug})
if clash.exists():
# Leave space for 99 clashes.
slug = slugify(slug)[:max_length - 2]
# There is a clash, so find a suffix that will make this slug unique.
lookup = {'%s__startswith' % slug_field: slug}
clashes = qs.filter(**lookup)
# Try numbers between 1 and the number of clashes + 1 (+ 1 because we
# start the range at 1, not 0):
# if we have two clashes "foo1" and "foo2", we need to try "foox"
# for x between 1 and 3 to be absolutely sure to find an available one.
for idx in range(1, len(clashes) + 2):
new = ('%s%s' % (slug, idx))[:max_length]
if new not in clashes:
slug = new
break
else:
# This could happen. The current implementation (using
# ``[:max_length -3]``) only works for the first 100 clashes in the
# worst case (if the slug is equal to or longer than
# ``max_length - 3`` chars).
# After that, {verylongslug}-100 will be trimmed down to
# {verylongslug}-10, which is already assigned, but it's the last
# solution tested.
raise RuntimeError
setattr(instance, slug_field, slug)
return instance
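# Illustrative sketch (not part of the original module) of the clash handling
# above; the addon, its name and the resulting suffix are hypothetical.
#
#   addon = Addon.objects.get(pk=123)   # hypothetical addon named 'My Addon'
#   addon.slug = None
#   clean_slug(addon)                   # sets addon.slug to 'my-addon', or to
#                                       # 'my-addon1' etc. if that slug is taken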
class AddonManager(ManagerBase):
def __init__(self, include_deleted=False, include_unlisted=False):
# DO NOT change the default value of include_deleted and
# include_unlisted unless you've read through the comment just above
# the Addon managers declaration/instantiation and understand the
# consequences.
ManagerBase.__init__(self)
self.include_deleted = include_deleted
self.include_unlisted = include_unlisted
def get_query_set(self):
qs = super(AddonManager, self).get_query_set()
qs = qs._clone(klass=query.IndexQuerySet)
if not self.include_deleted:
qs = qs.exclude(status=amo.STATUS_DELETED)
if not self.include_unlisted:
qs = qs.exclude(is_listed=False)
return qs.transform(Addon.transformer)
def id_or_slug(self, val):
if isinstance(val, basestring) and not val.isdigit():
return self.filter(slug=val)
return self.filter(id=val)
def enabled(self):
return self.filter(disabled_by_user=False)
def public(self):
"""Get public add-ons only"""
return self.filter(self.valid_q([amo.STATUS_PUBLIC]))
def reviewed(self):
"""Get add-ons with a reviewed status"""
return self.filter(self.valid_q(amo.REVIEWED_STATUSES))
def unreviewed(self):
"""Get only unreviewed add-ons"""
return self.filter(self.valid_q(amo.UNREVIEWED_STATUSES))
def valid(self):
"""Get valid, enabled add-ons only"""
return self.filter(self.valid_q(amo.LISTED_STATUSES))
def valid_and_disabled_and_pending(self):
"""
Get valid, pending, enabled and disabled add-ons.
Used to allow pending theme pages to still be viewed.
"""
statuses = list(amo.LISTED_STATUSES) + [amo.STATUS_DISABLED,
amo.STATUS_PENDING]
return (self.filter(Q(status__in=statuses) | Q(disabled_by_user=True))
.exclude(type=amo.ADDON_EXTENSION,
_current_version__isnull=True))
def featured(self, app, lang=None, type=None):
"""
Filter for all featured add-ons for an application in all locales.
"""
ids = get_featured_ids(app, lang, type)
return manual_order(self.listed(app), ids, 'addons.id')
def listed(self, app, *status):
"""
Listed add-ons have a version with a file matching ``status`` and are
not disabled. Personas and self-hosted add-ons will be returned too.
"""
if len(status) == 0:
status = [amo.STATUS_PUBLIC]
return self.filter(self.valid_q(status), appsupport__app=app.id)
def valid_q(self, status=[], prefix=''):
"""
Return a Q object that selects a valid Addon with the given statuses.
An add-on is valid if not disabled and has a current version.
``prefix`` can be used if you're not working with Addon directly and
need to hop across a join, e.g. ``prefix='addon__'`` in
CollectionAddon.
"""
if not status:
status = [amo.STATUS_PUBLIC]
def q(*args, **kw):
if prefix:
kw = dict((prefix + k, v) for k, v in kw.items())
return Q(*args, **kw)
return q(q(_current_version__isnull=False),
disabled_by_user=False, status__in=status)
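# Illustrative sketch (not part of the original module): using valid_q()
# directly and across a join, as its docstring suggests. CollectionAddon with
# an 'addon' foreign key is assumed here purely for the example.
#
#   valid_addons = Addon.objects.filter(Addon.objects.valid_q())
#   valid_collection_addons = CollectionAddon.objects.filter(
#       Addon.objects.valid_q(prefix='addon__'))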
class Addon(OnChangeMixin, ModelBase):
STATUS_CHOICES = amo.STATUS_CHOICES_ADDON
guid = models.CharField(max_length=255, unique=True, null=True)
slug = models.CharField(max_length=30, unique=True, null=True)
name = TranslatedField(default=None)
default_locale = models.CharField(max_length=10,
default=settings.LANGUAGE_CODE,
db_column='defaultlocale')
type = models.PositiveIntegerField(db_column='addontype_id', default=0)
status = models.PositiveIntegerField(
choices=STATUS_CHOICES.items(), db_index=True, default=0)
highest_status = models.PositiveIntegerField(
choices=STATUS_CHOICES.items(), default=0,
help_text="An upper limit for what an author can change.",
db_column='higheststatus')
icon_type = models.CharField(max_length=25, blank=True,
db_column='icontype')
homepage = TranslatedField()
support_email = TranslatedField(db_column='supportemail')
support_url = TranslatedField(db_column='supporturl')
description = PurifiedField(short=False)
summary = LinkifiedField()
developer_comments = PurifiedField(db_column='developercomments')
eula = PurifiedField()
privacy_policy = PurifiedField(db_column='privacypolicy')
the_reason = PurifiedField()
the_future = PurifiedField()
average_rating = models.FloatField(max_length=255, default=0, null=True,
db_column='averagerating')
bayesian_rating = models.FloatField(default=0, db_index=True,
db_column='bayesianrating')
total_reviews = models.PositiveIntegerField(default=0,
db_column='totalreviews')
weekly_downloads = models.PositiveIntegerField(
default=0, db_column='weeklydownloads', db_index=True)
total_downloads = models.PositiveIntegerField(
default=0, db_column='totaldownloads')
hotness = models.FloatField(default=0, db_index=True)
average_daily_downloads = models.PositiveIntegerField(default=0)
average_daily_users = models.PositiveIntegerField(default=0)
last_updated = models.DateTimeField(
db_index=True, null=True,
help_text='Last time this add-on had a file/version update')
disabled_by_user = models.BooleanField(default=False, db_index=True,
db_column='inactive')
trusted = models.BooleanField(default=False)
view_source = models.BooleanField(default=True, db_column='viewsource')
public_stats = models.BooleanField(default=False, db_column='publicstats')
prerelease = models.BooleanField(default=False)
admin_review = models.BooleanField(default=False, db_column='adminreview')
admin_review_type = models.PositiveIntegerField(
choices=amo.ADMIN_REVIEW_TYPES.items(), default=amo.ADMIN_REVIEW_FULL)
site_specific = models.BooleanField(default=False,
db_column='sitespecific')
external_software = models.BooleanField(default=False,
db_column='externalsoftware')
dev_agreement = models.BooleanField(
default=False, help_text="Has the dev agreement been signed?")
auto_repackage = models.BooleanField(
default=True, help_text='Automatically upgrade jetpack add-on to a '
'new sdk version?')
target_locale = models.CharField(
max_length=255, db_index=True, blank=True, null=True,
help_text="For dictionaries and language packs")
locale_disambiguation = models.CharField(
max_length=255, blank=True, null=True,
help_text="For dictionaries and language packs")
wants_contributions = models.BooleanField(default=False)
paypal_id = models.CharField(max_length=255, blank=True)
charity = models.ForeignKey('Charity', null=True)
suggested_amount = models.DecimalField(
max_digits=9, decimal_places=2, blank=True,
null=True, help_text=_(u'Users have the option of contributing more '
'or less than this amount.'))
total_contributions = models.DecimalField(max_digits=9, decimal_places=2,
blank=True, null=True)
annoying = models.PositiveIntegerField(
choices=amo.CONTRIB_CHOICES, default=0,
help_text=_(u'Users will always be asked in the Add-ons'
u' Manager (Firefox 4 and above)'))
enable_thankyou = models.BooleanField(
default=False, help_text='Should the thank you note be sent to '
'contributors?')
thankyou_note = TranslatedField()
authors = models.ManyToManyField('users.UserProfile', through='AddonUser',
related_name='addons')
categories = models.ManyToManyField('Category', through='AddonCategory')
dependencies = models.ManyToManyField('self', symmetrical=False,
through='AddonDependency',
related_name='addons')
_current_version = models.ForeignKey(Version, db_column='current_version',
related_name='+', null=True,
on_delete=models.SET_NULL)
_latest_version = models.ForeignKey(Version, db_column='latest_version',
on_delete=models.SET_NULL,
null=True, related_name='+')
mozilla_contact = models.EmailField(blank=True)
whiteboard = models.TextField(blank=True)
# Whether the add-on is listed on AMO or not.
is_listed = models.BooleanField(default=True, db_index=True)
    # The order of those managers is very important: the first one discovered,
    # if it has "use_for_related_fields = True" (which it has if it's
    # inheriting from caching.base.CachingManager), will be used for relations
    # like `version.addon`. We thus want one that is NOT filtered in any case,
    # since we don't want a 500 if the addon is not found (because it has the
    # status amo.STATUS_DELETED, for example).
    # The CLASS of the first one discovered will also be used for "many to
    # many relations" like `collection.addons`. In that case, we do want the
    # filtered version by default, to make sure we're not displaying stuff by
    # mistake. We thus want the CLASS of the first one to be filtered by
    # default.
    # We don't control the instantiation, but AddonManager sets
    # include_deleted and include_unlisted to False by default, so filtering
    # is enabled by default. This is also why it's not repeated for 'objects'
    # below.
unfiltered = AddonManager(include_deleted=True, include_unlisted=True)
with_unlisted = AddonManager(include_unlisted=True)
objects = AddonManager()
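    # Editor's sketch (illustrative, not part of the original model): how the
    # three managers above differ. Assumes `deleted` is a soft-deleted add-on
    # and `unlisted` is an unlisted one.
    #
    #     Addon.objects.get(pk=deleted.pk)         # raises Addon.DoesNotExist
    #     Addon.with_unlisted.get(pk=unlisted.pk)  # returns the unlisted row
    #     Addon.unfiltered.get(pk=deleted.pk)      # returns even deleted rows
    #
    # Relations like `version.addon` resolve through the first manager
    # defined (`unfiltered`), so they never 500 on deleted add-ons.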
class Meta:
db_table = 'addons'
@staticmethod
def __new__(cls, *args, **kw):
try:
type_idx = Addon._meta._type_idx
except AttributeError:
type_idx = (idx for idx, f in enumerate(Addon._meta.fields)
if f.attname == 'type').next()
Addon._meta._type_idx = type_idx
return object.__new__(cls)
def __unicode__(self):
return u'%s: %s' % (self.id, self.name)
def __init__(self, *args, **kw):
super(Addon, self).__init__(*args, **kw)
self._first_category = {}
if self.type == amo.ADDON_PERSONA:
self.STATUS_CHOICES = Persona.STATUS_CHOICES
def save(self, **kw):
self.clean_slug()
super(Addon, self).save(**kw)
# Like the above Manager objects (`objects`, `with_unlisted`, ...), but
# for ElasticSearch queries.
@classmethod
def search_public(cls):
return cls.search_with_unlisted().filter(is_listed=True)
@classmethod
def search_with_unlisted(cls):
return cls.search().filter(
is_disabled=False, status__in=amo.REVIEWED_STATUSES)
@use_master
def clean_slug(self, slug_field='slug'):
if self.status == amo.STATUS_DELETED:
return
clean_slug(self, slug_field)
@transaction.atomic
def delete(self, msg='', reason=''):
# To avoid a circular import.
from . import tasks
        # Check for the soft-deletion path. It applies only if the add-on's
        # status or highest_status isn't 0 (STATUS_INCOMPLETE).
soft_deletion = self.highest_status or self.status
if soft_deletion and self.status == amo.STATUS_DELETED:
# We're already done.
return
id = self.id
# Fetch previews before deleting the addon instance, so that we can
# pass the list of files to delete to the delete_preview_files task
# after the addon is deleted.
previews = list(Preview.objects.filter(addon__id=id)
.values_list('id', flat=True))
if soft_deletion:
# /!\ If we ever stop using soft deletion, and remove this code, we
# need to make sure that the logs created below aren't cascade
# deleted!
log.debug('Deleting add-on: %s' % self.id)
to = [settings.FLIGTAR]
user = amo.get_user()
# Don't localize email to admins, use 'en-US' always.
with no_translation():
# The types are lazy translated in apps/constants/base.py.
atype = amo.ADDON_TYPE.get(self.type).upper()
context = {
'atype': atype,
'authors': [u.email for u in self.authors.all()],
'adu': self.average_daily_users,
'guid': self.guid,
'id': self.id,
'msg': msg,
'reason': reason,
'name': self.name,
'slug': self.slug,
'total_downloads': self.total_downloads,
'url': helpers.absolutify(self.get_url_path()),
'user_str': ("%s, %s (%s)" % (user.display_name or
user.username, user.email,
user.id) if user else "Unknown"),
}
email_msg = u"""
The following %(atype)s was deleted.
%(atype)s: %(name)s
URL: %(url)s
DELETED BY: %(user_str)s
ID: %(id)s
GUID: %(guid)s
AUTHORS: %(authors)s
TOTAL DOWNLOADS: %(total_downloads)s
AVERAGE DAILY USERS: %(adu)s
NOTES: %(msg)s
REASON GIVEN BY USER FOR DELETION: %(reason)s
""" % context
log.debug('Sending delete email for %(atype)s %(id)s' % context)
subject = 'Deleting %(atype)s %(slug)s (%(id)d)' % context
# Update or NULL out various fields.
models.signals.pre_delete.send(sender=Addon, instance=self)
self._reviews.all().delete()
# The last parameter is needed to automagically create an AddonLog.
amo.log(amo.LOG.DELETE_ADDON, self.pk, unicode(self.guid), self)
self.update(status=amo.STATUS_DELETED, slug=None,
_current_version=None)
models.signals.post_delete.send(sender=Addon, instance=self)
send_mail(subject, email_msg, recipient_list=to)
else:
# Real deletion path.
super(Addon, self).delete()
for preview in previews:
tasks.delete_preview_files.delay(preview)
# Remove from search index.
tasks.unindex_addons.delay([id])
return True
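    # Editor's sketch of delete() semantics (illustrative, based on the code
    # above; not part of the original source):
    #
    #     addon.delete(msg='cleanup', reason='dupe')
    #     # - If the add-on ever had a non-zero status: soft delete. The row
    #     #   is kept with status=amo.STATUS_DELETED, slug/_current_version
    #     #   nulled, and a notification email is sent to settings.FLIGTAR.
    #     # - If it never got past STATUS_INCOMPLETE (0): hard delete, the
    #     #   row is removed entirely.
    #     # Either way, preview files are cleaned up and the add-on is
    #     # unindexed asynchronously via celery tasks.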
@classmethod
def initialize_addon_from_upload(cls, data, is_listed=True):
fields = cls._meta.get_all_field_names()
guid = data.get('guid')
old_guid_addon = None
if guid: # It's an extension.
# Reclaim GUID from deleted add-on.
try:
old_guid_addon = Addon.unfiltered.get(guid=guid)
old_guid_addon.update(guid=None)
except ObjectDoesNotExist:
pass
addon = Addon(**dict((k, v) for k, v in data.items() if k in fields))
addon.status = amo.STATUS_NULL
addon.is_listed = is_listed
locale_is_set = (addon.default_locale and
addon.default_locale in (
settings.AMO_LANGUAGES +
settings.HIDDEN_LANGUAGES) and
data.get('default_locale') == addon.default_locale)
if not locale_is_set:
addon.default_locale = to_language(trans_real.get_language())
addon.save()
if old_guid_addon:
old_guid_addon.update(guid='guid-reused-by-pk-{}'.format(addon.pk))
old_guid_addon.save()
return addon
@classmethod
def create_addon_from_upload_data(cls, data, user=None, **kwargs):
addon = cls.initialize_addon_from_upload(data, **kwargs)
AddonUser(addon=addon, user=user).save()
return addon
@classmethod
def from_upload(cls, upload, platforms, source=None, is_listed=True,
data=None):
from olympia.files.utils import parse_addon
if not data:
data = parse_addon(upload)
addon = cls.initialize_addon_from_upload(
is_listed=is_listed, data=data)
if upload.validation_timeout:
addon.update(admin_review=True)
Version.from_upload(upload, addon, platforms, source=source)
amo.log(amo.LOG.CREATE_ADDON, addon)
log.debug('New addon %r from %r' % (addon, upload))
return addon
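    # Hypothetical usage sketch (editor's note): creating an add-on from a
    # validated FileUpload. `upload` and the platform list are assumptions
    # for illustration only.
    #
    #     addon = Addon.from_upload(upload, [amo.PLATFORM_ALL.id],
    #                               is_listed=False)
    #     # parse_addon() supplies the metadata, a first Version/File pair is
    #     # created, and admin_review is set if validation timed out.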
def flush_urls(self):
urls = ['*/addon/%s/' % self.slug, # Doesn't take care of api
'*/addon/%s/developers/' % self.slug,
'*/addon/%s/eula/*' % self.slug,
'*/addon/%s/privacy/' % self.slug,
'*/addon/%s/versions/*' % self.slug,
'*/api/*/addon/%s' % self.slug,
self.icon_url,
self.thumbnail_url,
]
urls.extend('*/user/%d/' % u.id for u in self.listed_authors)
return urls
def get_url_path(self, more=False, add_prefix=True):
if not self.is_listed: # Not listed? Doesn't have a public page.
return ''
# If more=True you get the link to the ajax'd middle chunk of the
# detail page.
view = 'addons.detail_more' if more else 'addons.detail'
return reverse(view, args=[self.slug], add_prefix=add_prefix)
def get_api_url(self):
# Used by Piston in output.
return helpers.absolutify(self.get_url_path())
def get_dev_url(self, action='edit', args=None, prefix_only=False):
args = args or []
prefix = 'devhub'
type_ = 'themes' if self.type == amo.ADDON_PERSONA else 'addons'
if not prefix_only:
prefix += '.%s' % type_
view_name = '{prefix}.{action}'.format(prefix=prefix,
action=action)
return reverse(view_name, args=[self.slug] + args)
def get_detail_url(self, action='detail', args=[]):
return reverse('addons.%s' % action, args=[self.slug] + args)
def meet_the_dev_url(self):
return reverse('addons.meet', args=[self.slug])
@property
def reviews_url(self):
return helpers.url('addons.reviews.list', self.slug)
def get_ratings_url(self, action='list', args=None, add_prefix=True):
return reverse('ratings.themes.%s' % action,
args=[self.slug] + (args or []),
add_prefix=add_prefix)
@classmethod
def get_type_url(cls, type):
try:
type = amo.ADDON_SLUGS[type]
except KeyError:
return None
return reverse('browse.%s' % type)
def type_url(self):
"""The url for this add-on's type."""
return Addon.get_type_url(self.type)
def share_url(self):
return reverse('addons.share', args=[self.slug])
@property
def automated_signing(self):
# We allow automated signing for add-ons which are not listed.
# Beta versions are a special case for listed add-ons, and are dealt
# with on a file-by-file basis.
return not self.is_listed
@property
def is_sideload(self):
# An add-on can side-load if it has been fully reviewed.
return self.status in (amo.STATUS_NOMINATED, amo.STATUS_PUBLIC)
@amo.cached_property(writable=True)
def listed_authors(self):
return UserProfile.objects.filter(
addons=self,
addonuser__listed=True).order_by('addonuser__position')
@classmethod
def get_fallback(cls):
return cls._meta.get_field('default_locale')
@property
def reviews(self):
return Review.objects.filter(addon=self, reply_to=None)
def get_category(self, app):
if app in getattr(self, '_first_category', {}):
return self._first_category[app]
categories = list(self.categories.filter(application=app))
return categories[0] if categories else None
def language_ascii(self):
lang = trans_real.to_language(self.default_locale)
return settings.LANGUAGES.get(lang)
@property
def valid_file_statuses(self):
if self.status == amo.STATUS_PUBLIC:
return [amo.STATUS_PUBLIC]
if self.status in (amo.STATUS_LITE,
amo.STATUS_LITE_AND_NOMINATED):
return [amo.STATUS_PUBLIC, amo.STATUS_LITE,
amo.STATUS_LITE_AND_NOMINATED]
return amo.VALID_STATUSES
def get_version(self):
"""Retrieve the latest public version of an addon."""
if self.type == amo.ADDON_PERSONA:
return
try:
status = self.valid_file_statuses
status_list = ','.join(map(str, status))
fltr = {'files__status__in': status}
return self.versions.no_cache().filter(**fltr).extra(
where=["""
NOT EXISTS (
SELECT 1 FROM files AS f2
WHERE f2.version_id = versions.id AND
f2.status NOT IN (%s))
""" % status_list])[0]
except (IndexError, Version.DoesNotExist):
return None
@write
def update_version(self, ignore=None, _signal=True):
"""
Returns true if we updated the field.
        The optional ``ignore`` parameter, if present, is a version
        to not consider as part of the update, since it may be in the
        process of being deleted.
        Pass ``_signal=False`` if you don't want any signals fired at all.
"""
if self.is_persona():
# Versions are not as critical on themes.
# If there are no versions, just create one and go.
if not self._current_version:
if self._latest_version:
self.update(_current_version=self._latest_version,
_signal=False)
return True
return False
current = self.get_version()
try:
latest_qs = self.versions.exclude(files__status=amo.STATUS_BETA)
if ignore is not None:
latest_qs = latest_qs.exclude(pk=ignore.pk)
latest = latest_qs.latest()
except Version.DoesNotExist:
latest = None
latest_id = latest and latest.id
diff = [self._current_version, current]
# Sometimes the DB is in an inconsistent state when this
# signal is dispatched.
try:
if self._latest_version:
# Make sure stringifying this does not trigger
# Version.DoesNotExist before trying to use it for
# logging.
unicode(self._latest_version)
diff += [self._latest_version, latest]
except Version.DoesNotExist:
diff += [self._latest_version_id, latest_id]
updated = {}
send_signal = False
if self._current_version != current:
updated.update({'_current_version': current})
send_signal = True
# Don't use self.latest_version here. It may throw Version.DoesNotExist
# if we're called from a post_delete signal. We also don't set
# send_signal since we only want this fired if the public version
# changes.
if self._latest_version_id != latest_id:
updated.update({'_latest_version': latest})
# update_version can be called by a post_delete signal (such
# as File's) when deleting a version. If so, we should avoid putting
# that version-being-deleted in any fields.
if ignore is not None:
updated = dict([(k, v) for (k, v) in updated.iteritems()
if v != ignore])
if updated:
# Pass along _signal to the .update() to prevent it from firing
# signals if we don't want them.
updated['_signal'] = _signal
try:
self.update(**updated)
if send_signal and _signal:
signals.version_changed.send(sender=self)
log.info(u'Version changed from current: %s to %s, '
u'latest: %s to %s for addon %s'
% tuple(diff + [self]))
except Exception, e:
log.error(u'Could not save version changes current: %s to %s, '
u'latest: %s to %s for addon %s (%s)' %
tuple(diff + [self, e]))
return bool(updated)
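    # Editor's sketch of how update_version() is typically used (illustrative
    # only; `doomed_version` is a hypothetical Version being deleted):
    #
    #     addon.update_version()                      # recompute both pointers
    #     addon.update_version(ignore=doomed_version)
    #     # `ignore` keeps a version that is mid-deletion out of
    #     # _current_version/_latest_version; pass _signal=False to skip the
    #     # version_changed signal.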
def compatible_version(self, app_id, app_version=None, platform=None,
compat_mode='strict'):
"""Returns the newest compatible version given the input."""
if not app_id:
return None
if platform:
            # platform_id=1 (ALL) is always included in the SQL, so we skip
            # it here.
platform = platform.lower()
if platform != 'all' and platform in amo.PLATFORM_DICT:
platform = amo.PLATFORM_DICT[platform].id
else:
platform = None
log.debug(u'Checking compatibility for add-on ID:%s, APP:%s, V:%s, '
u'OS:%s, Mode:%s' % (self.id, app_id, app_version, platform,
compat_mode))
valid_file_statuses = ','.join(map(str, self.valid_file_statuses))
data = dict(id=self.id, app_id=app_id, platform=platform,
valid_file_statuses=valid_file_statuses)
if app_version:
data.update(version_int=version_int(app_version))
else:
# We can't perform the search queries for strict or normal without
# an app version.
compat_mode = 'ignore'
ns_key = cache_ns_key('d2c-versions:%s' % self.id)
cache_key = '%s:%s:%s:%s:%s' % (ns_key, app_id, app_version, platform,
compat_mode)
version_id = cache.get(cache_key)
if version_id is not None:
log.debug(u'Found compatible version in cache: %s => %s' % (
cache_key, version_id))
if version_id == 0:
return None
else:
try:
return Version.objects.get(pk=version_id)
except Version.DoesNotExist:
pass
raw_sql = ["""
SELECT versions.*
FROM versions
INNER JOIN addons
ON addons.id = versions.addon_id AND addons.id = %(id)s
INNER JOIN applications_versions
ON applications_versions.version_id = versions.id
INNER JOIN appversions appmin
ON appmin.id = applications_versions.min
AND appmin.application_id = %(app_id)s
INNER JOIN appversions appmax
ON appmax.id = applications_versions.max
AND appmax.application_id = %(app_id)s
INNER JOIN files
ON files.version_id = versions.id AND
(files.platform_id = 1"""]
if platform:
raw_sql.append(' OR files.platform_id = %(platform)s')
raw_sql.append(') WHERE files.status IN (%(valid_file_statuses)s) ')
if app_version:
raw_sql.append('AND appmin.version_int <= %(version_int)s ')
if compat_mode == 'ignore':
pass # No further SQL modification required.
elif compat_mode == 'normal':
raw_sql.append("""AND
CASE WHEN files.strict_compatibility = 1 OR
files.binary_components = 1
THEN appmax.version_int >= %(version_int)s ELSE 1 END
""")
# Filter out versions that don't have the minimum maxVersion
# requirement to qualify for default-to-compatible.
d2c_max = amo.D2C_MAX_VERSIONS.get(app_id)
if d2c_max:
data['d2c_max_version'] = version_int(d2c_max)
raw_sql.append(
"AND appmax.version_int >= %(d2c_max_version)s ")
# Filter out versions found in compat overrides
raw_sql.append("""AND
NOT versions.id IN (
SELECT version_id FROM incompatible_versions
WHERE app_id=%(app_id)s AND
(min_app_version='0' AND
max_app_version_int >= %(version_int)s) OR
(min_app_version_int <= %(version_int)s AND
max_app_version='*') OR
(min_app_version_int <= %(version_int)s AND
max_app_version_int >= %(version_int)s)) """)
else: # Not defined or 'strict'.
raw_sql.append('AND appmax.version_int >= %(version_int)s ')
raw_sql.append('ORDER BY versions.id DESC LIMIT 1;')
version = Version.objects.raw(''.join(raw_sql) % data)
if version:
version = version[0]
version_id = version.id
else:
version = None
version_id = 0
log.debug(u'Caching compat version %s => %s' % (cache_key, version_id))
cache.set(cache_key, version_id, None)
return version
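    # Editor's sketch (not part of the original source): how compat_mode
    # affects compatible_version(). The app/version values are assumptions.
    #
    #     addon.compatible_version(amo.FIREFOX.id, '42.0', 'linux', 'strict')
    #     #   -> only versions whose appmax covers 42.0
    #     addon.compatible_version(amo.FIREFOX.id, '42.0', 'linux', 'normal')
    #     #   -> also allows older maxVersions, unless the files declare
    #     #      strict_compatibility/binary_components or a compat override
    #     #      marks them incompatible
    #     addon.compatible_version(amo.FIREFOX.id)  # no app version given
    #     #   -> falls back to compat_mode='ignore'
    #
    # Results are cached per (app, version, platform, mode) under the
    # d2c-versions namespace; see invalidate_d2c_versions() below.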
def increment_version(self):
"""Increment version number by 1."""
version = self.latest_version or self.current_version
version.version = str(float(version.version) + 1)
# Set the current version.
self.update(_current_version=version.save())
def invalidate_d2c_versions(self):
"""Invalidates the cache of compatible versions.
Call this when there is an event that may change what compatible
versions are returned so they are recalculated.
"""
key = cache_ns_key('d2c-versions:%s' % self.id, increment=True)
log.info('Incrementing d2c-versions namespace for add-on [%s]: %s' % (
self.id, key))
@property
def current_version(self):
"""Returns the current_version or None if the app is deleted or not
created yet"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._current_version
except ObjectDoesNotExist:
pass
return None
@property
def latest_version(self):
"""Returns the latest_version or None if the app is deleted or not
created yet"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._latest_version
except ObjectDoesNotExist:
pass
return None
@amo.cached_property
def binary(self):
"""Returns if the current version has binary files."""
version = self.current_version
if version:
return version.files.filter(binary=True).exists()
return False
@amo.cached_property
def binary_components(self):
"""Returns if the current version has files with binary_components."""
version = self.current_version
if version:
return version.files.filter(binary_components=True).exists()
return False
def get_icon_dir(self):
return os.path.join(helpers.user_media_path('addon_icons'),
'%s' % (self.id / 1000))
def get_icon_url(self, size, use_default=True):
"""
        Returns the add-on's icon url for the requested size.
        If this is not a theme or persona and there is no icon for the
        add-on, then:
            if use_default is True, return a default icon;
            if use_default is False, return None.
"""
icon_type_split = []
if self.icon_type:
icon_type_split = self.icon_type.split('/')
# Get the closest allowed size without going over
if (size not in amo.ADDON_ICON_SIZES
and size >= amo.ADDON_ICON_SIZES[0]):
size = [s for s in amo.ADDON_ICON_SIZES if s < size][-1]
elif size < amo.ADDON_ICON_SIZES[0]:
size = amo.ADDON_ICON_SIZES[0]
# Figure out what to return for an image URL
if self.type == amo.ADDON_PERSONA:
return self.persona.icon_url
if not self.icon_type:
if self.type == amo.ADDON_THEME:
icon = amo.ADDON_ICONS[amo.ADDON_THEME]
return "%simg/icons/%s" % (settings.STATIC_URL, icon)
else:
if not use_default:
return None
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL,
'default',
size
)
elif icon_type_split[0] == 'icon':
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL,
icon_type_split[1],
size
)
else:
# [1] is the whole ID, [2] is the directory
split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
modified = int(time.mktime(self.modified.timetuple()))
path = '/'.join([
split_id.group(2) or '0',
'{0}-{1}.png?modified={2}'.format(self.id, size, modified),
])
return helpers.user_media_url('addon_icons') + path
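    # Editor's note on get_icon_url() above (illustrative, not part of the
    # original source): the size snapping picks the closest allowed size
    # without going over. Assuming ADDON_ICON_SIZES == (32, 48, 64):
    #
    #     addon.get_icon_url(50)  # -> served as the 48px icon
    #     addon.get_icon_url(16)  # -> bumped up to the smallest size, 32px
    #     addon.get_icon_url(64)  # -> exact match, 64px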
@write
def update_status(self, ignore_version=None):
self.reload()
if (self.status in [amo.STATUS_NULL, amo.STATUS_DELETED]
or self.is_disabled or self.is_persona()):
self.update_version(ignore=ignore_version)
return
def logit(reason, old=self.status):
log.info('Changing add-on status [%s]: %s => %s (%s).'
% (self.id, old, self.status, reason))
amo.log(amo.LOG.CHANGE_STATUS, self.get_status_display(), self)
versions = self.versions.all()
status = None
if not versions.exists():
status = amo.STATUS_NULL
logit('no versions')
elif not versions.filter(
files__status__in=amo.VALID_STATUSES).exists():
status = amo.STATUS_NULL
logit('no version with valid file')
elif (self.status == amo.STATUS_PUBLIC and
not versions.filter(files__status=amo.STATUS_PUBLIC).exists()):
if versions.filter(files__status=amo.STATUS_LITE).exists():
status = amo.STATUS_LITE
logit('only lite files')
else:
status = amo.STATUS_UNREVIEWED
logit('no reviewed files')
elif (self.status in amo.REVIEWED_STATUSES
and self.latest_version
and self.latest_version.has_files
and (self.latest_version.all_files[0].status
in amo.UNDER_REVIEW_STATUSES)):
            # The add-on is public, but its latest file is not (this is the
            # case on a new file upload). So call update() to trigger
            # watch_status, which takes care of setting the nomination time
            # when needed.
status = self.status
if status is not None:
self.update(status=status)
self.update_version(ignore=ignore_version)
@staticmethod
def attach_related_versions(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
current_ids = filter(None, (a._current_version_id for a in addons))
latest_ids = filter(None, (a._latest_version_id for a in addons))
all_ids = set(current_ids) | set(latest_ids)
versions = list(Version.objects.filter(id__in=all_ids).order_by())
for version in versions:
try:
addon = addon_dict[version.addon_id]
except KeyError:
log.debug('Version %s has an invalid add-on id.' % version.id)
continue
if addon._current_version_id == version.id:
addon._current_version = version
if addon._latest_version_id == version.id:
addon._latest_version = version
version.addon = addon
@staticmethod
def attach_listed_authors(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
q = (UserProfile.objects.no_cache()
.filter(addons__in=addons, addonuser__listed=True)
.extra(select={'addon_id': 'addons_users.addon_id',
'position': 'addons_users.position'}))
q = sorted(q, key=lambda u: (u.addon_id, u.position))
for addon_id, users in itertools.groupby(q, key=lambda u: u.addon_id):
addon_dict[addon_id].listed_authors = list(users)
# FIXME: set listed_authors to empty list on addons without listed
# authors.
@staticmethod
def attach_previews(addons, addon_dict=None, no_transforms=False):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
qs = Preview.objects.filter(addon__in=addons,
position__gte=0).order_by()
if no_transforms:
qs = qs.no_transforms()
qs = sorted(qs, key=lambda x: (x.addon_id, x.position, x.created))
for addon, previews in itertools.groupby(qs, lambda x: x.addon_id):
addon_dict[addon].all_previews = list(previews)
# FIXME: set all_previews to empty list on addons without previews.
@staticmethod
@timer
def transformer(addons):
if not addons:
return
addon_dict = dict((a.id, a) for a in addons)
personas = [a for a in addons if a.type == amo.ADDON_PERSONA]
addons = [a for a in addons if a.type != amo.ADDON_PERSONA]
# Set _latest_version, _current_version
Addon.attach_related_versions(addons, addon_dict=addon_dict)
# Attach listed authors.
Addon.attach_listed_authors(addons, addon_dict=addon_dict)
for persona in Persona.objects.no_cache().filter(addon__in=personas):
addon = addon_dict[persona.addon_id]
addon.persona = persona
addon.weekly_downloads = persona.popularity
# Personas need categories for the JSON dump.
Category.transformer(personas)
# Attach previews.
Addon.attach_previews(addons, addon_dict=addon_dict)
# Attach _first_category for Firefox.
cats = dict(AddonCategory.objects.values_list('addon', 'category')
.filter(addon__in=addon_dict,
category__application=amo.FIREFOX.id))
qs = Category.objects.filter(id__in=set(cats.values()))
categories = dict((c.id, c) for c in qs)
for addon in addons:
category = categories[cats[addon.id]] if addon.id in cats else None
addon._first_category[amo.FIREFOX.id] = category
return addon_dict
@property
def show_beta(self):
return self.status == amo.STATUS_PUBLIC and self.current_beta_version
def show_adu(self):
return self.type != amo.ADDON_SEARCH
@amo.cached_property
def current_beta_version(self):
"""Retrieves the latest version of an addon, in the beta channel."""
versions = self.versions.filter(files__status=amo.STATUS_BETA)[:1]
if versions:
return versions[0]
@property
def icon_url(self):
return self.get_icon_url(32)
def authors_other_addons(self, app=None):
"""
Return other addons by the author(s) of this addon,
optionally takes an app.
"""
if app:
qs = Addon.objects.listed(app)
else:
qs = Addon.objects.valid()
return (qs.exclude(id=self.id)
.filter(addonuser__listed=True,
authors__in=self.listed_authors)
.distinct())
@property
def contribution_url(self, lang=settings.LANGUAGE_CODE,
app=settings.DEFAULT_APP):
return reverse('addons.contribute', args=[self.slug])
@property
def thumbnail_url(self):
"""
Returns the addon's thumbnail url or a default.
"""
try:
preview = self.all_previews[0]
return preview.thumbnail_url
except IndexError:
return settings.STATIC_URL + '/img/icons/no-preview.png'
def can_request_review(self):
"""Return the statuses an add-on can request."""
if not File.objects.filter(version__addon=self):
return ()
if (self.is_disabled or
self.status in (amo.STATUS_PUBLIC,
amo.STATUS_LITE_AND_NOMINATED,
amo.STATUS_DELETED) or
not self.latest_version or
not self.latest_version.files.exclude(
status=amo.STATUS_DISABLED)):
return ()
elif self.status == amo.STATUS_NOMINATED:
return (amo.STATUS_LITE,)
elif self.status in [amo.STATUS_UNREVIEWED, amo.STATUS_LITE]:
return (amo.STATUS_PUBLIC,)
else:
return (amo.STATUS_LITE, amo.STATUS_PUBLIC)
def is_persona(self):
return self.type == amo.ADDON_PERSONA
@property
def is_disabled(self):
"""True if this Addon is disabled.
It could be disabled by an admin or disabled by the developer
"""
return self.status == amo.STATUS_DISABLED or self.disabled_by_user
@property
def is_deleted(self):
return self.status == amo.STATUS_DELETED
@property
def is_under_review(self):
return self.status in amo.UNDER_REVIEW_STATUSES
def is_unreviewed(self):
return self.status in amo.UNREVIEWED_STATUSES
def is_public(self):
return self.status == amo.STATUS_PUBLIC and not self.disabled_by_user
def is_incomplete(self):
from olympia.devhub.models import SubmitStep # Avoid import loop.
return SubmitStep.objects.filter(addon=self).exists()
def is_pending(self):
return self.status == amo.STATUS_PENDING
def is_rejected(self):
return self.status == amo.STATUS_REJECTED
def can_be_deleted(self):
return not self.is_deleted
@classmethod
def featured_random(cls, app, lang):
return get_featured_ids(app, lang)
def is_no_restart(self):
"""Is this a no-restart add-on?"""
files = self.current_version and self.current_version.all_files
return bool(files and files[0].no_restart)
def is_featured(self, app, lang=None):
"""Is add-on globally featured for this app and language?"""
if app:
return self.id in get_featured_ids(app, lang)
def has_full_profile(self):
"""Is developer profile public (completed)?"""
return self.the_reason and self.the_future
def has_profile(self):
"""Is developer profile (partially or entirely) completed?"""
return self.the_reason or self.the_future
@amo.cached_property
def tags_partitioned_by_developer(self):
"""Returns a tuple of developer tags and user tags for this addon."""
tags = self.tags.not_blacklisted()
        if self.is_persona():
return [], tags
user_tags = tags.exclude(addon_tags__user__in=self.listed_authors)
dev_tags = tags.exclude(id__in=[t.id for t in user_tags])
return dev_tags, user_tags
@amo.cached_property
def compatible_apps(self):
"""Shortcut to get compatible apps for the current version."""
# Search providers and personas don't list their supported apps.
if self.type in amo.NO_COMPAT:
return dict((app, None) for app in
amo.APP_TYPE_SUPPORT[self.type])
if self.current_version:
return self.current_version.compatible_apps
else:
return {}
def accepts_compatible_apps(self):
"""True if this add-on lists compatible apps."""
return self.type not in amo.NO_COMPAT
def incompatible_latest_apps(self):
"""Returns a list of applications with which this add-on is
incompatible (based on the latest version).
"""
return [a for a, v in self.compatible_apps.items() if v and
version_int(v.max.version) < version_int(a.latest_version)]
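    # Editor's sketch (assumption for illustration, not in the original
    # source): version_int() maps dotted version strings onto integers that
    # compare in version order, which is what incompatible_latest_apps()
    # relies on, e.g.
    #
    #     version_int('3.6') < version_int('10.0') < version_int('10.0.1')
    #
    # so an add-on whose max compatible version is '3.6' is reported as
    # incompatible with an application whose latest version is '10.0'.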
def has_author(self, user, roles=None):
"""True if ``user`` is an author with any of the specified ``roles``.
``roles`` should be a list of valid roles (see amo.AUTHOR_ROLE_*). If
not specified, has_author will return true if the user has any role.
"""
if user is None or user.is_anonymous():
return False
if roles is None:
roles = dict(amo.AUTHOR_CHOICES).keys()
return AddonUser.objects.filter(addon=self, user=user,
role__in=roles).exists()
@property
def takes_contributions(self):
return (self.status == amo.STATUS_PUBLIC and self.wants_contributions
and (self.paypal_id or self.charity_id))
@property
def has_eula(self):
return self.eula
@classmethod
def _last_updated_queries(cls):
"""
Get the queries used to calculate addon.last_updated.
"""
status_change = Max('versions__files__datestatuschanged')
public = (
Addon.objects.no_cache().filter(
status=amo.STATUS_PUBLIC,
versions__files__status=amo.STATUS_PUBLIC)
.exclude(type=amo.ADDON_PERSONA)
.values('id').annotate(last_updated=status_change))
lite = (Addon.objects.no_cache()
.filter(status__in=amo.LISTED_STATUSES,
versions__files__status=amo.STATUS_LITE)
.values('id').annotate(last_updated=status_change))
stati = amo.LISTED_STATUSES + (amo.STATUS_PUBLIC,)
exp = (Addon.objects.no_cache().exclude(status__in=stati)
.filter(versions__files__status__in=amo.VALID_STATUSES)
.values('id')
.annotate(last_updated=Max('versions__files__created')))
personas = (Addon.objects.no_cache().filter(type=amo.ADDON_PERSONA)
.extra(select={'last_updated': 'created'}))
return dict(public=public, exp=exp, personas=personas,
lite=lite)
@amo.cached_property(writable=True)
def all_categories(self):
return list(self.categories.all())
@amo.cached_property(writable=True)
def all_previews(self):
return list(self.get_previews())
def get_previews(self):
"""Exclude promo graphics."""
return self.previews.exclude(position=-1)
@property
def app_categories(self):
categories = sorted_groupby(order_by_translation(self.categories.all(),
'name'),
key=lambda x: x.application)
app_cats = []
for app_id, cats in categories:
app = amo.APP_IDS.get(app_id)
if app_id and not app:
# Skip retired applications like Sunbird.
continue
app_cats.append((app, list(cats)))
return app_cats
def remove_locale(self, locale):
"""NULLify strings in this locale for the add-on and versions."""
for o in itertools.chain([self], self.versions.all()):
Translation.objects.remove_for(o, locale)
def get_localepicker(self):
"""For language packs, gets the contents of localepicker."""
if (self.type == amo.ADDON_LPAPP and self.status == amo.STATUS_PUBLIC
and self.current_version):
files = (self.current_version.files
.filter(platform=amo.PLATFORM_ANDROID.id))
try:
return unicode(files[0].get_localepicker(), 'utf-8')
except IndexError:
pass
return ''
def get_mozilla_contacts(self):
return [x.strip() for x in self.mozilla_contact.split(',')]
def can_review(self, user):
return not(user and self.has_author(user))
@property
def all_dependencies(self):
"""Return all the add-ons this add-on depends on."""
return list(self.dependencies.all()[:3])
def has_installed(self, user):
if not user or not isinstance(user, UserProfile):
return False
return self.installed.filter(user=user).exists()
def get_latest_file(self):
"""Get the latest file from the current version."""
cur = self.current_version
if cur:
res = cur.files.order_by('-created')
if res:
return res[0]
def in_escalation_queue(self):
return self.escalationqueue_set.exists()
def update_names(self, new_names):
"""
Adds, edits, or removes names to match the passed in new_names dict.
Will not remove the translation of the default_locale.
`new_names` is a dictionary mapping of locales to names.
Returns a message that can be used in logs showing what names were
added or updated.
Note: This method doesn't save the changes made to the addon object.
Don't forget to call save() in your calling method.
"""
updated_locales = {}
locales = dict(Translation.objects.filter(id=self.name_id)
.values_list('locale',
'localized_string'))
msg_c = [] # For names that were created.
msg_d = [] # For deletes.
msg_u = [] # For updates.
# Normalize locales.
names = {}
for locale, name in new_names.iteritems():
loc = find_language(locale)
if loc and loc not in names:
names[loc] = name
# Null out names no longer in `names` but exist in the database.
for locale in set(locales) - set(names):
names[locale] = None
for locale, name in names.iteritems():
if locale in locales:
if not name and locale.lower() == self.default_locale.lower():
pass # We never want to delete the default locale.
elif not name: # A deletion.
updated_locales[locale] = None
msg_d.append(u'"%s" (%s).' % (locales.get(locale), locale))
elif name != locales[locale]:
updated_locales[locale] = name
msg_u.append(u'"%s" -> "%s" (%s).' % (
locales[locale], name, locale))
else:
updated_locales[locale] = names.get(locale)
msg_c.append(u'"%s" (%s).' % (name, locale))
if locales != updated_locales:
self.name = updated_locales
return {
'added': ' '.join(msg_c),
'deleted': ' '.join(msg_d),
'updated': ' '.join(msg_u),
}
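    # Hypothetical usage sketch of update_names() above (editor's note; the
    # locale codes and names are assumptions for illustration):
    #
    #     msgs = addon.update_names({'en-US': 'My Add-on',
    #                                'fr': 'Mon module',
    #                                'de': None})  # None requests a deletion
    #     addon.save()  # update_names() does not save by itself
    #     log.info('added: %s / updated: %s / deleted: %s'
    #              % (msgs['added'], msgs['updated'], msgs['deleted']))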
def update_default_locale(self, locale):
"""
Updates default_locale if it's different and matches one of our
supported locales.
Returns tuple of (old_locale, new_locale) if updated. Otherwise None.
"""
old_locale = self.default_locale
locale = find_language(locale)
if locale and locale != old_locale:
self.update(default_locale=locale)
return old_locale, locale
return None
def check_ownership(self, request, require_owner, require_author,
ignore_disabled, admin):
"""
Used by acl.check_ownership to see if request.user has permissions for
the addon.
"""
if require_author:
require_owner = False
ignore_disabled = True
admin = False
return acl.check_addon_ownership(request, self, admin=admin,
viewer=(not require_owner),
ignore_disabled=ignore_disabled)
dbsignals.pre_save.connect(save_signal, sender=Addon,
dispatch_uid='addon_translations')
@receiver(signals.version_changed, dispatch_uid='version_changed')
def version_changed(sender, **kw):
from . import tasks
tasks.version_changed.delay(sender.id)
@receiver(dbsignals.post_save, sender=Addon,
dispatch_uid='addons.search.index')
def update_search_index(sender, instance, **kw):
from . import tasks
if not kw.get('raw'):
tasks.index_addons.delay([instance.id])
@Addon.on_change
def watch_status(old_attr={}, new_attr={}, instance=None,
sender=None, **kw):
"""
Set nomination date if the addon is new in queue or updating.
The nomination date cannot be reset, say, when a developer cancels
their request for full review and re-requests full review.
If a version is rejected after nomination, the developer has
to upload a new version.
"""
new_status = new_attr.get('status')
old_status = old_attr.get('status')
if (new_status not in amo.UNDER_REVIEW_STATUSES + amo.REVIEWED_STATUSES
or not new_status or not instance.latest_version):
return
if old_status not in amo.UNDER_REVIEW_STATUSES:
# New: will (re)set nomination only if it's None.
instance.latest_version.reset_nomination_time()
elif instance.latest_version.has_files:
# Updating: inherit nomination from last nominated version.
# Calls `inherit_nomination` manually given that signals are
# deactivated to avoid circular calls.
inherit_nomination(None, instance.latest_version)
@Addon.on_change
def watch_disabled(old_attr={}, new_attr={}, instance=None, sender=None, **kw):
attrs = dict((k, v) for k, v in old_attr.items()
if k in ('disabled_by_user', 'status'))
if Addon(**attrs).is_disabled and not instance.is_disabled:
for f in File.objects.filter(version__addon=instance.id):
f.unhide_disabled_file()
if instance.is_disabled and not Addon(**attrs).is_disabled:
for f in File.objects.filter(version__addon=instance.id):
f.hide_disabled_file()
@Addon.on_change
def watch_developer_notes(old_attr={}, new_attr={}, instance=None, sender=None,
**kw):
whiteboard_changed = (
new_attr.get('whiteboard') and
old_attr.get('whiteboard') != new_attr.get('whiteboard'))
developer_comments_changed = (new_attr.get('_developer_comments_cache') and
old_attr.get('_developer_comments_cache') !=
new_attr.get('_developer_comments_cache'))
if whiteboard_changed or developer_comments_changed:
instance.versions.update(has_info_request=False)
def attach_categories(addons):
"""Put all of the add-on's categories into a category_ids list."""
addon_dict = dict((a.id, a) for a in addons)
categories = (Category.objects.filter(addoncategory__addon__in=addon_dict)
.values_list('addoncategory__addon', 'id'))
for addon, cats in sorted_groupby(categories, lambda x: x[0]):
addon_dict[addon].category_ids = [c[1] for c in cats]
def attach_translations(addons):
"""Put all translations into a translations dict."""
attach_trans_dict(Addon, addons)
def attach_tags(addons):
addon_dict = dict((a.id, a) for a in addons)
qs = (Tag.objects.not_blacklisted().filter(addons__in=addon_dict)
.values_list('addons__id', 'tag_text'))
for addon, tags in sorted_groupby(qs, lambda x: x[0]):
addon_dict[addon].tag_list = [t[1] for t in tags]
class Persona(caching.CachingMixin, models.Model):
"""Personas-specific additions to the add-on model."""
STATUS_CHOICES = amo.STATUS_CHOICES_PERSONA
addon = models.OneToOneField(Addon)
persona_id = models.PositiveIntegerField(db_index=True)
# name: deprecated in favor of Addon model's name field
# description: deprecated, ditto
header = models.CharField(max_length=64, null=True)
footer = models.CharField(max_length=64, null=True)
accentcolor = models.CharField(max_length=10, null=True)
textcolor = models.CharField(max_length=10, null=True)
author = models.CharField(max_length=255, null=True)
display_username = models.CharField(max_length=255, null=True)
submit = models.DateTimeField(null=True)
approve = models.DateTimeField(null=True)
movers = models.FloatField(null=True, db_index=True)
popularity = models.IntegerField(null=False, default=0, db_index=True)
license = models.PositiveIntegerField(
choices=amo.PERSONA_LICENSES_CHOICES, null=True, blank=True)
# To spot duplicate submissions.
checksum = models.CharField(max_length=64, blank=True, default='')
dupe_persona = models.ForeignKey('self', null=True)
objects = caching.CachingManager()
class Meta:
db_table = 'personas'
def __unicode__(self):
return unicode(self.addon.name)
def is_new(self):
return self.persona_id == 0
def flush_urls(self):
urls = ['*/addon/%d/' % self.addon_id,
'*/api/*/addon/%d' % self.addon_id,
self.thumb_url,
self.icon_url,
self.preview_url,
self.header_url,
self.footer_url,
self.update_url]
return urls
def _image_url(self, filename):
return self.get_mirror_url(filename)
def _image_path(self, filename):
return os.path.join(helpers.user_media_path('addons'),
str(self.addon.id), filename)
def get_mirror_url(self, filename):
host = (settings.PRIVATE_MIRROR_URL if self.addon.is_disabled
else helpers.user_media_url('addons'))
image_url = posixpath.join(host, str(self.addon.id), filename or '')
# TODO: Bust the cache on the hash of the image contents or something.
if self.addon.modified is not None:
modified = int(time.mktime(self.addon.modified.timetuple()))
else:
modified = 0
return '%s?%s' % (image_url, modified)
@amo.cached_property
def thumb_url(self):
"""
Handles deprecated GetPersonas URL.
In days of yore, preview.jpg used to be a separate image.
In modern days, we use the same image for big preview + thumb.
"""
if self.is_new():
return self._image_url('preview.png')
else:
return self._image_url('preview.jpg')
@amo.cached_property
def thumb_path(self):
"""
Handles deprecated GetPersonas path.
In days of yore, preview.jpg used to be a separate image.
In modern days, we use the same image for big preview + thumb.
"""
if self.is_new():
return self._image_path('preview.png')
else:
return self._image_path('preview.jpg')
@amo.cached_property
def icon_url(self):
"""URL to personas square preview."""
if self.is_new():
return self._image_url('icon.png')
else:
return self._image_url('preview_small.jpg')
@amo.cached_property
def icon_path(self):
"""Path to personas square preview."""
if self.is_new():
return self._image_path('icon.png')
else:
return self._image_path('preview_small.jpg')
@amo.cached_property
def preview_url(self):
"""URL to Persona's big, 680px, preview."""
if self.is_new():
return self._image_url('preview.png')
else:
return self._image_url('preview_large.jpg')
@amo.cached_property
def preview_path(self):
"""Path to Persona's big, 680px, preview."""
if self.is_new():
return self._image_path('preview.png')
else:
return self._image_path('preview_large.jpg')
@amo.cached_property
def header_url(self):
return self._image_url(self.header)
@amo.cached_property
def footer_url(self):
return self.footer and self._image_url(self.footer) or ''
@amo.cached_property
def header_path(self):
return self._image_path(self.header)
@amo.cached_property
def footer_path(self):
return self.footer and self._image_path(self.footer) or ''
@amo.cached_property
def update_url(self):
locale = settings.LANGUAGE_URL_MAP.get(trans_real.get_language())
return settings.NEW_PERSONAS_UPDATE_URL % {
'locale': locale or settings.LANGUAGE_CODE,
'id': self.addon.id
}
@amo.cached_property
def theme_data(self):
"""Theme JSON Data for Browser/extension preview."""
def hexcolor(color):
return '#%s' % color
addon = self.addon
return {
            'id': unicode(self.addon.id),  # Personas dislike ints
'name': unicode(addon.name),
'accentcolor': hexcolor(self.accentcolor),
'textcolor': hexcolor(self.textcolor),
'category': (unicode(addon.all_categories[0].name) if
addon.all_categories else ''),
# TODO: Change this to be `addons_users.user.display_name`.
'author': self.display_username,
'description': unicode(addon.description),
'header': self.header_url,
'footer': self.footer_url or '',
'headerURL': self.header_url,
'footerURL': self.footer_url or '',
'previewURL': self.thumb_url,
'iconURL': self.icon_url,
'updateURL': self.update_url,
'detailURL': helpers.absolutify(self.addon.get_url_path()),
'version': '1.0'
}
@property
def json_data(self):
"""Persona JSON Data for Browser/extension preview."""
return json.dumps(self.theme_data,
separators=(',', ':'), cls=JSONEncoder)
def authors_other_addons(self, app=None):
"""
Return other addons by the author(s) of this addon,
optionally takes an app.
"""
qs = (Addon.objects.valid()
.exclude(id=self.addon.id)
.filter(type=amo.ADDON_PERSONA))
return (qs.filter(addonuser__listed=True,
authors__in=self.addon.listed_authors)
.distinct())
@amo.cached_property(writable=True)
def listed_authors(self):
return self.addon.listed_authors
class AddonCategory(caching.CachingMixin, models.Model):
addon = models.ForeignKey(Addon)
category = models.ForeignKey('Category')
feature = models.BooleanField(default=False)
feature_locales = models.CharField(max_length=255, default='', null=True)
objects = caching.CachingManager()
class Meta:
db_table = 'addons_categories'
unique_together = ('addon', 'category')
def flush_urls(self):
urls = ['*/addon/%d/' % self.addon_id,
'*%s' % self.category.get_url_path(), ]
return urls
@classmethod
def creatured_random(cls, category, lang):
return get_creatured_ids(category, lang)
class AddonUser(caching.CachingMixin, models.Model):
addon = models.ForeignKey(Addon)
user = UserForeignKey()
role = models.SmallIntegerField(default=amo.AUTHOR_ROLE_OWNER,
choices=amo.AUTHOR_CHOICES)
listed = models.BooleanField(_(u'Listed'), default=True)
position = models.IntegerField(default=0)
objects = caching.CachingManager()
def __init__(self, *args, **kwargs):
super(AddonUser, self).__init__(*args, **kwargs)
self._original_role = self.role
self._original_user_id = self.user_id
class Meta:
db_table = 'addons_users'
def flush_urls(self):
return self.addon.flush_urls() + self.user.flush_urls()
class AddonDependency(models.Model):
addon = models.ForeignKey(Addon, related_name='addons_dependencies')
dependent_addon = models.ForeignKey(Addon, related_name='dependent_on')
class Meta:
db_table = 'addons_dependencies'
unique_together = ('addon', 'dependent_addon')
class BlacklistedGuid(ModelBase):
guid = models.CharField(max_length=255, unique=True)
comments = models.TextField(default='', blank=True)
class Meta:
db_table = 'blacklisted_guids'
def __unicode__(self):
return self.guid
class Category(OnChangeMixin, ModelBase):
name = TranslatedField()
slug = SlugField(max_length=50,
help_text='Used in Category URLs.')
type = models.PositiveIntegerField(db_column='addontype_id',
choices=do_dictsort(amo.ADDON_TYPE))
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
null=True, blank=True,
db_column='application_id')
count = models.IntegerField('Addon count', default=0)
weight = models.IntegerField(
default=0, help_text='Category weight used in sort ordering')
misc = models.BooleanField(default=False)
addons = models.ManyToManyField(Addon, through='AddonCategory')
class Meta:
db_table = 'categories'
verbose_name_plural = 'Categories'
def __unicode__(self):
return unicode(self.name)
def flush_urls(self):
urls = ['*%s' % self.get_url_path(), ]
return urls
def get_url_path(self):
try:
type = amo.ADDON_SLUGS[self.type]
except KeyError:
type = amo.ADDON_SLUGS[amo.ADDON_EXTENSION]
return reverse('browse.%s' % type, args=[self.slug])
@staticmethod
def transformer(addons):
qs = (Category.objects.no_cache().filter(addons__in=addons)
.extra(select={'addon_id': 'addons_categories.addon_id'}))
cats = dict((addon_id, list(cs))
for addon_id, cs in sorted_groupby(qs, 'addon_id'))
for addon in addons:
addon.all_categories = cats.get(addon.id, [])
def clean(self):
if self.slug.isdigit():
raise ValidationError('Slugs cannot be all numbers.')
dbsignals.pre_save.connect(save_signal, sender=Category,
dispatch_uid='category_translations')
class Preview(ModelBase):
addon = models.ForeignKey(Addon, related_name='previews')
filetype = models.CharField(max_length=25)
thumbtype = models.CharField(max_length=25)
caption = TranslatedField()
position = models.IntegerField(default=0)
sizes = json_field.JSONField(max_length=25, default={})
class Meta:
db_table = 'previews'
ordering = ('position', 'created')
def flush_urls(self):
urls = ['*/addon/%d/' % self.addon_id,
self.thumbnail_url,
self.image_url, ]
return urls
def _image_url(self, url_template):
if self.modified is not None:
modified = int(time.mktime(self.modified.timetuple()))
else:
modified = 0
args = [self.id / 1000, self.id, modified]
if '.png' not in url_template:
args.insert(2, self.file_extension)
return url_template % tuple(args)
def _image_path(self, url_template):
args = [self.id / 1000, self.id]
if '.png' not in url_template:
args.append(self.file_extension)
return url_template % tuple(args)
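    # Editor's note (illustrative only): both helpers above fill URL/path
    # templates with (id / 1000, id[, extension], modified), so previews are
    # sharded into directories of one thousand. For a hypothetical preview
    # with id=12345 and an 'image/jpeg' filetype:
    #
    #     thumbnail -> .../previews/thumbs/12/12345.png?modified=<ts>
    #     full      -> .../previews/full/12/12345.jpeg?modified=<ts>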
def as_dict(self, src=None):
d = {'full': urlparams(self.image_url, src=src),
'thumbnail': urlparams(self.thumbnail_url, src=src),
'caption': unicode(self.caption)}
return d
@property
def is_landscape(self):
size = self.image_size
if not size:
return False
return size[0] > size[1]
@property
def file_extension(self):
        # Assume that a blank filetype means a PNG image.
if not self.filetype:
return 'png'
return self.filetype.split('/')[1]
@property
def thumbnail_url(self):
template = (
helpers.user_media_url('previews') +
'thumbs/%s/%d.png?modified=%s')
return self._image_url(template)
@property
def image_url(self):
template = (
helpers.user_media_url('previews') +
'full/%s/%d.%s?modified=%s')
return self._image_url(template)
@property
def thumbnail_path(self):
template = os.path.join(
helpers.user_media_path('previews'),
'thumbs',
'%s',
'%d.png'
)
return self._image_path(template)
@property
def image_path(self):
template = os.path.join(
helpers.user_media_path('previews'),
'full',
'%s',
'%d.%s'
)
return self._image_path(template)
@property
def thumbnail_size(self):
return self.sizes.get('thumbnail', []) if self.sizes else []
@property
def image_size(self):
return self.sizes.get('image', []) if self.sizes else []
dbsignals.pre_save.connect(save_signal, sender=Preview,
dispatch_uid='preview_translations')
def delete_preview_files(sender, instance, **kw):
"""On delete of the Preview object from the database, unlink the image
and thumb on the file system """
for filename in [instance.image_path, instance.thumbnail_path]:
if storage.exists(filename):
log.info('Removing filename: %s for preview: %s'
% (filename, instance.pk))
storage.delete(filename)
models.signals.post_delete.connect(delete_preview_files,
sender=Preview,
dispatch_uid='delete_preview_files')
class AppSupport(ModelBase):
"""Cache to tell us if an add-on's current version supports an app."""
addon = models.ForeignKey(Addon)
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min = models.BigIntegerField("Minimum app version", null=True)
max = models.BigIntegerField("Maximum app version", null=True)
class Meta:
db_table = 'appsupport'
unique_together = ('addon', 'app')
class Charity(ModelBase):
name = models.CharField(max_length=255)
url = models.URLField()
paypal = models.CharField(max_length=255)
class Meta:
db_table = 'charities'
@property
def outgoing_url(self):
if self.pk == amo.FOUNDATION_ORG:
return self.url
return get_outgoing_url(unicode(self.url))
class BlacklistedSlug(ModelBase):
name = models.CharField(max_length=255, unique=True, default='')
class Meta:
db_table = 'addons_blacklistedslug'
def __unicode__(self):
return self.name
@classmethod
def blocked(cls, slug):
return slug.isdigit() or cls.objects.filter(name=slug).exists()
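    # Editor's sketch (illustrative): slugs that are all digits are always
    # blocked, otherwise the blacklist table is consulted.
    #
    #     BlacklistedSlug.blocked('123')       # True (numeric)
    #     BlacklistedSlug.blocked('my-addon')  # True only if blacklisted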
class FrozenAddon(models.Model):
"""Add-ons in this table never get a hotness score."""
addon = models.ForeignKey(Addon)
class Meta:
db_table = 'frozen_addons'
def __unicode__(self):
return 'Frozen: %s' % self.addon_id
@receiver(dbsignals.post_save, sender=FrozenAddon)
def freezer(sender, instance, **kw):
# Adjust the hotness of the FrozenAddon.
if instance.addon_id:
Addon.objects.get(id=instance.addon_id).update(hotness=0)
class CompatOverride(ModelBase):
"""Helps manage compat info for add-ons not hosted on AMO."""
name = models.CharField(max_length=255, blank=True, null=True)
guid = models.CharField(max_length=255, unique=True)
addon = models.ForeignKey(Addon, blank=True, null=True,
help_text='Fill this out to link an override '
'to a hosted add-on')
class Meta:
db_table = 'compat_override'
unique_together = ('addon', 'guid')
def save(self, *args, **kw):
if not self.addon:
qs = Addon.objects.filter(guid=self.guid)
if qs:
self.addon = qs[0]
return super(CompatOverride, self).save(*args, **kw)
def __unicode__(self):
if self.addon:
return unicode(self.addon)
elif self.name:
return '%s (%s)' % (self.name, self.guid)
else:
return self.guid
def is_hosted(self):
"""Am I talking about an add-on on AMO?"""
return bool(self.addon_id)
@staticmethod
def transformer(overrides):
if not overrides:
return
id_map = dict((o.id, o) for o in overrides)
qs = CompatOverrideRange.objects.filter(compat__in=id_map)
for compat_id, ranges in sorted_groupby(qs, 'compat_id'):
id_map[compat_id].compat_ranges = list(ranges)
# May be filled in by a transformer for performance.
@amo.cached_property(writable=True)
def compat_ranges(self):
return list(self._compat_ranges.all())
def collapsed_ranges(self):
"""Collapse identical version ranges into one entity."""
Range = collections.namedtuple('Range', 'type min max apps')
AppRange = collections.namedtuple('AppRange', 'app min max')
rv = []
def sort_key(x):
return (x.min_version, x.max_version, x.type)
for key, compats in sorted_groupby(self.compat_ranges, key=sort_key):
compats = list(compats)
first = compats[0]
item = Range(first.override_type(), first.min_version,
first.max_version, [])
for compat in compats:
app = AppRange(amo.APPS_ALL[compat.app],
compat.min_app_version, compat.max_app_version)
item.apps.append(app)
rv.append(item)
return rv
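    # Editor's sketch (hypothetical data): two CompatOverrideRange rows with
    # the same add-on version span but different apps collapse into a single
    # Range whose `apps` list holds one AppRange per application:
    #
    #     Range(type='incompatible', min='1.0', max='1.5',
    #           apps=[AppRange(app=Firefox, min='4.0', max='10.*'),
    #                 AppRange(app=Thunderbird, min='5.0', max='*')])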
OVERRIDE_TYPES = (
(0, 'Compatible (not supported)'),
(1, 'Incompatible'),
)
class CompatOverrideRange(ModelBase):
"""App compatibility for a certain version range of a RemoteAddon."""
compat = models.ForeignKey(CompatOverride, related_name='_compat_ranges')
type = models.SmallIntegerField(choices=OVERRIDE_TYPES, default=1)
min_version = models.CharField(
max_length=255, default='0',
help_text=u'If not "0", version is required to exist for the override'
u' to take effect.')
max_version = models.CharField(
max_length=255, default='*',
help_text=u'If not "*", version is required to exist for the override'
u' to take effect.')
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min_app_version = models.CharField(max_length=255, default='0')
max_app_version = models.CharField(max_length=255, default='*')
class Meta:
db_table = 'compat_override_range'
def override_type(self):
"""This is what Firefox wants to see in the XML output."""
return {0: 'compatible', 1: 'incompatible'}[self.type]
class IncompatibleVersions(ModelBase):
"""
Denormalized table to join against for fast compat override filtering.
    This table exists so we can join against a specific version record, since
    a CompatOverrideRange can be wildcarded (e.g. 0 to *, or 1.0 to 1.*) and
    add-on versioning isn't consistent enough, compared to Firefox versioning,
    to trust `version_int` in all cases. Extra logic is therefore needed to
    decide when a particular version falls within the range of a compatibility
    override.
"""
version = models.ForeignKey(Version, related_name='+')
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min_app_version = models.CharField(max_length=255, blank=True, default='0')
max_app_version = models.CharField(max_length=255, blank=True, default='*')
min_app_version_int = models.BigIntegerField(blank=True, null=True,
editable=False, db_index=True)
max_app_version_int = models.BigIntegerField(blank=True, null=True,
editable=False, db_index=True)
class Meta:
db_table = 'incompatible_versions'
def __unicode__(self):
return u'<IncompatibleVersion V:%s A:%s %s-%s>' % (
            self.version.id, self.app, self.min_app_version,
self.max_app_version)
def save(self, *args, **kw):
self.min_app_version_int = version_int(self.min_app_version)
self.max_app_version_int = version_int(self.max_app_version)
return super(IncompatibleVersions, self).save(*args, **kw)
def update_incompatible_versions(sender, instance, **kw):
if not instance.compat.addon_id:
return
if not instance.compat.addon.type == amo.ADDON_EXTENSION:
return
from . import tasks
versions = instance.compat.addon.versions.values_list('id', flat=True)
for chunk in chunked(versions, 50):
tasks.update_incompatible_appversions.delay(chunk)
models.signals.post_save.connect(update_incompatible_versions,
sender=CompatOverrideRange,
dispatch_uid='cor_update_incompatible')
models.signals.post_delete.connect(update_incompatible_versions,
sender=CompatOverrideRange,
dispatch_uid='cor_update_incompatible')
def track_new_status(sender, instance, *args, **kw):
if kw.get('raw'):
        # The addon is being loaded from a fixture.
return
if kw.get('created'):
track_addon_status_change(instance)
models.signals.post_save.connect(track_new_status,
sender=Addon,
dispatch_uid='track_new_addon_status')
@Addon.on_change
def track_status_change(old_attr={}, new_attr={}, **kw):
new_status = new_attr.get('status')
old_status = old_attr.get('status')
if new_status != old_status:
track_addon_status_change(kw['instance'])
def track_addon_status_change(addon):
statsd.incr('addon_status_change.all.status_{}'
.format(addon.status))
listed_tag = 'listed' if addon.is_listed else 'unlisted'
statsd.incr('addon_status_change.{}.status_{}'
.format(listed_tag, addon.status))
| {
"content_hash": "0c8bf1195105a126f84848cf808ebae8",
"timestamp": "",
"source": "github",
"line_count": 2255,
"max_line_length": 79,
"avg_line_length": 37.58713968957871,
"alnum_prop": 0.5876780046956666,
"repo_name": "jpetto/olympia",
"id": "ff14f887bdd7cb8150bf7b308585e0b515710275",
"size": "84759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/addons/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "665496"
},
{
"name": "HTML",
"bytes": "1606994"
},
{
"name": "JavaScript",
"bytes": "1315514"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "4026490"
},
{
"name": "Shell",
"bytes": "9145"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
} |
from container import Container
| {
"content_hash": "b93fb350f8b1cdc78e19db411892f5d2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.875,
"repo_name": "macbinn/summer",
"id": "3e310304704c47f5e74dc757ff2a0c9a21558921",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "summer/web/container/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22926"
}
],
"symlink_target": ""
} |
"""Unit tests for core identity behavior."""
import os
import uuid
import mock
from oslo_config import cfg
from keystone import exception
from keystone import identity
from keystone.tests import unit as tests
from keystone.tests.unit.ksfixtures import database
CONF = cfg.CONF
class TestDomainConfigs(tests.BaseTestCase):
def setUp(self):
super(TestDomainConfigs, self).setUp()
self.addCleanup(CONF.reset)
self.tmp_dir = tests.dirs.tmp()
CONF.set_override('domain_config_dir', self.tmp_dir, 'identity')
def test_config_for_nonexistent_domain(self):
"""Having a config for a non-existent domain will be ignored.
There are no assertions in this test because there are no side
effects. If there is a config file for a domain that does not
exist it should be ignored.
"""
domain_id = uuid.uuid4().hex
domain_config_filename = os.path.join(self.tmp_dir,
'keystone.%s.conf' % domain_id)
self.addCleanup(lambda: os.remove(domain_config_filename))
with open(domain_config_filename, 'w'):
"""Write an empty config file."""
e = exception.DomainNotFound(domain_id=domain_id)
mock_assignment_api = mock.Mock()
mock_assignment_api.get_domain_by_name.side_effect = e
domain_config = identity.DomainConfigs()
fake_standard_driver = None
domain_config.setup_domain_drivers(fake_standard_driver,
mock_assignment_api)
def test_config_for_dot_name_domain(self):
        # Ensure we can extract the right domain name from the filename, even
        # when the name contains dots.
domain_config_filename = os.path.join(self.tmp_dir,
'keystone.abc.def.com.conf')
with open(domain_config_filename, 'w'):
"""Write an empty config file."""
self.addCleanup(os.remove, domain_config_filename)
with mock.patch.object(identity.DomainConfigs,
'_load_config_from_file') as mock_load_config:
domain_config = identity.DomainConfigs()
fake_assignment_api = None
fake_standard_driver = None
domain_config.setup_domain_drivers(fake_standard_driver,
fake_assignment_api)
mock_load_config.assert_called_once_with(fake_assignment_api,
[domain_config_filename],
'abc.def.com')
class TestDatabaseDomainConfigs(tests.TestCase):
def setUp(self):
super(TestDatabaseDomainConfigs, self).setUp()
self.useFixture(database.Database())
self.load_backends()
def test_domain_config_in_database_disabled_by_default(self):
self.assertFalse(CONF.identity.domain_configurations_from_database)
def test_loading_config_from_database(self):
CONF.set_override('domain_configurations_from_database', True,
'identity')
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.resource_api.create_domain(domain['id'], domain)
# Override two config options for our domain
conf = {'ldap': {'url': uuid.uuid4().hex,
'suffix': uuid.uuid4().hex},
'identity': {
'driver': 'keystone.identity.backends.ldap.Identity'}}
self.domain_config_api.create_config(domain['id'], conf)
fake_standard_driver = None
domain_config = identity.DomainConfigs()
domain_config.setup_domain_drivers(fake_standard_driver,
self.resource_api)
# Make sure our two overrides are in place, and others are not affected
res = domain_config.get_domain_conf(domain['id'])
self.assertEqual(conf['ldap']['url'], res.ldap.url)
self.assertEqual(conf['ldap']['suffix'], res.ldap.suffix)
self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope)
# Now turn off using database domain configuration and check that the
# default config file values are now seen instead of the overrides.
CONF.set_override('domain_configurations_from_database', False,
'identity')
domain_config = identity.DomainConfigs()
domain_config.setup_domain_drivers(fake_standard_driver,
self.resource_api)
res = domain_config.get_domain_conf(domain['id'])
self.assertEqual(CONF.ldap.url, res.ldap.url)
self.assertEqual(CONF.ldap.suffix, res.ldap.suffix)
self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope)
| {
"content_hash": "d372b91347496536b87e9d72df88ca6b",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 42.69911504424779,
"alnum_prop": 0.6051813471502591,
"repo_name": "rushiagr/keystone",
"id": "6c8faebbeb0bd58fabd81bc52575f1a1ce97bdb4",
"size": "5371",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/identity/test_core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "3739901"
},
{
"name": "Shell",
"bytes": "10877"
}
],
"symlink_target": ""
} |
import pathlib
import random
import re
import shutil
import subprocess
import tempfile
import uuid
import pytest
from ruamel import yaml
from scripts import generate_terraform
PROJECT_ROOT = generate_terraform.PROJECT_ROOT
FILE_PATHS = {
"dataset": PROJECT_ROOT / "samples" / "dataset.yaml",
"pipeline": PROJECT_ROOT / "samples" / "pipeline.yaml",
"license": PROJECT_ROOT / "templates" / "airflow" / "license_header.py.jinja2",
}
ENV_PATH = PROJECT_ROOT / ".test"
ENV_DATASETS_PATH = ENV_PATH / "datasets"
yaml = yaml.YAML(typ="safe")
@pytest.fixture
def dataset_path():
with tempfile.TemporaryDirectory(
dir=generate_terraform.DATASETS_PATH, suffix="_dataset"
) as dir_path:
try:
yield pathlib.Path(dir_path)
finally:
shutil.rmtree(dir_path, ignore_errors=True)
@pytest.fixture
def pipeline_path(dataset_path, suffix="_pipeline"):
pipelines_dir = dataset_path / "pipelines"
pipelines_dir.mkdir(parents=True, exist_ok=True)
with tempfile.TemporaryDirectory(dir=pipelines_dir, suffix=suffix) as dir_path:
try:
yield pathlib.Path(dir_path)
finally:
shutil.rmtree(dir_path)
@pytest.fixture
def project_id() -> str:
return "test-gcp-project-id"
@pytest.fixture
def bucket_name_prefix() -> str:
return "1234-zyxwvu"
@pytest.fixture
def region() -> str:
return "us-east4"
@pytest.fixture
def impersonating_acct() -> str:
return "test-impersonator@project.iam.gserviceaccount.com"
@pytest.fixture
def gcs_bucket_resource() -> dict:
return {
"type": "storage_bucket",
"name": "{{ friendly_project_id }}.{{ dataset_id }}",
}
@pytest.fixture
def bq_table_resource() -> dict:
return {
"type": "bigquery_table",
"table_id": "test_bq_table",
"schema": [
{"name": "test_col_string", "type": "STRING"},
{"name": "test_col_int", "type": "INT64"},
{"name": "test_col_numeric", "type": "NUMERIC"},
{"name": "test_col_datetime", "type": "DATETIME"},
{"name": "test_col_struct", "type": "STRUCT"},
],
}
@pytest.fixture
def tf_state_bucket() -> str:
return "test-terraform-state-bucket"
@pytest.fixture
def tf_state_prefix() -> str:
return "test/terraform/state"
@pytest.fixture
def env() -> str:
return "test"
def set_dataset_ids_in_config_files(
dataset_path: pathlib.Path, pipeline_path: pathlib.Path
):
shutil.copyfile(FILE_PATHS["dataset"], dataset_path / "pipelines" / "dataset.yaml")
shutil.copyfile(FILE_PATHS["pipeline"], pipeline_path / "pipeline.yaml")
dataset_config = yaml.load(dataset_path / "pipelines" / "dataset.yaml")
dataset_config["dataset"]["name"] = dataset_path.name
for resource in dataset_config["resources"]:
if resource["type"] == "bigquery_dataset":
resource["dataset_id"] = dataset_path.name
yaml.dump(dataset_config, dataset_path / "pipelines" / "dataset.yaml")
pipeline_config = yaml.load(pipeline_path / "pipeline.yaml")
for resource in pipeline_config["resources"]:
if resource["type"] == "bigquery_table":
resource["dataset_id"] = dataset_path.name
yaml.dump(pipeline_config, pipeline_path / "pipeline.yaml")
def test_tf_templates_exist():
for _, filepath in generate_terraform.TEMPLATE_PATHS.items():
assert filepath.exists()
def test_main_generates_tf_files(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
format_code=False,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").exists()
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").exists()
assert (path_prefix / "variables.tf").exists()
assert not (
generate_terraform.DATASETS_PATH
/ dataset_path.name
/ "infra"
/ "terraform.tfvars"
).exists()
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).exists()
assert not (
generate_terraform.DATASETS_PATH / dataset_path.name / "infra" / "backend.tf"
).exists()
assert (ENV_DATASETS_PATH / dataset_path.name / "infra" / "backend.tf").exists()
def test_main_without_tf_remote_state_generates_tf_files_except_backend_tf(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
format_code=False,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").exists()
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").exists()
assert (path_prefix / "variables.tf").exists()
assert not (path_prefix / "backend.tf").exists()
assert not (
generate_terraform.DATASETS_PATH
/ dataset_path.name
/ "infra"
/ "terraform.tfvars"
).exists()
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).exists()
pipeline_path_2 = pipeline_path
def test_main_with_multiple_pipelines(
dataset_path,
pipeline_path,
pipeline_path_2,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
assert pipeline_path.name != pipeline_path_2.name
shutil.copyfile(FILE_PATHS["dataset"], dataset_path / "pipelines" / "dataset.yaml")
shutil.copyfile(FILE_PATHS["pipeline"], pipeline_path / "pipeline.yaml")
shutil.copyfile(FILE_PATHS["pipeline"], pipeline_path_2 / "pipeline.yaml")
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
format_code=False,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").exists()
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").exists()
assert (path_prefix / f"{pipeline_path_2.name}_pipeline.tf").exists()
assert (path_prefix / "variables.tf").exists()
assert not (
generate_terraform.DATASETS_PATH
/ dataset_path.name
/ "infra"
/ "terraform.tfvars"
).exists()
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).exists()
assert not (
generate_terraform.DATASETS_PATH / dataset_path.name / "infra" / "backend.tf"
).exists()
assert (ENV_DATASETS_PATH / dataset_path.name / "infra" / "backend.tf").exists()
def test_main_with_multiple_bq_dataset_ids(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
# First, declare an additional custom BQ dataset in dataset.yaml
another_dataset_id = "another_dataset"
assert another_dataset_id != dataset_path.name
dataset_config = yaml.load(dataset_path / "pipelines" / "dataset.yaml")
dataset_config["resources"].append(
{"type": "bigquery_dataset", "dataset_id": another_dataset_id}
)
yaml.dump(dataset_config, dataset_path / "pipelines" / "dataset.yaml")
# Then, add a BQ table under the additional BQ dataset
pipeline_config = yaml.load(pipeline_path / "pipeline.yaml")
pipeline_config["resources"].append(
{
"type": "bigquery_table",
"table_id": "another_table",
"dataset_id": another_dataset_id,
}
)
yaml.dump(pipeline_config, pipeline_path / "pipeline.yaml")
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
format_code=False,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").exists()
# Match the "google_bigquery_dataset" properties, i.e. any lines between the
# curly braces, in the *_dataset.tf file
regexp = r"\"google_bigquery_dataset\" \"" + r"[A-Za-z0-9_]+" + r"\" \{(.*?)\}"
bq_dataset_tf_string = re.compile(regexp, flags=re.MULTILINE | re.DOTALL)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
matches = bq_dataset_tf_string.findall(
(path_prefix / f"{dataset_path.name}_dataset.tf").read_text()
)
dataset_ids = set()
for match in matches:
result = re.search(r"dataset_id\s+\=\s+\"([A-Za-z0-9_]+)\"", match)
assert result.group(1)
dataset_ids.add(result.group(1))
# Assert that the dataset_ids are unique
assert len(dataset_ids) == len(matches)
assert another_dataset_id in dataset_ids
assert dataset_path.name in dataset_ids
def test_dataset_without_any_pipelines(
dataset_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
(dataset_path / "pipelines").mkdir(parents=True)
shutil.copyfile(FILE_PATHS["dataset"], dataset_path / "pipelines" / "dataset.yaml")
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
format_code=False,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").exists()
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert not (
generate_terraform.DATASETS_PATH
/ dataset_path.name
/ "infra"
/ "terraform.tfvars"
).exists()
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).exists()
assert not (
generate_terraform.DATASETS_PATH / dataset_path.name / "infra" / "backend.tf"
).exists()
assert (ENV_DATASETS_PATH / dataset_path.name / "infra" / "backend.tf").exists()
def test_dataset_path_does_not_exist(
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
with pytest.raises(FileNotFoundError):
generate_terraform.main(
"non_existing_dir",
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
)
def test_generated_tf_files_contain_license_headers(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
format_code=False,
)
license_header = pathlib.Path(
generate_terraform.TEMPLATE_PATHS["license"]
).read_text()
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").read_text().count(license_header) == 1
assert (path_prefix / f"{dataset_path.name}_dataset.tf").read_text().count(
license_header
) == 1
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").read_text().count(
license_header
) == 1
assert (path_prefix / "variables.tf").read_text().count(license_header) == 1
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).read_text().count(license_header) == 1
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "backend.tf"
).read_text().count(license_header) == 1
def test_dataset_tf_file_contains_description_when_specified(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
format_code=False,
)
config = yaml.load(open(dataset_path / "pipelines" / "dataset.yaml"))
bq_dataset = next(
(r for r in config["resources"] if r["type"] == "bigquery_dataset"), None
)
assert bq_dataset
assert bq_dataset["description"]
# Match the "google_bigquery_dataset" properties, i.e. any lines between the
# curly braces, in the *_dataset.tf file
regexp = r"\"google_bigquery_dataset\" \"" + dataset_path.name + r"\" \{(.*?)\}"
bq_dataset_tf_string = re.compile(regexp, flags=re.MULTILINE | re.DOTALL)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
result = bq_dataset_tf_string.search(
(path_prefix / f"{dataset_path.name}_dataset.tf").read_text()
)
assert re.search(r"dataset_id\s+\=", result.group(1))
assert re.search(r"description\s+\=", result.group(1))
def test_bq_dataset_can_have_a_description_with_newlines_and_quotes(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
shutil.copyfile(FILE_PATHS["dataset"], dataset_path / "pipelines" / "dataset.yaml")
shutil.copyfile(FILE_PATHS["pipeline"], pipeline_path / "pipeline.yaml")
config = yaml.load(open(dataset_path / "pipelines" / "dataset.yaml"))
# Get a bigquery_dataset resource and modify the `description` field
bq_dataset = next(
(r for r in config["resources"] if r["type"] == "bigquery_dataset"), None
)
test_description = 'Multiline\nstring with\n"quotes"'
bq_dataset["description"] = test_description
with open(dataset_path / "pipelines" / "dataset.yaml", "w") as file:
yaml.dump(config, file)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
format_code=False,
)
env_dataset_path = ENV_DATASETS_PATH / dataset_path.name
subprocess.check_call(["terraform", "fmt"], cwd=env_dataset_path / "infra")
def test_dataset_tf_has_no_bq_dataset_description_when_unspecified(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
config = yaml.load(open(dataset_path / "pipelines" / "dataset.yaml"))
# Get the first bigquery_dataset resource and delete the `description` field
bq_dataset = next(
(r for r in config["resources"] if r["type"] == "bigquery_dataset")
)
del bq_dataset["description"]
with open(dataset_path / "pipelines" / "dataset.yaml", "w") as file:
yaml.dump(config, file)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
)
# Match the "google_bigquery_dataset" properties, i.e. any lines between the
# curly braces, in the *_dataset.tf file
regexp = r"\"google_bigquery_dataset\" \"" + dataset_path.name + r"\" \{(.*?)\}"
bq_dataset_tf_string = re.compile(regexp, flags=re.MULTILINE | re.DOTALL)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
result = bq_dataset_tf_string.search(
(path_prefix / f"{dataset_path.name}_dataset.tf").read_text()
)
assert re.search(r"dataset_id\s+\=", result.group(1))
assert not re.search(r"description\s+\=", result.group(1))
def test_pipeline_tf_contains_optional_properties_when_specified(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
)
config = yaml.load(open(pipeline_path / "pipeline.yaml"))
bq_table = next(
(r for r in config["resources"] if r["type"] == "bigquery_table"), None
)
assert bq_table
assert bq_table["description"]
assert bq_table["time_partitioning"]
assert bq_table["clustering"]
assert bq_table["deletion_protection"]
# Match the "google_bigquery_table" properties, i.e. any lines between the
# curly braces, in the *_pipeline.tf file
regexp = (
r"\"google_bigquery_table\" \""
+ bq_table["dataset_id"]
+ "_"
+ bq_table["table_id"]
+ r"\" \{(.*?)^\}"
)
bq_table_tf_string = re.compile(regexp, flags=re.MULTILINE | re.DOTALL)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
result = bq_table_tf_string.search(
(path_prefix / f"{pipeline_path.name}_pipeline.tf").read_text()
)
assert re.search(r"table_id\s+\=", result.group(1))
assert re.search(r"description\s+\=", result.group(1))
assert re.search(r"time_partitioning\s+\{", result.group(1))
assert re.search(r"clustering\s+\=", result.group(1))
assert re.search(r"deletion_protection\s+\=", result.group(1))
def test_infra_vars_are_in_tfvars_file(
dataset_path,
pipeline_path,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
    # Creates a .vars.{env}.yaml file in the dataset folder and sets its contents
env_vars = {
"infra": {
"project_id": f"test-{uuid.uuid4()}",
"region": "test_region",
"env": env,
}
}
yaml.dump(env_vars, dataset_path / f".vars.{env}.yaml")
generate_terraform.main(
dataset_path.name,
"",
"",
"",
"",
env,
"",
"",
)
tfvars_file = ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
assert tfvars_file.exists()
for key, val in env_vars["infra"].items():
# Matches the following expressions in the *.tfvars file
#
# key = "value"
# another_key = "another value"
regexp = key + r"\s+= \"" + val + r"\""
assert re.search(regexp, tfvars_file.read_text())
def test_infra_vars_generates_gcs_buckets_with_iam_policies(
dataset_path,
pipeline_path,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
test_bucket = f"bucket-{uuid.uuid4()}"
# Replace bucket name in dataset.yaml
dataset_config = yaml.load(dataset_path / "pipelines" / "dataset.yaml")
for resource in dataset_config["resources"]:
if resource["type"] == "storage_bucket":
resource["name"] = test_bucket
yaml.dump(dataset_config, dataset_path / "pipelines" / "dataset.yaml")
    # Creates a .vars.{env}.yaml file in the dataset folder and sets its contents
env_vars = {
"infra": {
"project_id": f"test-{uuid.uuid4()}",
"region": "test_region",
"env": env,
"iam_policies": {
"storage_buckets": {
test_bucket: [
{
"role": "roles/storage.objectViewer",
"members": ["test-user@google.com"],
}
]
}
},
}
}
yaml.dump(env_vars, dataset_path / f".vars.{env}.yaml")
generate_terraform.main(
dataset_path.name,
"",
"",
"",
"",
env,
"",
"",
)
dataset_tf_file = dataset_path / "infra" / f"{dataset_path.name}_dataset.tf"
assert dataset_tf_file.exists()
regex_data_iam_block = (
r"data \"google_iam_policy\" \"storage_bucket__" + test_bucket + r"\" \{"
)
assert re.search(regex_data_iam_block, dataset_tf_file.read_text())
regex_resource_iam_block = (
r"resource \"google_storage_bucket_iam_policy\" \"" + test_bucket + r"\" \{"
)
assert re.search(regex_resource_iam_block, dataset_tf_file.read_text())
def test_infra_vars_generates_bq_datasets_with_iam_policies(
dataset_path,
pipeline_path,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
bq_dataset = f"bq-dataset-{uuid.uuid4()}"
    # Replace the BigQuery dataset ID in dataset.yaml
dataset_config = yaml.load(dataset_path / "pipelines" / "dataset.yaml")
for resource in dataset_config["resources"]:
if resource["type"] == "bigquery_dataset":
resource["dataset_id"] = bq_dataset
yaml.dump(dataset_config, dataset_path / "pipelines" / "dataset.yaml")
    # Creates a .vars.{env}.yaml file in the dataset folder and sets its contents
env_vars = {
"infra": {
"project_id": f"test-{uuid.uuid4()}",
"region": "test_region",
"env": env,
"iam_policies": {
"bigquery_datasets": {
bq_dataset: [
{
"role": "roles/storage.objectViewer",
"members": ["test-user@google.com"],
}
]
}
},
}
}
yaml.dump(env_vars, dataset_path / f".vars.{env}.yaml")
generate_terraform.main(dataset_path.name, "", "", "", "", env, "", "")
dataset_tf_file = dataset_path / "infra" / f"{dataset_path.name}_dataset.tf"
assert dataset_tf_file.exists()
regex_data_iam_block = (
r"data \"google_iam_policy\" \"bq_ds__" + bq_dataset + r"\" \{"
)
assert re.search(regex_data_iam_block, dataset_tf_file.read_text())
regex_resource_iam_block = (
r"resource \"google_bigquery_dataset_iam_policy\" \"" + bq_dataset + r"\" \{"
)
assert re.search(regex_resource_iam_block, dataset_tf_file.read_text())
def test_infra_vars_without_iam_policies_generate_tf_without_iam_policies(
dataset_path,
pipeline_path,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
test_bucket = f"bucket-{uuid.uuid4()}"
# Replace bucket name in dataset.yaml
dataset_config = yaml.load(dataset_path / "pipelines" / "dataset.yaml")
for resource in dataset_config["resources"]:
if resource["type"] == "storage_bucket":
resource["name"] = test_bucket
yaml.dump(dataset_config, dataset_path / "pipelines" / "dataset.yaml")
    # Creates a .vars.{env}.yaml file in the dataset folder and sets its contents
env_vars = {
"infra": {
"project_id": f"test-{uuid.uuid4()}",
"region": "test_region",
"env": env,
}
}
yaml.dump(env_vars, dataset_path / f".vars.{env}.yaml")
generate_terraform.main(
dataset_path.name, "", "", "", "", env, "", "", format_code=False
)
dataset_tf_file = dataset_path / "infra" / f"{dataset_path.name}_dataset.tf"
assert dataset_tf_file.exists()
assert not re.search("google_iam_policy", dataset_tf_file.read_text())
def test_pipeline_tf_has_no_optional_properties_when_unspecified(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
config = yaml.load(open(pipeline_path / "pipeline.yaml"))
# Get the first bigquery_table resource and delete the `description` field
bq_table = next((r for r in config["resources"] if r["type"] == "bigquery_table"))
del bq_table["description"]
del bq_table["time_partitioning"]
del bq_table["clustering"]
del bq_table["deletion_protection"]
with open(pipeline_path / "pipeline.yaml", "w") as file:
yaml.dump(config, file)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
)
# Match the "google_bigquery_table" properties, i.e. any lines between the
# curly braces, in the *_pipeline.tf file
regexp = (
r"\"google_bigquery_table\" \""
+ bq_table["dataset_id"]
+ "_"
+ bq_table["table_id"]
+ r"\" \{(.*?)^\}"
)
bq_table_tf_string = re.compile(regexp, flags=re.MULTILINE | re.DOTALL)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
result = bq_table_tf_string.search(
(path_prefix / f"{pipeline_path.name}_pipeline.tf").read_text()
)
assert re.search(r"table_id\s+\=", result.group(1))
assert not re.search(r"description\s+\=", result.group(1))
assert not re.search(r"time_partitioning\s+\{", result.group(1))
assert not re.search(r"clustering\s+\=", result.group(1))
assert not re.search(r"deletion_protection\s+\=", result.group(1))
def test_bq_table_can_have_a_description_with_newlines_and_quotes(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
config = yaml.load(open(pipeline_path / "pipeline.yaml"))
# Get a bigquery_table resource and modify the `description` field
bq_table = next(
(r for r in config["resources"] if r["type"] == "bigquery_table"), None
)
bq_table["description"] = 'Multiline\nstring with\n"quotes"'
with open(pipeline_path / "pipeline.yaml", "w") as file:
yaml.dump(config, file)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
format_code=False,
)
env_dataset_path = ENV_DATASETS_PATH / dataset_path.name
subprocess.check_call(["terraform", "fmt"], cwd=env_dataset_path / "infra")
def test_bq_table_name_starts_with_digits_but_tf_resource_name_does_not(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
config = yaml.load(open(pipeline_path / "pipeline.yaml"))
table_name_starting_with_digit = f"{str(random.randint(0, 9))}_table"
# In the YAML config, set the BigQuery table name to start with a digit
bq_table = next(
(r for r in config["resources"] if r["type"] == "bigquery_table"), None
)
bq_table["table_id"] = table_name_starting_with_digit
with open(pipeline_path / "pipeline.yaml", "w") as file:
yaml.dump(config, file)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
)
# Match the Terraform resource name and the table_id value in the BigQuery
# table's resource definition. As a concrete example, substrings in
# ALL_CAPS are matched below:
#
# resource "google_bigquery_table" "RESOURCE_NAME_STARTING_WITH_NONDIGIT" {
# description = ""
# table_id = "TABLE_NAME_STARTING_WITH_DIGIT"
# }
tf_resource_regexp = r"\"google_bigquery_table\" \"([a-zA-Z0-9_-]+)\" .*?"
table_id_regexp = r"table_id\s+\= \"(.*?)\"\n"
matcher = re.compile(
tf_resource_regexp + table_id_regexp,
flags=re.MULTILINE | re.DOTALL,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
result = matcher.search(
(path_prefix / f"{pipeline_path.name}_pipeline.tf").read_text()
)
tf_resource_name = result.group(1)
table_id = result.group(2)
assert table_id == table_name_starting_with_digit
assert not tf_resource_name[0].isdigit()
assert table_id[0].isdigit()
assert table_id in tf_resource_name
def test_bucket_names_must_not_contain_dots_and_google():
for name in (
"test.bucket.name",
"google-bucket",
"google.bucket.name",
"g00gle",
"googl3",
):
with pytest.raises(ValueError):
generate_terraform.validate_bucket_name(name)
def test_bucket_names_must_use_hyphens_instead_of_underscores():
for name in (
"test_underscore",
"test-bucket_with-underscore",
):
with pytest.raises(ValueError):
generate_terraform.validate_bucket_name(name)
def test_bucket_prefixes_must_use_hyphens_instead_of_underscores(
dataset_path,
project_id,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
for prefix in (
"test_prefix",
"test-hyphens_and_underscores",
):
with pytest.raises(ValueError):
generate_terraform.main(
dataset_path.name,
project_id,
prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
format_code=False,
)
def test_validation_on_generated_tf_files_in_dot_env_dir(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
format_code=False,
)
env_dataset_path = ENV_DATASETS_PATH / dataset_path.name
subprocess.check_call(["terraform", "init"], cwd=env_dataset_path / "infra")
subprocess.check_call(["terraform", "validate"], cwd=env_dataset_path / "infra")
def test_validation_on_generated_tf_files_in_project_dir(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
format_code=False,
)
project_dataset_path = generate_terraform.DATASETS_PATH / dataset_path.name
subprocess.check_call(["terraform", "init"], cwd=(project_dataset_path / "infra"))
subprocess.check_call(
["terraform", "validate"], cwd=(project_dataset_path / "infra")
)
| {
"content_hash": "e62b2e33d15272def95bc4eec7d62631",
"timestamp": "",
"source": "github",
"line_count": 1142,
"max_line_length": 87,
"avg_line_length": 29.080560420315237,
"alnum_prop": 0.5979524239686841,
"repo_name": "GoogleCloudPlatform/public-datasets-pipelines",
"id": "7ce0fa93e86558d846c3e0197c6db94c138f8cb6",
"size": "33787",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/scripts/test_generate_terraform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "101888"
},
{
"name": "HCL",
"bytes": "678082"
},
{
"name": "Jinja",
"bytes": "12539"
},
{
"name": "Jupyter Notebook",
"bytes": "655592"
},
{
"name": "Python",
"bytes": "4784376"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template, request
import requests
app = Flask(__name__)
@app.route("/")
def index():
r = requests.get('http://kritikosbot-telegram.appspot.com/gamestatusjson')
ready = r.json()["ready"]
if not ready:
return "Waiting for players..."
else:
cards = r.json()['cards']
return render_template("index.html",
cards=cards)
if __name__ == "__main__":
app.run()
| {
"content_hash": "9adfcbd2818ec5d8746c91525f1cbc68",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 25.61111111111111,
"alnum_prop": 0.5726681127982647,
"repo_name": "anbasile/donderskritikos",
"id": "a09089b5168e64df3cdd5ccacae1822e6c3acaa8",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hello.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "8827"
},
{
"name": "Python",
"bytes": "1503"
}
],
"symlink_target": ""
} |
from Base.IFilesRepository import IFilesRepository
class BaseRepository(IFilesRepository):
def __init__(self):
self.BaseDirectory = "Templates/Base"
self.Pattern = "*.template"
super(BaseRepository, self).__init__(self.BaseDirectory)
def getPattern(self):
return self.Pattern | {
"content_hash": "06ddfa896a96fa7e04a71cc09f12b968",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 58,
"avg_line_length": 28.7,
"alnum_prop": 0.7630662020905923,
"repo_name": "afronski/grammar-generator",
"id": "dbd1afcc76d923fdbfa7c85e0ff3d9e0d9d4e952",
"size": "287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grammar-generator/Repositories/BaseRepository.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76329"
},
{
"name": "Shell",
"bytes": "140"
}
],
"symlink_target": ""
} |
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'dirtyfields.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 't!=2ot+d^ra^cglnhak-zi91#^*fco%183wgylb7ozqgk!&f*2'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'example_app.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'example_app.testing_app'
)
| {
"content_hash": "e9e38496c852793e4babf3bef9cdbfd0",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 122,
"avg_line_length": 35.82105263157895,
"alnum_prop": 0.6858654128709962,
"repo_name": "chrisglass/django-dirtyfields",
"id": "4cc611a874292f746f5ce2803b7158480bc688da",
"size": "3447",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "example_app/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7865"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem.Fraggle import FraggleSim
if __name__ =='__main__':
import sys,re
if (len(sys.argv) >= 2):
print("Program to run the first part of Fraggle. Program splits the molecule\nready for the search\n")
print("USAGE: ./fraggle.py <file_of_smiles")
print("Format of smiles file: SMILES ID (space or comma separated)")
print("Output: whole mol smiles,ID,fraggle split smiles\n")
sys.exit(1)
#read the STDIN
for line in sys.stdin:
line = line.rstrip()
        smi, id_ = re.split(r'\s|,', line)
#print smi,id_
mol = Chem.MolFromSmiles(smi)
if mol is None:
sys.stderr.write("Can't generate mol for: %s\n" % (smi) )
continue
out_fragments = FraggleSim.generate_fraggle_fragmentation(mol)
#print out the unique fragments
for x in out_fragments:
#cansmi
temp = Chem.MolFromSmiles(x)
print("%s,%s,%s" % (smi,id_,Chem.MolToSmiles(temp)))
| {
"content_hash": "5cead30644fa03ea6c96139ec34bcecc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 110,
"avg_line_length": 32.515151515151516,
"alnum_prop": 0.5983224603914259,
"repo_name": "strets123/rdkit",
"id": "21c0cd7ad6cf8a6a383fb339e672ee27f1f48bd1",
"size": "2753",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Contrib/fraggle/fraggle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "203078"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "7068170"
},
{
"name": "CMake",
"bytes": "584702"
},
{
"name": "CSS",
"bytes": "4742"
},
{
"name": "FORTRAN",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "65468"
},
{
"name": "Java",
"bytes": "248620"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "LLVM",
"bytes": "27271"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "15431"
},
{
"name": "Objective-C",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3033212"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "8899"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "49170"
}
],
"symlink_target": ""
} |
import logging
logger = logging.getLogger("photomanager")
def __init_logger(logger):
logger.setLevel(level=logging.INFO)
handler = logging.FileHandler("photomanager.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(handler)
logger.addHandler(console)
__init_logger(logger)
| {
"content_hash": "edaeae08efd0b7db1d33a99aa9b4ac5b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 89,
"avg_line_length": 25.2,
"alnum_prop": 0.7162698412698413,
"repo_name": "wrenchzc/photomanager",
"id": "0f6b82af189a4b11882473b2e702484eeb0a0c8e",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "photomanager/utils/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87987"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import socket
from django.contrib.sites.models import Site
from django.utils import six
from django.utils.six.moves.urllib.parse import urljoin
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.site.urlresolvers import local_site_reverse
def get_server_url(local_site_name=None, local_site=None, request=None):
"""Return the URL for the root of the server.
This will construct a URL that points to the root of the server, factoring
in whether to use HTTP or HTTPS.
If ``local_site_name`` or ``local_site`` is provided, then the URL will be
the root to the LocalSite's root, rather than the server's root.
If ``request`` is provided, then the Local Site, if any, will be
inferred from the request.
"""
site = Site.objects.get_current()
siteconfig = SiteConfiguration.objects.get_current()
root = local_site_reverse('root', local_site_name=local_site_name,
local_site=local_site, request=request)
return '%s://%s%s' % (siteconfig.get('site_domain_method'),
site.domain, root)
def build_server_url(*args, **kwargs):
"""Build an absolute URL containing the full URL to the server.
All additional arguments passed will be appended as paths to the URL.
"""
return urljoin(get_server_url(**kwargs), *args)
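# Illustrative usage (hypothetical domain, not taken from this codebase's docs):
#   get_server_url() might return 'https://reviews.example.com/', and
#   build_server_url('api/review-requests/') would then join that into
#   'https://reviews.example.com/api/review-requests/'.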
def get_hostname():
"""Return the hostname for this Review Board server.
Returns:
unicode:
The hostname for the server.
"""
return six.text_type(socket.gethostname())
| {
"content_hash": "adcd50fd22590671aaffb86a179955ae",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.6867167919799498,
"repo_name": "brennie/reviewboard",
"id": "828ad6356a319b5fc81231671b96771f23ddb6b8",
"size": "1596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/admin/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "247208"
},
{
"name": "HTML",
"bytes": "204351"
},
{
"name": "JavaScript",
"bytes": "2557855"
},
{
"name": "Python",
"bytes": "5241630"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import gc
import sys
import traceback
__all__ = ['measure', 'measure_with_rehearsal']
def measure_with_rehearsal():
"""
Runs a benchmark when used as an iterator, injecting a garbage
collection between iterations. Example::
for b in riak.benchmark.measure_with_rehearsal():
with b.report("pow"):
for _ in range(10000):
math.pow(2,10000)
with b.report("factorial"):
for i in range(100):
math.factorial(i)
"""
return Benchmark(True)
def measure():
"""
Runs a benchmark once when used as a context manager. Example::
with riak.benchmark.measure() as b:
with b.report("pow"):
for _ in range(10000):
math.pow(2,10000)
with b.report("factorial"):
for i in range(100):
math.factorial(i)
"""
return Benchmark()
class Benchmark(object):
"""
A benchmarking run, which may consist of multiple steps. See
measure_with_rehearsal() and measure() for examples.
"""
def __init__(self, rehearse=False):
"""
Creates a new benchmark reporter.
:param rehearse: whether to run twice to take counter the effects
of garbage collection
:type rehearse: boolean
"""
self.rehearse = rehearse
if rehearse:
self.count = 2
else:
self.count = 1
self._report = None
def __enter__(self):
if self.rehearse:
raise ValueError("measure_with_rehearsal() cannot be used in with "
"statements, use measure() or the for..in "
"statement")
print_header()
self._report = BenchmarkReport()
self._report.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._report:
return self._report.__exit__(exc_type, exc_val, exc_tb)
else:
            print()
return True
def __iter__(self):
return self
def next(self):
"""
Runs the next iteration of the benchmark.
"""
if self.count == 0:
raise StopIteration
elif self.count > 1:
print_rehearsal_header()
else:
if self.rehearse:
gc.collect()
print("-" * 59)
print()
print_header()
self.count -= 1
return self
def __next__(self):
# Python 3.x Version
return self.next()
def report(self, name):
"""
Returns a report for the current step of the benchmark.
"""
self._report = None
return BenchmarkReport(name)
def print_rehearsal_header():
"""
Prints the header for the rehearsal phase of a benchmark.
"""
    print()
print("Rehearsal -------------------------------------------------")
def print_report(label, user, system, real):
"""
Prints the report of one step of a benchmark.
"""
print("{:<12s} {:12f} {:12f} ( {:12f} )".format(label,
user,
system,
real))
def print_header():
"""
Prints the header for the normal phase of a benchmark.
"""
print("{:<12s} {:<12s} {:<12s} ( {:<12s} )"
.format('', 'user', 'system', 'real'))
class BenchmarkReport(object):
"""
A labeled step in a benchmark. Acts as a context-manager, printing
its timing results when the context exits.
"""
def __init__(self, name='benchmark'):
self.name = name
self.start = None
def __enter__(self):
self.start = os.times()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
user1, system1, _, _, real1 = self.start
user2, system2, _, _, real2 = os.times()
print_report(self.name, user2 - user1, system2 - system1,
real2 - real1)
elif exc_type is KeyboardInterrupt:
return False
else:
msg = "EXCEPTION! type: %r val: %r" % (exc_type, exc_val)
print(msg, file=sys.stderr)
traceback.print_tb(exc_tb)
return True if exc_type is None else False
| {
"content_hash": "f4abc15573c6343ed21be9167c34f195",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 79,
"avg_line_length": 27.662576687116566,
"alnum_prop": 0.5058771346196496,
"repo_name": "basho/riak-python-client",
"id": "e1f3e55cd5b48fa381a5c72da9183b2410749f65",
"size": "5106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "riak/benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4128"
},
{
"name": "Erlang",
"bytes": "32681"
},
{
"name": "HTML",
"bytes": "1061"
},
{
"name": "Makefile",
"bytes": "8954"
},
{
"name": "PowerShell",
"bytes": "445"
},
{
"name": "Python",
"bytes": "924273"
},
{
"name": "Shell",
"bytes": "11498"
}
],
"symlink_target": ""
} |
"""Module that runs application in development mode."""
import os
import click
from btt import create_app
from btt.database import (
db, User, Category, Account, Transaction, Group, MemberShip, create_db)
import unittest
from btt.classification import classification_score
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
@app.shell_context_processor
def make_shell_context():
"""Create a shell context so that can use REPL."""
return dict(app=app, db=db, User=User, Category=Category, Account=Account,
Transaction=Transaction, Group=Group, MemberShip=MemberShip)
@app.cli.command()
def test():
"""Run the unit tests."""
tests = unittest.TestLoader().discover('btt.tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@app.cli.command()
def newdb():
"""Create a new empty database."""
print('This operation will overwrite any existing databases.')
ans = input('Are you sure you want to create a new empty database y/n ? ')
if ans == 'y':
print('Creating new empty database...')
create_db()
print("Done.")
print("Do not forget to change the demo account password!!!")
else:
print('No action taken.')
@app.cli.command()
@click.argument('group_id')
def classify(group_id):
"""Test transaction categorization for email."""
score, data_size, num_features = classification_score(group_id)
print('Score: ', score)
print('Data Size: ', data_size)
print('Number of Features: ', num_features)
| {
"content_hash": "170e2c7cdf306772049c490cd7eff0e8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 31.102040816326532,
"alnum_prop": 0.678477690288714,
"repo_name": "gregcowell/BAM",
"id": "6dd6b8d62030e805de64379bb91517418294521b",
"size": "1524",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10198"
},
{
"name": "HTML",
"bytes": "4026"
},
{
"name": "Python",
"bytes": "13081"
}
],
"symlink_target": ""
} |
import json
import hashlib
import os
import re
import collections
import kubernetes
import yaml
from time import sleep
import lru
from metadata_helpers import *
namespace_to_watch = os.environ.get('NAMESPACE_TO_WATCH', 'default')
pod_name_to_execution_id_size = int(os.environ.get('POD_NAME_TO_EXECUTION_ID_SIZE', 5000))
workflow_name_to_context_id_size = int(os.environ.get('WORKFLOW_NAME_TO_CONTEXT_ID_SIZE', 5000))
pods_with_written_metadata_size = int(os.environ.get('PODS_WITH_WRITTEN_METADATA_SIZE', 5000))
debug_files_size = int(os.environ.get('DEBUG_FILES_SIZE', 5000))
kubernetes.config.load_incluster_config()
k8s_api = kubernetes.client.CoreV1Api()
k8s_watch = kubernetes.watch.Watch()
patch_retries = 20
sleep_time = 0.1
def patch_pod_metadata(
namespace: str,
pod_name: str,
patch: dict,
k8s_api: kubernetes.client.CoreV1Api = None,
):
k8s_api = k8s_api or kubernetes.client.CoreV1Api()
patch = {
'metadata': patch
}
for retry in range(patch_retries):
try:
pod = k8s_api.patch_namespaced_pod(
name=pod_name,
namespace=namespace,
body=patch,
)
return pod
except Exception as e:
print(e)
sleep(sleep_time)
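# Illustrative call (hypothetical namespace and pod name): the patch body below
# is wrapped into {'metadata': ...} by patch_pod_metadata() before being applied.
#
#   patch_pod_metadata(
#       namespace='kubeflow',
#       pod_name='my-workflow-1234',
#       patch={'labels': {'pipelines.kubeflow.org/metadata_written': 'true'}},
#   )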
#Connecting to MetadataDB
mlmd_store = connect_to_mlmd()
print("Connected to the metadata store")
ARGO_OUTPUTS_ANNOTATION_KEY = 'workflows.argoproj.io/outputs'
ARGO_TEMPLATE_ENV_KEY = 'ARGO_TEMPLATE'
KFP_COMPONENT_SPEC_ANNOTATION_KEY = 'pipelines.kubeflow.org/component_spec'
KFP_PARAMETER_ARGUMENTS_ANNOTATION_KEY = 'pipelines.kubeflow.org/arguments.parameters'
METADATA_EXECUTION_ID_LABEL_KEY = 'pipelines.kubeflow.org/metadata_execution_id'
METADATA_CONTEXT_ID_LABEL_KEY = 'pipelines.kubeflow.org/metadata_context_id'
KFP_SDK_TYPE_LABEL_KEY = 'pipelines.kubeflow.org/pipeline-sdk-type'
TFX_SDK_TYPE_VALUE = 'tfx'
METADATA_ARTIFACT_IDS_ANNOTATION_KEY = 'pipelines.kubeflow.org/metadata_artifact_ids'
METADATA_INPUT_ARTIFACT_IDS_ANNOTATION_KEY = 'pipelines.kubeflow.org/metadata_input_artifact_ids'
METADATA_OUTPUT_ARTIFACT_IDS_ANNOTATION_KEY = 'pipelines.kubeflow.org/metadata_output_artifact_ids'
KFP_V2_COMPONENT_ANNOTATION_KEY = 'pipelines.kubeflow.org/v2_component'
KFP_V2_COMPONENT_ANNOTATION_VALUE = 'true'
ARGO_WORKFLOW_LABEL_KEY = 'workflows.argoproj.io/workflow'
ARGO_COMPLETED_LABEL_KEY = 'workflows.argoproj.io/completed'
METADATA_WRITTEN_LABEL_KEY = 'pipelines.kubeflow.org/metadata_written'
def output_name_to_argo(name: str) -> str:
import re
# This sanitization code should be kept in sync with the code in the DSL compiler.
# See https://github.com/kubeflow/pipelines/blob/39975e3cde7ba4dcea2bca835b92d0fe40b1ae3c/sdk/python/kfp/compiler/_k8s_helper.py#L33
return re.sub('-+', '-', re.sub('[^-_0-9A-Za-z]+', '-', name)).strip('-')
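# Illustrative examples of the sanitization above (worked out from the regexes,
# not taken from the DSL compiler's tests):
#   output_name_to_argo('My output (table)')      -> 'My-output-table'
#   output_name_to_argo('mlpipeline-ui-metadata') -> 'mlpipeline-ui-metadata'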
def is_s3_endpoint(endpoint: str) -> bool:
    return bool(re.search('^.*s3.*amazonaws.com.*$', endpoint))
def get_object_store_provider(endpoint: str) -> str:
if is_s3_endpoint(endpoint):
return 's3'
else:
return 'minio'
def argo_artifact_to_uri(artifact: dict) -> str:
# s3 here means s3 compatible object storage. not AWS S3.
if 's3' in artifact:
s3_artifact = artifact['s3']
return '{provider}://{bucket}/{key}'.format(
provider=get_object_store_provider(s3_artifact.get('endpoint', '')),
bucket=s3_artifact.get('bucket', ''),
key=s3_artifact.get('key', ''),
)
elif 'raw' in artifact:
return None
else:
return None
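# Illustrative mapping (assumed values): an Argo artifact such as
#   {'s3': {'endpoint': 'minio-service.kubeflow:9000',
#           'bucket': 'mlpipeline', 'key': 'runs/abc/data.tgz'}}
# becomes 'minio://mlpipeline/runs/abc/data.tgz'; an endpoint matching the
# '*s3*amazonaws.com*' pattern above would yield an 's3://...' URI instead,
# and 'raw' artifacts have no URI.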
def is_tfx_pod(pod) -> bool:
# The label defaults to 'tfx', but is overridable.
# Official tfx templates override the value to 'tfx-template', so
# we loosely match the word 'tfx'.
if TFX_SDK_TYPE_VALUE in pod.metadata.labels.get(KFP_SDK_TYPE_LABEL_KEY, ''):
return True
main_containers = [container for container in pod.spec.containers if container.name == 'main']
if len(main_containers) != 1:
return False
main_container = main_containers[0]
return main_container.command and main_container.command[-1].endswith('tfx/orchestration/kubeflow/container_entrypoint.py')
def is_kfp_v2_pod(pod) -> bool:
return pod.metadata.annotations.get(KFP_V2_COMPONENT_ANNOTATION_KEY) == KFP_V2_COMPONENT_ANNOTATION_VALUE
# Caches (not expected to be persistent)
# These caches are only used to prevent race conditions. Race conditions happen because the writer can see multiple versions of a K8s object before the applied labels show up.
# They are expected to be lost when restarting the service.
# The operation of the Metadata Writer remains correct even if it is restarted frequently. (Kubernetes only sends the latest version of a resource to new watchers.)
# Technically, we could remove the objects from cache as soon as we see that our labels have been applied successfully.
pod_name_to_execution_id = lru.LRU(pod_name_to_execution_id_size)
workflow_name_to_context_id = lru.LRU(workflow_name_to_context_id_size)
pods_with_written_metadata = lru.LRU(pods_with_written_metadata_size)
debug_paths = collections.deque()
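# The LRU objects above behave like size-bounded dicts (presumably the lru-dict
# package): once a cache reaches its configured size, the least-recently-used
# entry is evicted. That is safe here because the execution and context IDs are
# also written back to the pods as labels and are re-read from there below.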
while True:
print("Start watching Kubernetes Pods created by Argo")
if namespace_to_watch:
pod_stream = k8s_watch.stream(
k8s_api.list_namespaced_pod,
namespace=namespace_to_watch,
label_selector=ARGO_WORKFLOW_LABEL_KEY,
timeout_seconds=1800, # Sometimes watch gets stuck
_request_timeout=2000, # Sometimes HTTP GET gets stuck
)
else:
pod_stream = k8s_watch.stream(
k8s_api.list_pod_for_all_namespaces,
label_selector=ARGO_WORKFLOW_LABEL_KEY,
timeout_seconds=1800, # Sometimes watch gets stuck
_request_timeout=2000, # Sometimes HTTP GET gets stuck
)
for event in pod_stream:
try:
obj = event['object']
print('Kubernetes Pod event: ', event['type'], obj.metadata.name, obj.metadata.resource_version)
if event['type'] == 'ERROR':
print(event)
pod_name = obj.metadata.name
# Logging pod changes for debugging
debug_path = '/tmp/pod_' + obj.metadata.name + '_' + obj.metadata.resource_version
with open(debug_path, 'w') as f:
f.write(yaml.dump(obj.to_dict()))
debug_paths.append(debug_path)
# Do some housekeeping, ensure we only keep a fixed size buffer of debug files so we don't
# grow the disk size indefinitely for long running pods.
if len(debug_paths) > debug_files_size:
os.remove(debug_paths.popleft())
assert obj.kind == 'Pod'
if METADATA_WRITTEN_LABEL_KEY in obj.metadata.labels:
continue
# Skip TFX pods - they have their own metadata writers
if is_tfx_pod(obj):
continue
            # Skip KFP v2 pods - they have their own metadata writers
if is_kfp_v2_pod(obj):
continue
argo_workflow_name = obj.metadata.labels[ARGO_WORKFLOW_LABEL_KEY] # Should exist due to initial filtering
argo_template = {}
for env in obj.spec.containers[0].env:
if env.name == ARGO_TEMPLATE_ENV_KEY:
argo_template = json.loads(env.value)
break
# Should we throw error instead if argo template not found?
argo_template_name = argo_template.get('name', '')
component_name = argo_template_name
component_version = component_name
argo_output_name_to_type = {}
if KFP_COMPONENT_SPEC_ANNOTATION_KEY in obj.metadata.annotations:
component_spec_text = obj.metadata.annotations[KFP_COMPONENT_SPEC_ANNOTATION_KEY]
component_spec = json.loads(component_spec_text)
component_spec_digest = hashlib.sha256(component_spec_text.encode()).hexdigest()
component_name = component_spec.get('name', component_name)
component_version = component_name + '@sha256=' + component_spec_digest
output_name_to_type = {output['name']: output.get('type', None) for output in component_spec.get('outputs', [])}
argo_output_name_to_type = {output_name_to_argo(k): v for k, v in output_name_to_type.items() if v}
if obj.metadata.name in pod_name_to_execution_id:
execution_id = pod_name_to_execution_id[obj.metadata.name]
context_id = workflow_name_to_context_id[argo_workflow_name]
elif METADATA_EXECUTION_ID_LABEL_KEY in obj.metadata.labels:
execution_id = int(obj.metadata.labels[METADATA_EXECUTION_ID_LABEL_KEY])
context_id = int(obj.metadata.labels[METADATA_CONTEXT_ID_LABEL_KEY])
print('Found execution id: {}, context id: {} for pod {}.'.format(execution_id, context_id, obj.metadata.name))
else:
run_context = get_or_create_run_context(
store=mlmd_store,
run_id=argo_workflow_name, # We can switch to internal run IDs once backend starts adding them
)
# Saving input parameter arguments
execution_custom_properties = {}
if KFP_PARAMETER_ARGUMENTS_ANNOTATION_KEY in obj.metadata.annotations:
parameter_arguments_json = obj.metadata.annotations[KFP_PARAMETER_ARGUMENTS_ANNOTATION_KEY]
try:
parameter_arguments = json.loads(parameter_arguments_json)
for parameter_name, parameter_value in parameter_arguments.items():
execution_custom_properties['input:' + parameter_name] = parameter_value
except Exception:
pass
# Adding new execution to the database
execution = create_new_execution_in_existing_run_context(
store=mlmd_store,
context_id=run_context.id,
execution_type_name=KFP_EXECUTION_TYPE_NAME_PREFIX + component_version,
pod_name=pod_name,
pipeline_name=argo_workflow_name,
run_id=argo_workflow_name,
instance_id=component_name,
custom_properties=execution_custom_properties,
)
argo_input_artifacts = argo_template.get('inputs', {}).get('artifacts', [])
input_artifact_ids = []
for argo_artifact in argo_input_artifacts:
artifact_uri = argo_artifact_to_uri(argo_artifact)
if not artifact_uri:
continue
input_name = argo_artifact.get('path', '') # Every artifact should have a path in Argo
input_artifact_path_prefix = '/tmp/inputs/'
input_artifact_path_postfix = '/data'
if input_name.startswith(input_artifact_path_prefix):
input_name = input_name[len(input_artifact_path_prefix):]
if input_name.endswith(input_artifact_path_postfix):
input_name = input_name[0: -len(input_artifact_path_postfix)]
artifact = link_execution_to_input_artifact(
store=mlmd_store,
execution_id=execution.id,
uri=artifact_uri,
input_name=input_name,
)
if artifact is None:
# TODO: Maybe there is a better way to handle missing upstream artifacts
continue
input_artifact_ids.append(dict(
id=artifact.id,
name=input_name,
uri=artifact.uri,
))
print('Found Input Artifact: ' + str(dict(
input_name=input_name,
id=artifact.id,
uri=artifact.uri,
)))
execution_id = execution.id
context_id = run_context.id
obj.metadata.labels[METADATA_EXECUTION_ID_LABEL_KEY] = execution_id
obj.metadata.labels[METADATA_CONTEXT_ID_LABEL_KEY] = context_id
metadata_to_add = {
'labels': {
METADATA_EXECUTION_ID_LABEL_KEY: str(execution_id),
METADATA_CONTEXT_ID_LABEL_KEY: str(context_id),
},
'annotations': {
METADATA_INPUT_ARTIFACT_IDS_ANNOTATION_KEY: json.dumps(input_artifact_ids),
},
}
patch_pod_metadata(
namespace=obj.metadata.namespace,
pod_name=obj.metadata.name,
patch=metadata_to_add,
)
pod_name_to_execution_id[obj.metadata.name] = execution_id
workflow_name_to_context_id[argo_workflow_name] = context_id
print('New execution id: {}, context id: {} for pod {}.'.format(execution_id, context_id, obj.metadata.name))
print('Execution: ' + str(dict(
context_id=context_id,
context_name=argo_workflow_name,
execution_id=execution_id,
execution_name=obj.metadata.name,
component_name=component_name,
)))
# TODO: Log input parameters as execution options.
# Unfortunately, DSL compiler loses the information about inputs and their arguments.
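# Write output metadata only once per pod, and only after Argo has marked the pod
# completed or has recorded the outputs annotation.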
if (
obj.metadata.name not in pods_with_written_metadata
and (
obj.metadata.labels.get(ARGO_COMPLETED_LABEL_KEY, 'false') == 'true'
or ARGO_OUTPUTS_ANNOTATION_KEY in obj.metadata.annotations
)
):
artifact_ids = []
if ARGO_OUTPUTS_ANNOTATION_KEY in obj.metadata.annotations: # Should be present
argo_outputs = json.loads(obj.metadata.annotations[ARGO_OUTPUTS_ANNOTATION_KEY])
argo_output_artifacts = {}
for artifact in argo_outputs.get('artifacts', []):
art_name = artifact['name']
output_prefix = argo_template_name + '-'
if art_name.startswith(output_prefix):
art_name = art_name[len(output_prefix):]
argo_output_artifacts[art_name] = artifact
output_artifacts = []
for name, art in argo_output_artifacts.items():
artifact_uri = argo_artifact_to_uri(art)
if not artifact_uri:
continue
artifact_type_name = argo_output_name_to_type.get(name, 'NoType') # Cannot be None or ''
print('Adding Output Artifact: ' + str(dict(
output_name=name,
uri=artifact_uri,
type=artifact_type_name,
)))
artifact = create_new_output_artifact(
store=mlmd_store,
execution_id=execution_id,
context_id=context_id,
uri=artifact_uri,
type_name=artifact_type_name,
output_name=name,
#run_id='Context_' + str(context_id) + '_run',
run_id=argo_workflow_name,
argo_artifact=art,
)
artifact_ids.append(dict(
id=artifact.id,
name=name,
uri=artifact_uri,
type=artifact_type_name,
))
metadata_to_add = {
'labels': {
METADATA_WRITTEN_LABEL_KEY: 'true',
},
'annotations': {
METADATA_OUTPUT_ARTIFACT_IDS_ANNOTATION_KEY: json.dumps(artifact_ids),
},
}
patch_pod_metadata(
namespace=obj.metadata.namespace,
pod_name=obj.metadata.name,
patch=metadata_to_add,
)
pods_with_written_metadata[obj.metadata.name] = None
except Exception as e:
import traceback
print(traceback.format_exc())
| {
"content_hash": "796537883b1a4167537706019b37c986",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 173,
"avg_line_length": 44.346354166666664,
"alnum_prop": 0.5687944095366727,
"repo_name": "kubeflow/pipelines",
"id": "8f21ff6256c57fabb69a19092eeed514296e5f9d",
"size": "17615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/metadata_writer/src/metadata_writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
} |
from .base import *
DEBUG = False
ADMINS = (
(get_env('ALEX_ADMIN_NAME'), get_env('ALEX_ADMIN_EMAIL')),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.8/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['alpine-explorer.eu', 'www.alpine-explorer.eu']
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': get_env('ALEX_DB_NAME'),
'USER': get_env('ALEX_DB_USER'),
'PASSWORD': get_env('ALEX_DB_PASSWORD'),
'HOST': get_env('ALEX_DB_HOST'),
'PORT': get_env('ALEX_DB_PORT'),
}
}
MEDIA_ROOT = '/usr/local/nginx/html/media/'
STATIC_ROOT = '/usr/local/nginx/html/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
#INSTALLED_APPS += ("debug_toolbar", )
INTERNAL_IPS = ("127.0.0.1", )
EMAIL_HOST = get_env('ALEX_EMAIL_HOST')
EMAIL_PORT = get_env('ALEX_EMAIL_PORT')
EMAIL_HOST_USER = get_env('ALEX_EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env('ALEX_EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = get_env('ALEX_EMAIL_USE_TLS')
| {
"content_hash": "cac3f7c780fb377fa7fe505489af6d54",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 30.017857142857142,
"alnum_prop": 0.6145151695419393,
"repo_name": "wodo/alpine-explorer",
"id": "69673d07c41baac668f3e027541e8f18c660524e",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alex/config/settings/production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6486"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserInfo(models.Model):
user = models.OneToOneField(User, primary_key=True)
class BookInfo(models.Model):
isbn = models.CharField(max_length=13)
pic_url = models.URLField(max_length=100, null=True, blank=True)
title = models.CharField(max_length=100, null=True, blank=True)
pages = models.IntegerField(default=0)
author = models.CharField(max_length=100, null=True, blank=True)
price = models.CharField(max_length=20)
publisher = models.CharField(max_length=100, null=True, blank=True)
pubdate = models.CharField(max_length=20, null=True, blank=True)
has_data = models.BooleanField(default=False)
doubanid = models.CharField(max_length=20, null=True, blank=True)
def __unicode__(self):
return self.title
class GoodsInfo(models.Model):
seller = models.ForeignKey(User, related_name="goods")
book = models.ForeignKey(BookInfo, related_name="goods")
price = models.DecimalField(max_digits=10, decimal_places=2)
quality = models.IntegerField(default=100)
description = models.CharField(max_length=500, null=True, blank=True)
sell_time = models.DateTimeField(auto_now_add=True)
update_time = models.DateTimeField(auto_now=True)
numbers = models.IntegerField(default=1)
source = models.URLField(max_length=100, null=True, blank=True)
# 1 means on sale, 2 means sold.
goods_state = models.IntegerField(default=1)
# Log some important system information
#class SystemInfo(models.Model):
#last_crawl_douban_time = models.DateTimeField(
| {
"content_hash": "00d23c9df7e4740e759043aa862a74ed",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 40.58536585365854,
"alnum_prop": 0.7193509615384616,
"repo_name": "zearom32/SmartBooks",
"id": "c7ee940d6f6a15f86b04faa14bbd24cc614598d4",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "books/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41725"
},
{
"name": "HTML",
"bytes": "46531"
},
{
"name": "JavaScript",
"bytes": "74673"
},
{
"name": "Python",
"bytes": "29004"
}
],
"symlink_target": ""
} |
"""distutils.command.install_scripts
Implements the Distutils 'install_scripts' command, for installing
Python scripts."""
# contributed by Bastian Kleineidam
__revision__ = "$Id: install_scripts.py 68943 2009-01-25 22:09:10Z tarek.ziade $"
import os
from distutils.core import Command
from distutils import log
from stat import ST_MODE
class install_scripts (Command):
description = "install scripts (Python or otherwise)"
user_options = [
('install-dir=', 'd', "directory to install scripts to"),
('build-dir=','b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'skip-build']
def initialize_options (self):
self.install_dir = None
self.force = 0
self.build_dir = None
self.skip_build = None
def finalize_options (self):
self.set_undefined_options('build', ('build_scripts', 'build_dir'))
self.set_undefined_options('install',
('install_scripts', 'install_dir'),
('force', 'force'),
('skip_build', 'skip_build'),
)
def run (self):
if not self.skip_build:
self.run_command('build_scripts')
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
if hasattr(os, 'chmod'):
# Set the executable bits (owner, group, and world) on
# all the scripts we just installed.
for file in self.get_outputs():
if self.dry_run:
log.info("changing mode of %s", file)
else:
mode = ((os.stat(file)[ST_MODE]) | 0555) & 07777
log.info("changing mode of %s to %o", file, mode)
os.chmod(file, mode)
def get_inputs (self):
return self.distribution.scripts or []
def get_outputs(self):
return self.outfiles or []
# class install_scripts
| {
"content_hash": "5e44adc715e7edcb858fb3d8ac39f704",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 81,
"avg_line_length": 33.28125,
"alnum_prop": 0.5615023474178403,
"repo_name": "liangazhou/django-rdp",
"id": "d0f4a2ea456e81a68ce51b2c2133e204e219ad6d",
"size": "2130",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "packages/PyDev/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/distutils/command/install_scripts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "22310"
},
{
"name": "CSS",
"bytes": "5463444"
},
{
"name": "CoffeeScript",
"bytes": "83631"
},
{
"name": "Groff",
"bytes": "450"
},
{
"name": "HTML",
"bytes": "439341404"
},
{
"name": "JavaScript",
"bytes": "19561573"
},
{
"name": "PHP",
"bytes": "94083"
},
{
"name": "Perl",
"bytes": "9844"
},
{
"name": "Python",
"bytes": "8069"
},
{
"name": "Shell",
"bytes": "11480"
},
{
"name": "XSLT",
"bytes": "224454"
}
],
"symlink_target": ""
} |
a = {'x': 1, 'z': 3 }
b = {'y': 2, 'z': 4 }
# (a) Simple example of combining
from collections import ChainMap
c = ChainMap(a,b)
print(c['x']) # Outputs 1 (from a)
print(c['y']) # Outputs 2 (from b)
print(c['z']) # Outputs 3 (from a)
# Output some common values
print('len(c):', len(c))
print('c.keys():', list(c.keys()))
print('c.values():', list(c.values()))
# Modify some values
c['z'] = 10
c['w'] = 40
del c['x']
print("a:", a)
# Example of stacking mappings (like scopes)
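# new_child() pushes a fresh, empty mapping onto the front of the chain; .parents drops
# the front-most mapping, so lookups fall back to the previous "scope".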
values = ChainMap()
values['x'] = 1
# Add a new mapping
values = values.new_child()
values['x'] = 2
# Add a new mapping
values = values.new_child()
values['x'] = 3
print(values)
print(values['x'])
# Discard last mapping
values = values.parents
print(values)
print(values['x'])
# Discard last mapping
values = values.parents
print(values)
print(values['x'])
| {
"content_hash": "8b98b4779668f2ef2b709c4cb9ac16db",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 44,
"avg_line_length": 18.361702127659573,
"alnum_prop": 0.6164542294322132,
"repo_name": "SysCompass/compass-adapters",
"id": "6bd153385a1a5d8f378d540add36d2d3bf0ebddd",
"size": "924",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "chef/cookbooks/python/src/1/working_with_multiple_mappings_as_a_single_mapping/example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "21125"
},
{
"name": "CSS",
"bytes": "111630"
},
{
"name": "Perl",
"bytes": "848"
},
{
"name": "Python",
"bytes": "208453"
},
{
"name": "Ruby",
"bytes": "1406351"
},
{
"name": "Shell",
"bytes": "5072"
}
],
"symlink_target": ""
} |
"""
An interface to the Pluggable Authentication Modules (PAM) library,
written in pure Python (using ctypes).
"""
import io
import os
import sys
from distutils.core import setup
if 'bdist_wheel' in sys.argv:
import setuptools
with open('pamela.py') as f:
for line in f:
if line.startswith('__version__'):
version_ns = {}
exec(line, version_ns)
version = version_ns['__version__']
setup(name='pamela',
version=version,
description="PAM interface using ctypes",
long_description=__doc__,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Systems Administration :: Authentication/Directory"
],
keywords=['pam', 'authentication'],
author='Min RK',
author_email='benjaminrk@gmail.com',
url='http://github.com/minrk/pamela',
license='MIT',
py_modules=["pamela"],
)
| {
"content_hash": "e4def959a0b493ac9cc68698d5e031d7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 81,
"avg_line_length": 29.523809523809526,
"alnum_prop": 0.5959677419354839,
"repo_name": "rgbkrk/pamela",
"id": "fd03f697bb78995f001cd47fea0f2a2b88f3c98d",
"size": "1262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13089"
}
],
"symlink_target": ""
} |
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.contacts import Contacts
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
# lazy imports for factory so that widgets can be used in kv
Factory.register('InstallWizard',
module='electrum_gui.kivy.uix.dialogs.installwizard')
Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down; the timeout is set to forever
# so the data stays cached
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def on_quotes(self, d):
#Logger.info("on_quotes")
pass
def on_history(self, d):
#Logger.info("on_history")
if self.history_screen:
Clock.schedule_once(lambda dt: self.history_screen.update())
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self.update_status()
if self.history_screen:
self.history_screen.update()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
if self.history_screen:
self.history_screen.update()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = run_hook('exchange_rate')
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = run_hook('exchange_rate')
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''The current screen orientation of the app.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
super(ElectrumWindow, self).__init__(**kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.contacts = Contacts(self.electrum_config)
self.invoices = InvoiceStore(self.electrum_config)
# create triggers so as to limit updates to a max of 2 times a sec
self._trigger_update_wallet =\
Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status =\
Clock.create_trigger(self.update_status, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.contacts):
key = self.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
try:
text = base_decode(data, None, base=43).encode('hex')
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if self.send_screen is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.open()
def qr_dialog(self, title, data, show_text=False):
from uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
intent = Intent("com.google.zxing.client.android.SCAN")
intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
try:
PythonActivity.mActivity.startActivityForResult(intent, 0)
except:
self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))
def scan_qr_zxing(self, on_complete):
# uses zxing embedded lib
if platform != 'android':
return
from jnius import autoclass
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
IntentIntegrator = autoclass('com.google.zxing.integration.android.IntentIntegrator')
integrator = IntentIntegrator(PythonActivity.mActivity)
def on_qr_result(requestCode, resultCode, intent):
if requestCode == 0:
if resultCode == -1: # RESULT_OK:
contents = intent.getStringExtra("SCAN_RESULT")
if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
on_complete(contents)
else:
self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
activity.bind(on_activity_result=on_qr_result)
integrator.initiateScan()
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the Kivy UI
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
wizard = Factory.InstallWizard(self.electrum_config, path)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
if self._settings_dialog is None:
from uix.dialogs.settings import SettingsDialog
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.icon = "icons/electrum.png"
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network, interests)
self.tabs = self.root.ids['tabs']
def on_network(self, event, *args):
if event == 'updated':
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Not connected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status) if n !='default_wallet' else status
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
amount, fee = self.wallet.get_max_amount(self.electrum_config, inputs, (TYPE_ADDRESS, addr), None)
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
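# Illustrative call (not from the original code):
#   self.show_info_bubble(text=_('Copied'), duration=2, arrow_pos=None)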
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
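# Broadcasting runs on a worker thread (see broadcast() below); the result is handed
# back to the UI thread via Clock.schedule_once.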
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, txid):
self.show_info(txid)
if ok and pr:
pr.set_paid(tx.hash())
self.invoices.save()
self.update_tab('invoices')
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
apply(f, args + (None,))
def delete_wallet(self):
from uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
def callback(pw):
Clock.schedule_once(lambda x: apply(f, args + (pw,)), 0.1)
if self._password_dialog is None:
from uix.dialogs.password_dialog import PasswordDialog
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
| {
"content_hash": "07714789fefb7a5aaf5281e23faf0f77",
"timestamp": "",
"source": "github",
"line_count": 839,
"max_line_length": 129,
"avg_line_length": 36.71275327771156,
"alnum_prop": 0.5884682812804364,
"repo_name": "aasiutin/electrum",
"id": "1f99bc9c68c6b2291ee6b81888c39e600af53974",
"size": "30802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/kivy/main_window.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3867"
},
{
"name": "Makefile",
"bytes": "844"
},
{
"name": "NSIS",
"bytes": "6901"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Protocol Buffer",
"bytes": "2354"
},
{
"name": "Python",
"bytes": "1153867"
},
{
"name": "Shell",
"bytes": "6932"
}
],
"symlink_target": ""
} |
import calendar
import Tkinter
import ttk
import tkFont
def get_calendar(locale, fwday):
# instantiate proper calendar class
if locale is None:
return calendar.TextCalendar(fwday)
else:
return calendar.LocaleTextCalendar(fwday, locale)
class Calendar(ttk.Frame):
# XXX ToDo: cget and configure
datetime = calendar.datetime.datetime
timedelta = calendar.datetime.timedelta
def __init__(self, master=None, **kw):
"""
WIDGET-SPECIFIC OPTIONS
locale, firstweekday, year, month, selectbackground,
selectforeground
"""
# remove custom options from kw before initializing ttk.Frame
fwday = kw.pop('firstweekday', calendar.MONDAY)
year = kw.pop('year', self.datetime.now().year)
month = kw.pop('month', self.datetime.now().month)
locale = kw.pop('locale', None)
sel_bg = kw.pop('selectbackground', '#ecffc4')
sel_fg = kw.pop('selectforeground', '#05640e')
self._date = self.datetime(year, month, 1)
self._selection = None # no date selected
ttk.Frame.__init__(self, master, **kw)
self._cal = get_calendar(locale, fwday)
self.__setup_styles() # creates custom styles
self.__place_widgets() # pack/grid used widgets
self.__config_calendar() # adjust calendar columns and setup tags
# configure a canvas, and proper bindings, for selecting dates
self.__setup_selection(sel_bg, sel_fg)
# store items ids, used for insertion later
self._items = [self._calendar.insert('', 'end', values='')
for _ in range(6)]
# insert dates in the currently empty calendar
self._build_calendar()
# set the minimal size for the widget
#self._calendar.bind('<Map>', self.__minsize)
def __setitem__(self, item, value):
if item in ('year', 'month'):
raise AttributeError("attribute '%s' is not writeable" % item)
elif item == 'selectbackground':
self._canvas['background'] = value
elif item == 'selectforeground':
self._canvas.itemconfigure(self._canvas.text, item=value)
else:
ttk.Frame.__setitem__(self, item, value)
def __getitem__(self, item):
if item in ('year', 'month'):
return getattr(self._date, item)
elif item == 'selectbackground':
return self._canvas['background']
elif item == 'selectforeground':
return self._canvas.itemcget(self._canvas.text, 'fill')
else:
r = ttk.tclobjs_to_py({item: ttk.Frame.__getitem__(self, item)})
return r[item]
def __setup_styles(self):
# custom ttk styles
style = ttk.Style(self.master)
arrow_layout = lambda dir: (
[('Button.focus', {'children': [('Button.%sarrow' % dir, None)]})]
)
style.layout('L.TButton', arrow_layout('left'))
style.layout('R.TButton', arrow_layout('right'))
def __place_widgets(self):
# header frame and its widgets
hframe = ttk.Frame(self)
lbtn = ttk.Button(hframe, style='L.TButton', command=self._prev_month)
rbtn = ttk.Button(hframe, style='R.TButton', command=self._next_month)
self._header = ttk.Label(hframe, width=15, anchor='center')
# the calendar
#self._calendar = ttk.Treeview(show='', selectmode='none', height=7)
self._calendar = ttk.Treeview(self, show='', selectmode='none', height=7)
# pack the widgets
hframe.pack(in_=self, side='top', pady=4, anchor='center')
lbtn.grid(in_=hframe)
self._header.grid(in_=hframe, column=1, row=0, padx=12)
rbtn.grid(in_=hframe, column=2, row=0)
self._calendar.pack(in_=self, expand=1, fill='both', side='bottom')
def __config_calendar(self):
cols = self._cal.formatweekheader(3).split()
self._calendar['columns'] = cols
self._calendar.tag_configure('header', background='grey90')
self._calendar.insert('', 'end', values=cols, tag='header')
# adjust its columns width
font = tkFont.Font(family="Helvetica",size=14,weight="bold")
maxwidth = max(font.measure(col) for col in cols)
for col in cols:
self._calendar.column(col, width=maxwidth, minwidth=maxwidth,
anchor='e')
def __setup_selection(self, sel_bg, sel_fg):
self._font = tkFont.Font(family="Helvetica",size=14,weight="bold")
self._canvas = canvas = Tkinter.Canvas(self._calendar,
background=sel_bg, borderwidth=0, highlightthickness=0)
canvas.text = canvas.create_text(0, 0, fill=sel_fg, anchor='w')
canvas.bind('<ButtonPress-1>', lambda evt: canvas.place_forget())
self._calendar.bind('<Configure>', lambda evt: canvas.place_forget())
self._calendar.bind('<ButtonPress-1>', self._pressed)
#def __minsize(self, evt):
# width, height = self._calendar.master.geometry().split('x')
# height = height[:height.index('+')]
# self._calendar.master.minsize(width, height)
def _build_calendar(self):
year, month = self._date.year, self._date.month
# update header text (Month, YEAR)
header = self._cal.formatmonthname(year, month, 0)
self._header['text'] = header.title()
# update calendar shown dates
cal = self._cal.monthdayscalendar(year, month)
for indx, item in enumerate(self._items):
week = cal[indx] if indx < len(cal) else []
fmt_week = [('%02d' % day) if day else '' for day in week]
self._calendar.item(item, values=fmt_week)
def _show_selection(self, text, bbox):
"""Configure canvas for a new selection."""
x, y, width, height = bbox
textw = self._font.measure(text)
canvas = self._canvas
canvas.configure(width=width, height=height)
canvas.coords(canvas.text, width - textw, height / 2 - 1)
canvas.itemconfigure(canvas.text, text=text)
canvas.place(in_=self._calendar, x=x, y=y)
# Callbacks
def _pressed(self, evt):
"""Clicked somewhere in the calendar."""
x, y, widget = evt.x, evt.y, evt.widget
item = widget.identify_row(y)
column = widget.identify_column(x)
if not column or item not in self._items:
# clicked in the weekdays row or just outside the columns
return
item_values = widget.item(item)['values']
if not len(item_values): # row is empty for this month
return
text = item_values[int(column[1]) - 1]
if not text: # date is empty
return
bbox = widget.bbox(item, column)
if not bbox: # calendar not visible yet
return
# update and then show selection
text = '%02d' % text
self._selection = (text, item, column)
self._show_selection(text, bbox)
def _prev_month(self):
"""Updated calendar to show the previous month."""
self._canvas.place_forget()
self._date = self._date - self.timedelta(days=1)
self._date = self.datetime(self._date.year, self._date.month, 1)
self._build_calendar() # reconstruct calendar
def _next_month(self):
"""Update calendar to show the next month."""
self._canvas.place_forget()
year, month = self._date.year, self._date.month
self._date = self._date + self.timedelta(
days=calendar.monthrange(year, month)[1] + 1)
self._date = self.datetime(self._date.year, self._date.month, 1)
self._build_calendar() # reconstruct calendar
# Properties
@property
def selection(self):
"""Return a datetime representing the current selected date."""
if not self._selection:
return None
year, month = self._date.year, self._date.month
dateArray = [year, month, int(self._selection[0]),calendar.weekday(year, month, int(self._selection[0]))]
#return self.datetime(year, month, int(self._selection[0]))
return dateArray
def print_date(ttkcal):
#print "the date is:", ttkcal.selection
return ttkcal.selection
def chooseDate(ttkcal):
print(print_date(ttkcal))
quit()
def test():
import sys
root = Tkinter.Tk()
root.title('Ttk Calendar')
ttkcal = Calendar(firstweekday=calendar.SUNDAY)
ttkcal.pack(expand=1, fill='both')
if 'win' not in sys.platform:
style = ttk.Style()
style.theme_use('clam')
Tkinter.Button(root, text="Date", command=lambda: chooseDate(ttkcal)).pack()
root.mainloop()
if __name__ == '__main__':
test()
| {
"content_hash": "8a2ff341e8b434cab141306ac5799b69",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 113,
"avg_line_length": 36.396694214876035,
"alnum_prop": 0.5977520435967303,
"repo_name": "HugoLG/SFCrimeClassification",
"id": "04c6653d725f1397c94cdde55862f5814813c2ad",
"size": "8827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calendarWidget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "24509"
},
{
"name": "Python",
"bytes": "39589"
}
],
"symlink_target": ""
} |
from django.db import models
class IncomeBenefitsManager(models.Manager):
pass
class IncomeBenefits(models.Model):
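# Flat, per-collection-stage record of a client's reported income sources, non-cash
# benefits and health insurance; the field names appear to follow the HMIS
# IncomeBenefits CSV layout.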
objects = IncomeBenefitsManager()
personal_id = models.CharField(max_length=15)
project_entry_id = models.CharField(max_length=15)
income_benefits_id = models.CharField(max_length=15)
information_date = models.DateField(null=True)
income_from_any_source = models.FloatField(null=True)
total_monthly_income = models.FloatField(null=True)
earned = models.NullBooleanField()
earned_amount = models.FloatField(null=True)
unemployment = models.NullBooleanField()
unemployment_amount = models.FloatField(null=True)
ssi = models.NullBooleanField()
ssi_amount = models.FloatField(null=True)
ssdi = models.NullBooleanField()
ssdi_amount = models.FloatField(null=True)
va_disability_service = models.NullBooleanField()
va_disability_service_amount = models.FloatField(null=True)
va_disability_non_service = models.NullBooleanField()
va_disability_non_service_amount = models.IntegerField(null=True)
private_disability = models.NullBooleanField()
private_disability_amount = models.IntegerField(null=True)
workers_comp = models.NullBooleanField()
workers_comp_amount = models.FloatField(null=True)
tanf = models.NullBooleanField()
tanf_amount = models.FloatField(null=True)
ga = models.NullBooleanField()
ga_amount = models.FloatField(null=True)
soc_sec_retirement = models.NullBooleanField()
soc_sec_retirement_amount = models.IntegerField(null=True)
pension = models.NullBooleanField()
pension_amount = models.FloatField(null=True)
child_support = models.NullBooleanField()
child_support_amount = models.FloatField(null=True)
alimony = models.NullBooleanField()
alimony_amount = models.FloatField(null=True)
other_income_source = models.NullBooleanField()
other_income_source_amount = models.IntegerField(null=True)
other_income_source_identify = models.CharField(max_length=126, null=True)
benefits_from_any_source = models.NullBooleanField()
snap = models.NullBooleanField()
wic = models.NullBooleanField()
tanf_child_care = models.NullBooleanField()
tanf_transportation = models.NullBooleanField()
other_tanf = models.NullBooleanField()
rental_assistance_ongoing = models.NullBooleanField()
rental_assistance_temp = models.NullBooleanField()
other_benefits_source = models.NullBooleanField()
other_benefits_source_identify = models.CharField(max_length=126, null=True)
insurance_from_any_source = models.NullBooleanField()
medicaid = models.NullBooleanField()
no_medicaid_reason = models.IntegerField(null=True)
medicare = models.NullBooleanField()
no_medicare_reason = models.IntegerField(null=True)
schip = models.NullBooleanField()
no_schip_reason = models.CharField(max_length=126, null=True)
va_medical_services = models.NullBooleanField()
no_va_med_reason = models.CharField(max_length=126, null=True)
employer_provided = models.NullBooleanField()
no_employer_provided_reason = models.CharField(max_length=126, null=True)
cobra = models.NullBooleanField()
no_cobra_reason = models.CharField(max_length=126, null=True)
private_pay = models.NullBooleanField()
no_private_pay_reason = models.CharField(max_length=126, null=True)
state_health_ins = models.NullBooleanField()
no_state_health_ins_reason = models.CharField(max_length=126, null=True)
hiv_aids_assistance = models.NullBooleanField()
no_hiv_aids_assistance_reason = models.CharField(max_length=126, null=True)
adap = models.NullBooleanField()
no_adap_reason = models.CharField(max_length=126, null=True)
data_collection_stage = models.IntegerField(null=True)
date_created = models.DateTimeField(null=False)
date_updated = models.DateTimeField(null=False)
associate_id = models.CharField(max_length=15)
| {
"content_hash": "3ab80236f5d8b6b6f54a1cf544f4c1a4",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 50.29113924050633,
"alnum_prop": 0.7435187515731185,
"repo_name": "dborstelmann/Penguins-GH6",
"id": "e9b4111327596de08e2fd6989b95c3beeaae8fb9",
"size": "3973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/models/income_benefits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6476"
},
{
"name": "HTML",
"bytes": "42818"
},
{
"name": "JavaScript",
"bytes": "69473"
},
{
"name": "Python",
"bytes": "121275"
}
],
"symlink_target": ""
} |
def test_transform(testapp):
res = testapp.get('/')
assert 'X-Transformed' in res.headers
assert 'X-After-Transform' in res.headers
res = testapp.get('/')
assert 'X-Transformed' in res.headers
assert 'X-After-Transform' in res.headers
def test_should_transform(testapp):
res = testapp.get('/', headers={'X-No-Transform': 'true'})
assert 'X-Transformed' not in res.headers
assert 'X-After-Transform' not in res.headers
def test_transform_error(testapp):
res = testapp.get('/transform_error', status=500)
assert 'X-Transformed' not in res.headers
assert 'X-After-Transform' not in res.headers
res = testapp.get('/')
assert 'X-Transformed' in res.headers
assert 'X-After-Transform' in res.headers
def test_bad_request(testapp):
res = testapp.get('/bad_request', status=400)
assert 'X-Transformed' in res.headers
assert 'X-After-Transform' in res.headers
def test_connection_close(testapp):
res = testapp.get('/connection_close')
assert 'X-Transformed' in res.headers
assert 'X-After-Transform' in res.headers
res = testapp.get('/connection_close')
assert 'X-Transformed' in res.headers
assert 'X-After-Transform' in res.headers
def test_startup_error(monkeypatch, startup_error_testapp):
def mockwarn(msg, *args):
assert b'error starting up' in args[1]
monkeypatch.setattr('subprocess_middleware.worker.log.warn', mockwarn)
res = startup_error_testapp.get('/', status=500)
assert 'X-Transformed' not in res.headers
assert 'X-After-Transform' not in res.headers
def test_reload_process(monkeypatch, testapp):
retval = False
pids = set()
def mock_reload_process(process):
pids.add(process.pid)
return retval
monkeypatch.setattr('subprocess_middleware.tests.testing._reload_process', mock_reload_process)
testapp.get('/')
testapp.get('/')
assert len(pids) == 1
retval = True
testapp.get('/')
# reload here
testapp.get('/')
assert len(pids) == 2
| {
"content_hash": "353da7ad5152bfd14a6878436336ad00",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 99,
"avg_line_length": 31.4,
"alnum_prop": 0.679078882900539,
"repo_name": "lrowe/subprocess_middleware",
"id": "3b8eb207d0fa2b586f212be0ef98fd44717191db",
"size": "2041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/subprocess_middleware/tests/test_subprocess_middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21936"
}
],
"symlink_target": ""
} |
import tornado.web
class WebHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html") | {
"content_hash": "77d864628baee9ab1b95f8535da9b810",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 29.25,
"alnum_prop": 0.717948717948718,
"repo_name": "jonepatr/affordance",
"id": "a614be7a9d5ad8ab1b618b6121c90a4821a5bede",
"size": "117",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "affordance/web_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "5415"
},
{
"name": "C#",
"bytes": "67526"
},
{
"name": "CSS",
"bytes": "17615"
},
{
"name": "CoffeeScript",
"bytes": "25443"
},
{
"name": "JavaScript",
"bytes": "18462"
},
{
"name": "Python",
"bytes": "71031"
}
],
"symlink_target": ""
} |
import io
import os
from setuptools import find_packages, setup
# This reads the __version__ variable from cirq/_version.py
__version__ = ''
exec(open('cirq-core/cirq/_version.py').read())
name = 'cirq'
description = (
'A framework for creating, editing, and invoking '
'Noisy Intermediate Scale Quantum (NISQ) circuits.'
)
# README file as long_description.
long_description = io.open('README.rst', encoding='utf-8').read()
# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.
# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence
# it will be a pre-release version on PyPi. See
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning
# for more details.
if 'CIRQ_PRE_RELEASE_VERSION' in os.environ:
__version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']
long_description = (
"**This is a development version of Cirq and may be "
"unstable.**\n\n**For the latest stable release of Cirq "
"see**\n`here <https://pypi.org/project/cirq>`__.\n\n" + long_description
)
# Sanity check
assert __version__, 'Version string cannot be empty'
# This is a pure metapackage that installs all our packages
requirements = [f'{p}=={__version__}' for p in ['cirq-core', 'cirq-google']]
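# Illustrative note (editorial addition, not used by the build): for a
# hypothetical CIRQ_PRE_RELEASE_VERSION of '0.12.0.dev20210401', the list
# above would evaluate to
#   ['cirq-core==0.12.0.dev20210401', 'cirq-google==0.12.0.dev20210401'],
# so the metapackage pins its subpackages to exactly the same version.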
dev_requirements = open('dev_tools/conf/pip-list-dev-tools.txt').readlines()
dev_requirements = [r.strip() for r in dev_requirements]
setup(
name=name,
version=__version__,
url='http://github.com/quantumlib/cirq',
author='The Cirq Developers',
author_email='cirq-dev@googlegroups.com',
python_requires=('>=3.6.0'),
install_requires=requirements,
extras_require={
'dev_env': dev_requirements,
},
license='Apache 2',
description=description,
long_description=long_description,
)
| {
"content_hash": "f0c25cdcf3bb499a6fa02bbe418ff57d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 100,
"avg_line_length": 33.67272727272727,
"alnum_prop": 0.683585313174946,
"repo_name": "balopat/Cirq",
"id": "c0c1f0b90d8c4eed1f90069ca1dba16f98e6dd8f",
"size": "2437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5923"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "Jupyter Notebook",
"bytes": "23905"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "6256825"
},
{
"name": "Shell",
"bytes": "50383"
},
{
"name": "Starlark",
"bytes": "5979"
}
],
"symlink_target": ""
} |
'''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
__all__ = ['dis', 'genops', 'optimize']
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
"""
"A pickle" is a program for a virtual pickle machine (PM, but more accurately
called an unpickling machine). It's a sequence of opcodes, interpreted by the
PM, building an arbitrarily complex Python object.
For the most part, the PM is very simple: there are no looping, testing, or
conditional instructions, no arithmetic and no function calls. Opcodes are
executed once each, from first to last, until a STOP opcode is reached.
The PM has two data areas, "the stack" and "the memo".
Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
integer object on the stack, whose value is gotten from a decimal string
literal immediately following the INT opcode in the pickle bytestream. Other
opcodes take Python objects off the stack. The result of unpickling is
whatever object is left on the stack when the final STOP opcode is executed.
The memo is simply an array of objects, or it can be implemented as a dict
mapping little integers to objects. The memo serves as the PM's "long term
memory", and the little integers indexing the memo are akin to variable
names. Some opcodes pop a stack object into the memo at a given index,
and others push a memo object at a given index onto the stack again.
At heart, that's all the PM has. Subtleties arise for these reasons:
+ Object identity. Objects can be arbitrarily complex, and subobjects
may be shared (for example, the list [a, a] refers to the same object a
twice). It can be vital that unpickling recreate an isomorphic object
graph, faithfully reproducing sharing.
+ Recursive objects. For example, after "L = []; L.append(L)", L is a
list, and L[0] is the same list. This is related to the object identity
point, and some sequences of pickle opcodes are subtle in order to
get the right result in all cases.
+ Things pickle doesn't know everything about. Examples of things pickle
does know everything about are Python's builtin scalar and container
types, like ints and tuples. They generally have opcodes dedicated to
them. For things like module references and instances of user-defined
classes, pickle's knowledge is limited. Historically, many enhancements
have been made to the pickle protocol in order to do a better (faster,
and/or more compact) job on those.
+ Backward compatibility and micro-optimization. As explained below,
pickle opcodes never go away, not even when better ways to do a thing
get invented. The repertoire of the PM just keeps growing over time.
For example, protocol 0 had two opcodes for building Python integers (INT
and LONG), protocol 1 added three more for more-efficient pickling of short
integers, and protocol 2 added two more for more-efficient pickling of
long integers (before protocol 2, the only ways to pickle a Python long
took time quadratic in the number of digits, for both pickling and
unpickling). "Opcode bloat" isn't so much a subtlety as a source of
wearying complication.
Pickle protocols:
For compatibility, the meaning of a pickle opcode never changes. Instead new
pickle opcodes get added, and each version's unpickler can handle all the
pickle opcodes in all protocol versions to date. So old pickles continue to
be readable forever. The pickler can generally be told to restrict itself to
the subset of opcodes available under previous protocol versions too, so that
users can create pickles under the current version readable by older
versions. However, a pickle does not contain its version number embedded
within it. If an older unpickler tries to read a pickle using a later
protocol, the result is most likely an exception due to seeing an unknown (in
the older unpickler) opcode.
The original pickle used what's now called "protocol 0", and what was called
"text mode" before Python 2.3. The entire pickle bytestream is made up of
printable 7-bit ASCII characters, plus the newline character, in protocol 0.
That's why it was called text mode. Protocol 0 is small and elegant, but
sometimes painfully inefficient.
The second major set of additions is now called "protocol 1", and was called
"binary mode" before Python 2.3. This added many opcodes with arguments
consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
bytes. Binary mode pickles can be substantially smaller than equivalent
text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
int as 4 bytes following the opcode, which is cheaper to unpickle than the
(perhaps) 11-character decimal string attached to INT. Protocol 1 also added
a number of opcodes that operate on many stack elements at once (like APPENDS
and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
The third major set of additions came in Python 2.3, and is called "protocol
2". This added:
- A better way to pickle instances of new-style classes (NEWOBJ).
- A way for a pickle to identify its protocol (PROTO).
- Time- and space- efficient pickling of long ints (LONG{1,4}).
- Shortcuts for small tuples (TUPLE{1,2,3}).
- Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
- The "extension registry", a vector of popular objects that can be pushed
efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
the registry contents are predefined (there's nothing akin to the memo's
PUT).
Another independent change with Python 2.3 is the abandonment of any
pretense that it might be safe to load pickles received from untrusted
parties -- no sufficient security analysis has been done to guarantee
this and there isn't a use case that warrants the expense of such an
analysis.
To this end, all tests for __safe_for_unpickling__ or for
copy_reg.safe_constructors are removed from the unpickling code.
References to these variables in the descriptions below are to be seen
as describing unpickling in Python 2.2 and before.
"""
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
class ArgumentDescriptor(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
def __init__(self, name, n, reader, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(n, int) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4))
self.n = n
self.reader = reader
assert isinstance(doc, str)
self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
r"""
>>> import StringIO
>>> read_uint1(StringIO.StringIO('\xff'))
255
"""
data = f.read(1)
if data:
return ord(data)
raise ValueError("not enough data in stream to read uint1")
uint1 = ArgumentDescriptor(
name='uint1',
n=1,
reader=read_uint1,
doc="One-byte unsigned integer.")
def read_uint2(f):
r"""
>>> import StringIO
>>> read_uint2(StringIO.StringIO('\xff\x00'))
255
>>> read_uint2(StringIO.StringIO('\xff\xff'))
65535
"""
data = f.read(2)
if len(data) == 2:
return _unpack("<H", data)[0]
raise ValueError("not enough data in stream to read uint2")
uint2 = ArgumentDescriptor(
name='uint2',
n=2,
reader=read_uint2,
doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
r"""
>>> import StringIO
>>> read_int4(StringIO.StringIO('\xff\x00\x00\x00'))
255
>>> read_int4(StringIO.StringIO('\x00\x00\x00\x80')) == -(2**31)
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<i", data)[0]
raise ValueError("not enough data in stream to read int4")
int4 = ArgumentDescriptor(
name='int4',
n=4,
reader=read_int4,
doc="Four-byte signed integer, little-endian, 2's complement.")
def read_stringnl(f, decode=True, stripquotes=True):
r"""
>>> import StringIO
>>> read_stringnl(StringIO.StringIO("'abcd'\nefg\n"))
'abcd'
>>> read_stringnl(StringIO.StringIO("\n"))
Traceback (most recent call last):
...
ValueError: no string quotes around ''
>>> read_stringnl(StringIO.StringIO("\n"), stripquotes=False)
''
>>> read_stringnl(StringIO.StringIO("''\n"))
''
>>> read_stringnl(StringIO.StringIO('"abcd"'))
Traceback (most recent call last):
...
ValueError: no newline found when trying to read stringnl
Embedded escapes are undone in the result.
>>> read_stringnl(StringIO.StringIO(r"'a\n\\b\x00c\td'" + "\n'e'"))
'a\n\\b\x00c\td'
"""
data = f.readline()
if not data.endswith('\n'):
raise ValueError("no newline found when trying to read stringnl")
data = data[:-1] # lose the newline
if stripquotes:
for q in "'\"":
if data.startswith(q):
if not data.endswith(q):
raise ValueError("strinq quote %r not found at both "
"ends of %r" % (q, data))
data = data[1:-1]
break
else:
raise ValueError("no string quotes around %r" % data)
# I'm not sure when 'string_escape' was added to the std codecs; it's
# crazy not to use it if it's there.
if decode:
data = data.decode('string_escape')
return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
return read_stringnl(f, decode=False, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
name='stringnl_noescape',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape,
doc="""A newline-terminated string.
This is a str-style string, without embedded escapes,
or bracketing quotes. It should consist solely of
printable ASCII characters.
""")
def read_stringnl_noescape_pair(f):
r"""
>>> import StringIO
>>> read_stringnl_noescape_pair(StringIO.StringIO("Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
stringnl_noescape_pair = ArgumentDescriptor(
name='stringnl_noescape_pair',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape_pair,
doc="""A pair of newline-terminated strings.
These are str-style strings, without embedded
escapes, or bracketing quotes. They should
consist solely of printable ASCII characters.
The pair is returned as a single string, with
a single blank separating the two strings.
""")
def read_string4(f):
r"""
>>> import StringIO
>>> read_string4(StringIO.StringIO("\x00\x00\x00\x00abc"))
''
>>> read_string4(StringIO.StringIO("\x03\x00\x00\x00abcdef"))
'abc'
>>> read_string4(StringIO.StringIO("\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a string4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("string4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a string4, but only %d remain" %
(n, len(data)))
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
def read_string1(f):
r"""
>>> import StringIO
>>> read_string1(StringIO.StringIO("\x00"))
''
>>> read_string1(StringIO.StringIO("\x03abcdef"))
'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a string1, but only %d remain" %
(n, len(data)))
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_unicodestringnl(f):
r"""
>>> import StringIO
>>> read_unicodestringnl(StringIO.StringIO("abc\uabcd\njunk"))
u'abc\uabcd'
"""
data = f.readline()
if not data.endswith('\n'):
raise ValueError("no newline found when trying to read "
"unicodestringnl")
data = data[:-1] # lose the newline
return unicode(data, 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring4(f):
r"""
>>> import StringIO
>>> s = u'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
'abcd\xea\xaf\x8d'
>>> n = chr(len(enc)) + chr(0) * 3 # little-endian 4-byte length
>>> t = read_unicodestring4(StringIO.StringIO(n + enc + 'junk'))
>>> s == t
True
>>> read_unicodestring4(StringIO.StringIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("unicodestring4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return unicode(data, 'utf-8')
raise ValueError("expected %d bytes in a unicodestring4, but only %d "
"remain" % (n, len(data)))
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_unicodestring4,
doc="""A counted Unicode string.
The first argument is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
r"""
>>> import StringIO
>>> read_decimalnl_short(StringIO.StringIO("1234\n56"))
1234
>>> read_decimalnl_short(StringIO.StringIO("1234L\n56"))
Traceback (most recent call last):
...
ValueError: trailing 'L' not allowed in '1234L'
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s.endswith("L"):
raise ValueError("trailing 'L' not allowed in %r" % s)
# It's not necessarily true that the result fits in a Python short int:
# the pickle may have been written on a 64-bit box. There's also a hack
# for True and False here.
if s == "00":
return False
elif s == "01":
return True
try:
return int(s)
except OverflowError:
return long(s)
def read_decimalnl_long(f):
r"""
>>> import StringIO
>>> read_decimalnl_long(StringIO.StringIO("1234\n56"))
Traceback (most recent call last):
...
ValueError: trailing 'L' required in '1234'
Someday the trailing 'L' will probably go away from this output.
>>> read_decimalnl_long(StringIO.StringIO("1234L\n56"))
1234L
>>> read_decimalnl_long(StringIO.StringIO("123456789012345678901234L\n6"))
123456789012345678901234L
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if not s.endswith("L"):
raise ValueError("trailing 'L' required in %r" % s)
return long(s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
r"""
>>> import StringIO
>>> read_floatnl(StringIO.StringIO("-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s)
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or on some boxes even
on itself (e.g., Windows can't read the strings it produces
for infinities or NaNs).
""")
def read_float8(f):
r"""
>>> import StringIO, struct
>>> raw = struct.pack(">d", -1.25)
>>> raw
'\xbf\xf4\x00\x00\x00\x00\x00\x00'
>>> read_float8(StringIO.StringIO(raw + "\n"))
-1.25
"""
data = f.read(8)
if len(data) == 8:
return _unpack(">d", data)[0]
raise ValueError("not enough data in stream to read float8")
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and cPickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
r"""
>>> import StringIO
>>> read_long1(StringIO.StringIO("\x00"))
0L
>>> read_long1(StringIO.StringIO("\x02\xff\x00"))
255L
>>> read_long1(StringIO.StringIO("\x02\xff\x7f"))
32767L
>>> read_long1(StringIO.StringIO("\x02\x00\xff"))
-256L
>>> read_long1(StringIO.StringIO("\x02\x00\x80"))
-32768L
"""
n = read_uint1(f)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long1")
return decode_long(data)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
r"""
>>> import StringIO
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x00"))
255L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x7f"))
32767L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\xff"))
-256L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\x80"))
-32768L
>>> read_long4(StringIO.StringIO("\x00\x00\x00\x00"))
0L
"""
n = read_int4(f)
if n < 0:
raise ValueError("long4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long4")
return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the long 0L, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
__slots__ = (
# name of descriptor record, for info only
'name',
# type of object, or tuple of type objects (meaning the object can
# be of any type in the tuple)
'obtype',
# human-readable docs for this kind of stack object; a string
'doc',
)
def __init__(self, name, obtype, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(obtype, type) or isinstance(obtype, tuple)
if isinstance(obtype, tuple):
for contained in obtype:
assert isinstance(contained, type)
self.obtype = obtype
assert isinstance(doc, str)
self.doc = doc
def __repr__(self):
return self.name
pyint = StackObject(
name='int',
obtype=int,
doc="A short (as opposed to long) Python integer object.")
pylong = StackObject(
name='long',
obtype=long,
doc="A long (as opposed to short) Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, long, bool),
doc="A Python integer object (short or long), or "
"a Python bool.")
pybool = StackObject(
name='bool',
obtype=(bool,),
doc="A Python bool object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pystring = StackObject(
name='str',
obtype=str,
doc="A Python string object.")
pyunicode = StackObject(
name='unicode',
obtype=unicode,
doc="A Python Unicode string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many objects are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
__slots__ = (
# symbolic name of opcode; a string
'name',
# the code used in a bytestream to represent the opcode; a
# one-character string
'code',
# If the opcode has an argument embedded in the byte string, an
# instance of ArgumentDescriptor specifying its type. Note that
# arg.reader(s) can be used to read and decode the argument from
# the bytestream s, and arg.doc documents the format of the raw
# argument bytes. If the opcode doesn't have an argument embedded
# in the bytestream, arg should be None.
'arg',
# what the stack looks like before this opcode runs; a list
'stack_before',
# what the stack looks like after this opcode runs; a list
'stack_after',
# the protocol number in which this opcode was introduced; an int
'proto',
# human-readable docs for this opcode; a string
'doc',
)
def __init__(self, name, code, arg,
stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(code, str)
assert len(code) == 1
self.code = code
assert arg is None or isinstance(arg, ArgumentDescriptor)
self.arg = arg
assert isinstance(stack_before, list)
for x in stack_before:
assert isinstance(x, StackObject)
self.stack_before = stack_before
assert isinstance(stack_after, list)
for x in stack_after:
assert isinstance(x, StackObject)
self.stack_after = stack_after
assert isinstance(proto, int) and 0 <= proto <= 2
self.proto = proto
assert isinstance(doc, str)
self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
Another difference arises because, when bool was introduced as a
distinct type in 2.3, builtin names True and False were also added to
2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pylong],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
unpickles to a Python long. There doesn't seem to be a real purpose to the
trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pystring],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""One-tuple.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. IOW:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""One-tuple.
This code pops two values off the stack and pushes a tuple
of length 2 whose items are those values back onto it. IOW:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""One-tuple.
This code pops three values off the stack and pushes a tuple
of length 3 whose items are those values back onto it. IOW:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte signed
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=int4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
signed little-endian integer following.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If type(callable) is not ClassType, REDUCE complains unless the
callable has been registered with the copy_reg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
This may raise RuntimeError in restricted execution mode (which
disallows access to __dict__ directly); in that case, the object
is updated instead via
for k, v in argument.items():
anyobject[k] = v
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ It's an old-style class object (the type of the class object is
ClassType).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done. In
restricted execution mode it can fail (assignment to __class__ is
disallowed), and I'm not really sure what happens then -- it looks
like the code ends up calling the class object's __init__ anyway,
via falling into the next case.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug; cPickle
requires the attribute to be true). If __safe_for_unpickling__
doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug; cPickle does test __safe_for_unpickling__). See INST for
the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
# Verify uniqueness of .name and .code members.
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
if d.name in name2i:
raise ValueError("repeated name %r at indices %d and %d" %
(d.name, name2i[d.name], i))
if d.code in code2i:
raise ValueError("repeated code %r at indices %d and %d" %
(d.code, code2i[d.code], i))
name2i[d.name] = i
code2i[d.code] = i
del name2i, code2i, i, d
##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.
code2op = {}
for d in opcodes:
code2op[d.code] = d
del d
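# Illustrative sketch (editorial addition; assumes Python 2): code2op maps
# each one-character opcode code to its OpcodeInfo record; genops() below
# uses it to recognize every byte it reads.  Defined only, never called.
def _code2op_lookup_example():
    info = code2op['.']            # the STOP opcode
    return info.name, info.proto   # ('STOP', 0)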
def assure_pickle_consistency(verbose=False):
import pickle, re
copy = code2op.copy()
for name in pickle.__all__:
if not re.match("[A-Z][A-Z0-9_]+$", name):
if verbose:
print "skipping %r: it doesn't look like an opcode name" % name
continue
picklecode = getattr(pickle, name)
if not isinstance(picklecode, str) or len(picklecode) != 1:
if verbose:
print ("skipping %r: value %r doesn't look like a pickle "
"code" % (name, picklecode))
continue
if picklecode in copy:
if verbose:
print "checking name %r w/ code %r for consistency" % (
name, picklecode)
d = copy[picklecode]
if d.name != name:
raise ValueError("for pickle code %r, pickle.py uses name %r "
"but we're using name %r" % (picklecode,
name,
d.name))
# Forget this one. Any left over in copy at the end are a problem
# of a different kind.
del copy[picklecode]
else:
raise ValueError("pickle.py appears to have a pickle opcode with "
"name %r and code %r, but we don't" %
(name, picklecode))
if copy:
msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
for code, d in copy.items():
msg.append(" name %r with code %r" % (d.name, code))
raise ValueError("\n".join(msg))
assure_pickle_consistency()
del assure_pickle_consistency
##############################################################################
# A pickle opcode generator.
def genops(pickle):
"""Generate all the opcodes in a pickle.
'pickle' is a file-like object, or string, containing the pickle.
Each opcode in the pickle is generated, from the current pickle position,
stopping after a STOP opcode is delivered. A triple is generated for
each opcode:
opcode, arg, pos
opcode is an OpcodeInfo record, describing the current opcode.
If the opcode has an argument embedded in the pickle, arg is its decoded
value, as a Python object. If the opcode doesn't have an argument, arg
is None.
If the pickle has a tell() method, pos was the value of pickle.tell()
before reading the current opcode. If the pickle is a string object,
it's wrapped in a StringIO object, and the latter's tell() result is
used. Else (the pickle doesn't have a tell(), and it's not obvious how
to query its current position) pos is None.
"""
import cStringIO as StringIO
if isinstance(pickle, str):
pickle = StringIO.StringIO(pickle)
if hasattr(pickle, "tell"):
getpos = pickle.tell
else:
getpos = lambda: None
while True:
pos = getpos()
code = pickle.read(1)
opcode = code2op.get(code)
if opcode is None:
if code == "":
raise ValueError("pickle exhausted before seeing STOP")
else:
raise ValueError("at position %s, opcode %r unknown" % (
pos is None and "<unknown>" or pos,
code))
if opcode.arg is None:
arg = None
else:
arg = opcode.arg.reader(pickle)
yield opcode, arg, pos
if code == '.':
assert opcode.name == 'STOP'
break
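# Illustrative usage sketch (editorial addition; assumes Python 2): list the
# opcodes of a pickle produced by the standard pickler.  Defined only,
# never called at import time.
def _genops_usage_example():
    import pickle
    data = pickle.dumps({'a': 1}, 1)
    return [(opcode.name, arg, pos) for opcode, arg, pos in genops(data)]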
##############################################################################
# A pickle optimizer.
def optimize(p):
'Optimize a pickle string by removing unused PUT opcodes'
gets = set() # set of args used by a GET opcode
puts = [] # (arg, startpos, stoppos) for the PUT opcodes
prevpos = None # set to pos if previous opcode was a PUT
for opcode, arg, pos in genops(p):
if prevpos is not None:
puts.append((prevarg, prevpos, pos))
prevpos = None
if 'PUT' in opcode.name:
prevarg, prevpos = arg, pos
elif 'GET' in opcode.name:
gets.add(arg)
# Copy the pickle string except for PUTS without a corresponding GET
s = []
i = 0
for arg, start, stop in puts:
j = stop if (arg in gets) else start
s.append(p[i:j])
i = stop
s.append(p[i:])
return ''.join(s)
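# Illustrative usage sketch (editorial addition; assumes Python 2): optimize()
# drops PUT opcodes whose memo slots are never fetched by a GET, which shrinks
# pickles of objects without shared subobjects.  Defined only, never called.
def _optimize_usage_example():
    import pickle
    fat = pickle.dumps([1, 2, 3], 1)
    slim = optimize(fat)
    assert pickle.loads(slim) == [1, 2, 3]
    assert len(slim) <= len(fat)
    return fat, slim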
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4):
"""Produce a symbolic disassembly of a pickle.
'pickle' is a file-like object, or string, containing a (at least one)
pickle. The pickle is disassembled from the current position, through
the first STOP opcode encountered.
Optional arg 'out' is a file-like object to which the disassembly is
printed. It defaults to sys.stdout.
Optional arg 'memo' is a Python dict, used as the pickle's memo. It
may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
Passing the same memo object to another dis() call then allows disassembly
to proceed across multiple pickles that were all created by the same
pickler with the same memo. Ordinarily you don't need to worry about this.
Optional arg indentlevel is the number of blanks by which to indent
a new MARK level. It defaults to 4.
In addition to printing the disassembly, some sanity checks are made:
+ All embedded opcode arguments "make sense".
+ Explicit and implicit pop operations have enough items on the stack.
+ When an opcode implicitly refers to a markobject, a markobject is
actually on the stack.
+ A memo entry isn't referenced before it's defined.
+ The markobject isn't stored in the memo.
+ A memo entry isn't redefined.
"""
# Most of the hair here is for sanity checks, but most of it is needed
# anyway to detect when a protocol 0 POP takes a MARK off the stack
# (which in turn is needed to indent MARK blocks correctly).
stack = [] # crude emulation of unpickler stack
if memo is None:
memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
errormsg = None
for opcode, arg, pos in genops(pickle):
if pos is not None:
print >> out, "%5d:" % pos,
line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
indentchunk * len(markstack),
opcode.name)
maxproto = max(maxproto, opcode.proto)
before = opcode.stack_before # don't mutate
after = opcode.stack_after # don't mutate
numtopop = len(before)
# See whether a MARK should be popped.
markmsg = None
if markobject in before or (opcode.name == "POP" and
stack and
stack[-1] is markobject):
assert markobject not in after
if __debug__:
if markobject in before:
assert before[-1] is stackslice
if markstack:
markpos = markstack.pop()
if markpos is None:
markmsg = "(MARK at unknown opcode offset)"
else:
markmsg = "(MARK at %d)" % markpos
# Pop everything at and after the topmost markobject.
while stack[-1] is not markobject:
stack.pop()
stack.pop()
# Stop later code from popping too much.
try:
numtopop = before.index(markobject)
except ValueError:
assert opcode.name == "POP"
numtopop = 0
else:
errormsg = markmsg = "no MARK exists on stack"
# Check for correct memo usage.
if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT"):
assert arg is not None
if arg in memo:
errormsg = "memo key %r already defined" % arg
elif not stack:
errormsg = "stack is empty -- can't store into memo"
elif stack[-1] is markobject:
errormsg = "can't store markobject in the memo"
else:
memo[arg] = stack[-1]
elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
if arg in memo:
assert len(after) == 1
after = [memo[arg]] # for better stack emulation
else:
errormsg = "memo key %r has never been stored into" % arg
if arg is not None or markmsg:
# make a mild effort to align arguments
line += ' ' * (10 - len(opcode.name))
if arg is not None:
line += ' ' + repr(arg)
if markmsg:
line += ' ' + markmsg
print >> out, line
if errormsg:
# Note that we delayed complaining until the offending opcode
# was printed.
raise ValueError(errormsg)
# Emulate the stack effects.
if len(stack) < numtopop:
raise ValueError("tries to pop %d items from stack with "
"only %d items" % (numtopop, len(stack)))
if numtopop:
del stack[-numtopop:]
if markobject in after:
assert markobject not in before
markstack.append(pos)
stack.extend(after)
print >> out, "highest protocol among opcodes =", maxproto
if stack:
raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
def __init__(self, value):
self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {'abc': u"def"}]
>>> pkl = pickle.dumps(x, 0)
>>> dis(pkl)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: I INT 1
8: a APPEND
9: I INT 2
12: a APPEND
13: ( MARK
14: I INT 3
17: I INT 4
20: t TUPLE (MARK at 13)
21: p PUT 1
24: a APPEND
25: ( MARK
26: d DICT (MARK at 25)
27: p PUT 2
30: S STRING 'abc'
37: p PUT 3
40: V UNICODE u'def'
45: p PUT 4
48: s SETITEM
49: a APPEND
50: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl = pickle.dumps(x, 1)
>>> dis(pkl)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: U SHORT_BINSTRING 'abc'
24: q BINPUT 3
26: X BINUNICODE u'def'
34: q BINPUT 4
36: s SETITEM
37: e APPENDS (MARK at 3)
38: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: i INST 'pickletools _Example' (MARK at 5)
28: p PUT 1
31: ( MARK
32: d DICT (MARK at 31)
33: p PUT 2
36: S STRING 'value'
45: p PUT 3
48: I INT 42
52: s SETITEM
53: b BUILD
54: a APPEND
55: g GET 1
58: a APPEND
59: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: ( MARK
5: c GLOBAL 'pickletools _Example'
27: q BINPUT 1
29: o OBJ (MARK at 4)
30: q BINPUT 2
32: } EMPTY_DICT
33: q BINPUT 3
35: U SHORT_BINSTRING 'value'
42: q BINPUT 4
44: K BININT1 42
46: s SETITEM
47: b BUILD
48: h BINGET 2
50: e APPENDS (MARK at 3)
51: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> from StringIO import StringIO
>>> f = StringIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
__test__ = {'disassembler_test': _dis_test,
'disassembler_memo_test': _memo_test,
}
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
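# Illustrative sketch, not part of the original module: optimize() has no
# doctest above, so this shows its intended effect -- PUT opcodes whose memo
# slots are never read by a GET are dropped, and the result still unpickles
# to an equal object (Python 2 str pickles assumed, matching this file).
def _optimize_example():
    import pickle
    p = pickle.dumps([1, 2, (3, 4)], 0)
    shorter = optimize(p)
    assert len(shorter) < len(p)
    assert pickle.loads(shorter) == pickle.loads(p)
    return shorter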
| {
"content_hash": "b6b9de2d5986fc864ed0e3e5c39bb97f",
"timestamp": "",
"source": "github",
"line_count": 2271,
"max_line_length": 79,
"avg_line_length": 32.73800088066931,
"alnum_prop": 0.5914079733146823,
"repo_name": "DecipherOne/Troglodyte",
"id": "065498c2a13b98b5caa95af20fdf1de4d7d6c430",
"size": "74348",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Trog Build Dependencies/Python26/Lib/pickletools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "586396"
},
{
"name": "C++",
"bytes": "697696"
},
{
"name": "CSS",
"bytes": "837"
},
{
"name": "Python",
"bytes": "14516232"
},
{
"name": "Shell",
"bytes": "127"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.core.logger import Logger
from resource_management.core import shell
__all__ = ["check_process_status"]
import os
def check_process_status(pid_file):
"""
Function checks whether process is running.
Process is considered running, if pid file exists, and process with
a pid, mentioned in pid file is running
If process is not running, will throw ComponentIsNotRunning exception
@param pid_file: path to service pid file
"""
from resource_management.core import sudo
if not pid_file or not os.path.isfile(pid_file):
Logger.info("Pid file {0} is empty or does not exist".format(str(pid_file)))
raise ComponentIsNotRunning()
try:
pid = int(sudo.read_file(pid_file))
except:
Logger.info("Pid file {0} does not exist or does not contain a process id number".format(pid_file))
raise ComponentIsNotRunning()
try:
# Kill will not actually kill the process
# From the doc:
# If sig is 0, then no signal is sent, but error checking is still
# performed; this can be used to check for the existence of a
# process ID or process group ID.
sudo.kill(pid, 0)
except OSError:
Logger.info("Process with pid {0} is not running. Stale pid file"
" at {1}".format(pid, pid_file))
raise ComponentIsNotRunning()
def wait_process_stopped(pid_file):
"""
Waits until component is actually stopped (check is performed using
check_process_status() method.
"""
import time
component_is_stopped = False
counter = 0
while not component_is_stopped:
try:
if counter % 10 == 0:
Logger.logger.info("Waiting for actual component stop")
check_process_status(pid_file)
time.sleep(1)
counter += 1
except ComponentIsNotRunning, e:
Logger.logger.debug(" reports ComponentIsNotRunning")
component_is_stopped = True
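# Illustrative sketch, not part of the original module. The pid file path below
# is hypothetical; in a real service script these helpers typically back the
# component's status() and stop() handlers.
def _example_status_handler():
  # Raises ComponentIsNotRunning if the pid file is missing or stale.
  check_process_status("/var/run/myservice/myservice.pid")
def _example_stop_handler():
  # ... the command that asks the service to stop would go here ...
  wait_process_stopped("/var/run/myservice/myservice.pid")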
| {
"content_hash": "9704e63c8c9c00d32ca1579df53ce43f",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 103,
"avg_line_length": 34.0375,
"alnum_prop": 0.7278736687477048,
"repo_name": "sekikn/ambari",
"id": "ac54bc9d6eb807979a07b5fe52f00ad605376e8f",
"size": "2745",
"binary": false,
"copies": "4",
"ref": "refs/heads/trunk",
"path": "ambari-common/src/main/python/resource_management/libraries/functions/check_process_status.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "22734"
},
{
"name": "C",
"bytes": "109499"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "CSS",
"bytes": "616806"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "Dockerfile",
"bytes": "8117"
},
{
"name": "HTML",
"bytes": "3725781"
},
{
"name": "Handlebars",
"bytes": "1594385"
},
{
"name": "Java",
"bytes": "26670585"
},
{
"name": "JavaScript",
"bytes": "14647486"
},
{
"name": "Jinja",
"bytes": "147938"
},
{
"name": "Less",
"bytes": "303080"
},
{
"name": "Makefile",
"bytes": "2407"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "298247"
},
{
"name": "PowerShell",
"bytes": "2047735"
},
{
"name": "Python",
"bytes": "7226684"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Shell",
"bytes": "350773"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim Script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "1133"
}
],
"symlink_target": ""
} |
"""Tests for http/websocket.py"""
import base64
import hashlib
import os
import struct
import unittest
import unittest.mock
import aiohttp
from aiohttp import websocket, multidict, protocol, errors
class WebsocketParserTests(unittest.TestCase):
def test_parse_frame(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
p.send(struct.pack('!BB', 0b00000001, 0b00000001))
try:
p.send(b'1')
except StopIteration as exc:
fin, opcode, payload = exc.value
self.assertEqual((0, 1, b'1'), (fin, opcode, payload))
def test_parse_frame_length0(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
try:
p.send(struct.pack('!BB', 0b00000001, 0b00000000))
except StopIteration as exc:
fin, opcode, payload = exc.value
self.assertEqual((0, 1, b''), (fin, opcode, payload))
def test_parse_frame_length2(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
p.send(struct.pack('!BB', 0b00000001, 126))
p.send(struct.pack('!H', 4))
try:
p.send(b'1234')
except StopIteration as exc:
fin, opcode, payload = exc.value
self.assertEqual((0, 1, b'1234'), (fin, opcode, payload))
def test_parse_frame_length4(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
p.send(struct.pack('!BB', 0b00000001, 127))
p.send(struct.pack('!Q', 4))
try:
p.send(b'1234')
except StopIteration as exc:
fin, opcode, payload = exc.value
self.assertEqual((0, 1, b'1234'), (fin, opcode, payload))
def test_parse_frame_mask(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
p.send(struct.pack('!BB', 0b00000001, 0b10000001))
p.send(b'0001')
try:
p.send(b'1')
except StopIteration as exc:
fin, opcode, payload = exc.value
self.assertEqual((0, 1, b'\x01'), (fin, opcode, payload))
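        # Note: b'\x01' == ord(b'1') ^ ord(b'0') -- the single payload byte is
        # unmasked with the first byte of the 4-byte masking key b'0001'.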
def test_parse_frame_header_reversed_bits(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
self.assertRaises(
websocket.WebSocketError,
p.send, struct.pack('!BB', 0b01100000, 0b00000000))
def test_parse_frame_header_control_frame(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
self.assertRaises(
websocket.WebSocketError,
p.send, struct.pack('!BB', 0b00001000, 0b00000000))
def test_parse_frame_header_continuation(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
self.assertRaises(
websocket.WebSocketError,
p.send, struct.pack('!BB', 0b00000000, 0b00000000))
def test_parse_frame_header_new_data_err(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
self.assertRaises(
websocket.WebSocketError,
p.send, struct.pack('!BB', 0b000000000, 0b00000000))
def test_parse_frame_header_payload_size(self):
buf = aiohttp.ParserBuffer()
p = websocket.parse_frame(buf)
next(p)
self.assertRaises(
websocket.WebSocketError,
p.send, struct.pack('!BB', 0b10001000, 0b01111110))
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_ping_frame(self, m_parse_frame):
def parse_frame(buf):
yield
return (1, websocket.OPCODE_PING, b'data')
m_parse_frame.side_effect = parse_frame
buf = aiohttp.ParserBuffer()
p = websocket.parse_message(buf)
next(p)
try:
p.send(b'')
except StopIteration as exc:
res = exc.value
self.assertEqual(res, (websocket.OPCODE_PING, b'data', ''))
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_pong_frame(self, m_parse_frame):
def parse_frame(buf):
yield
return (1, websocket.OPCODE_PONG, b'data')
m_parse_frame.side_effect = parse_frame
buf = aiohttp.ParserBuffer()
p = websocket.parse_message(buf)
next(p)
try:
p.send(b'')
except StopIteration as exc:
res = exc.value
self.assertEqual(res, (websocket.OPCODE_PONG, b'data', ''))
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_close_frame(self, m_parse_frame):
def parse_frame(buf):
yield
return (1, websocket.OPCODE_CLOSE, b'')
m_parse_frame.side_effect = parse_frame
p = websocket.parse_message(aiohttp.ParserBuffer())
next(p)
try:
p.send(b'')
except StopIteration as exc:
res = exc.value
self.assertEqual(res, (websocket.OPCODE_CLOSE, 0, ''))
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_close_frame_info(self, m_parse_frame):
def parse_frame(buf):
yield
return (1, websocket.OPCODE_CLOSE, b'0112345')
m_parse_frame.side_effect = parse_frame
buf = aiohttp.ParserBuffer()
p = websocket.parse_message(buf)
next(p)
try:
p.send(b'')
except StopIteration as exc:
res = exc.value
self.assertEqual(res, (websocket.OPCODE_CLOSE, 12337, b'12345'))
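        # 12337 == struct.unpack('!H', b'01')[0]: the first two payload bytes
        # of a CLOSE frame carry the close code, the remainder is the reason.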
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_close_frame_invalid(self, m_parse_frame):
def parse_frame(buf):
yield
return (1, websocket.OPCODE_CLOSE, b'1')
m_parse_frame.side_effect = parse_frame
buf = aiohttp.ParserBuffer()
p = websocket.parse_message(buf)
next(p)
self.assertRaises(websocket.WebSocketError, p.send, b'')
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_unknown_frame(self, m_parse_frame):
def parse_frame(buf):
yield
return (1, websocket.OPCODE_CONTINUATION, b'')
m_parse_frame.side_effect = parse_frame
buf = aiohttp.ParserBuffer()
p = websocket.parse_message(buf)
next(p)
self.assertRaises(websocket.WebSocketError, p.send, b'')
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_simple_text(self, m_parse_frame):
def parse_frame(buf):
yield
return (1, websocket.OPCODE_TEXT, b'text')
m_parse_frame.side_effect = parse_frame
buf = aiohttp.ParserBuffer()
p = websocket.parse_message(buf)
next(p)
try:
p.send(b'')
except StopIteration as exc:
res = exc.value
self.assertEqual(res, (websocket.OPCODE_TEXT, 'text', ''))
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_simple_binary(self, m_parse_frame):
def parse_frame(buf):
yield
return (1, websocket.OPCODE_BINARY, b'binary')
m_parse_frame.side_effect = parse_frame
buf = aiohttp.ParserBuffer()
p = websocket.parse_message(buf)
next(p)
try:
p.send(b'')
except StopIteration as exc:
res = exc.value
self.assertEqual(res, (websocket.OPCODE_BINARY, b'binary', ''))
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_continuation(self, m_parse_frame):
cur = 0
def parse_frame(buf):
nonlocal cur
yield
if cur == 0:
cur = 1
return (0, websocket.OPCODE_TEXT, b'line1')
else:
return (1, websocket.OPCODE_CONTINUATION, b'line2')
m_parse_frame.side_effect = parse_frame
buf = aiohttp.ParserBuffer()
p = websocket.parse_message(buf)
next(p)
p.send(b'')
try:
p.send(b'')
except StopIteration as exc:
res = exc.value
self.assertEqual(res, (websocket.OPCODE_TEXT, 'line1line2', ''))
@unittest.mock.patch('aiohttp.websocket.parse_frame')
def test_continuation_err(self, m_parse_frame):
cur = 0
def parse_frame(buf):
nonlocal cur
yield
if cur == 0:
cur = 1
return (0, websocket.OPCODE_TEXT, b'line1')
else:
return (1, websocket.OPCODE_TEXT, b'line2')
m_parse_frame.side_effect = parse_frame
buf = aiohttp.ParserBuffer()
p = websocket.parse_message(buf)
next(p)
p.send(b'')
self.assertRaises(websocket.WebSocketError, p.send, b'')
@unittest.mock.patch('aiohttp.websocket.parse_message')
def test_parser(self, m_parse_message):
cur = 0
def parse_message(buf):
nonlocal cur
yield
if cur == 0:
cur = 1
return websocket.Message(websocket.OPCODE_TEXT, b'line1', b'')
else:
return websocket.Message(websocket.OPCODE_CLOSE, b'', b'')
m_parse_message.side_effect = parse_message
out = aiohttp.FlowControlDataQueue(unittest.mock.Mock())
buf = aiohttp.ParserBuffer()
p = websocket.WebSocketParser(out, buf)
next(p)
p.send(b'')
self.assertRaises(StopIteration, p.send, b'')
self.assertEqual(
(websocket.OPCODE_TEXT, b'line1', b''), out._buffer[0])
self.assertEqual(
(websocket.OPCODE_CLOSE, b'', b''), out._buffer[1])
self.assertTrue(out._eof)
def test_parser_eof(self):
out = aiohttp.FlowControlDataQueue(unittest.mock.Mock())
buf = aiohttp.ParserBuffer()
p = websocket.WebSocketParser(out, buf)
next(p)
self.assertRaises(aiohttp.EofStream, p.throw, aiohttp.EofStream)
self.assertEqual([], list(out._buffer))
class WebsocketWriterTests(unittest.TestCase):
def setUp(self):
self.transport = unittest.mock.Mock()
self.writer = websocket.WebSocketWriter(self.transport)
def test_pong(self):
self.writer.pong()
self.transport.write.assert_called_with(b'\x8a\x00')
def test_ping(self):
self.writer.ping()
self.transport.write.assert_called_with(b'\x89\x00')
def test_send_text(self):
self.writer.send(b'text')
self.transport.write.assert_called_with(b'\x81\x04text')
def test_send_binary(self):
self.writer.send('binary', True)
self.transport.write.assert_called_with(b'\x82\x06binary')
def test_send_binary_long(self):
self.writer.send(b'b' * 127, True)
self.assertTrue(
self.transport.write.call_args[0][0].startswith(b'\x82~\x00\x7fb'))
def test_send_binary_very_long(self):
self.writer.send(b'b' * 65537, True)
self.assertTrue(
self.transport.write.call_args[0][0].startswith(
b'\x82\x7f\x00\x00\x00\x00\x00\x01\x00\x01b'))
def test_close(self):
self.writer.close(1001, 'msg')
self.transport.write.assert_called_with(b'\x88\x05\x03\xe9msg')
self.writer.close(1001, b'msg')
self.transport.write.assert_called_with(b'\x88\x05\x03\xe9msg')
class WebSocketHandshakeTests(unittest.TestCase):
def setUp(self):
self.transport = unittest.mock.Mock()
self.headers = multidict.MultiDict()
self.message = protocol.RawRequestMessage(
'GET', '/path', (1, 0), self.headers, True, None)
def test_not_get(self):
self.assertRaises(
errors.HttpProcessingError,
websocket.do_handshake,
'POST', self.message.headers, self.transport)
def test_no_upgrade(self):
self.assertRaises(
errors.HttpBadRequest,
websocket.do_handshake,
self.message.method, self.message.headers, self.transport)
def test_no_connection(self):
self.headers.extend([('UPGRADE', 'websocket'),
('CONNECTION', 'keep-alive')])
self.assertRaises(
errors.HttpBadRequest,
websocket.do_handshake,
self.message.method, self.message.headers, self.transport)
def test_protocol_version(self):
self.headers.extend([('UPGRADE', 'websocket'),
('CONNECTION', 'upgrade')])
self.assertRaises(
errors.HttpBadRequest,
websocket.do_handshake,
self.message.method, self.message.headers, self.transport)
self.headers.extend([('UPGRADE', 'websocket'),
('CONNECTION', 'upgrade'),
('SEC-WEBSOCKET-VERSION', '1')])
self.assertRaises(
errors.HttpBadRequest,
websocket.do_handshake,
self.message.method, self.message.headers, self.transport)
def test_protocol_key(self):
self.headers.extend([('UPGRADE', 'websocket'),
('CONNECTION', 'upgrade'),
('SEC-WEBSOCKET-VERSION', '13')])
self.assertRaises(
errors.HttpBadRequest,
websocket.do_handshake,
self.message.method, self.message.headers, self.transport)
self.headers.extend([('UPGRADE', 'websocket'),
('CONNECTION', 'upgrade'),
('SEC-WEBSOCKET-VERSION', '13'),
('SEC-WEBSOCKET-KEY', '123')])
self.assertRaises(
errors.HttpBadRequest,
websocket.do_handshake,
self.message.method, self.message.headers, self.transport)
sec_key = base64.b64encode(os.urandom(2))
self.headers.extend([('UPGRADE', 'websocket'),
('CONNECTION', 'upgrade'),
('SEC-WEBSOCKET-VERSION', '13'),
('SEC-WEBSOCKET-KEY', sec_key.decode())])
self.assertRaises(
errors.HttpBadRequest,
websocket.do_handshake,
self.message.method, self.message.headers, self.transport)
def gen_ws_headers(self, protocols=''):
key = base64.b64encode(os.urandom(16)).decode()
hdrs = [('UPGRADE', 'websocket'),
('CONNECTION', 'upgrade'),
('SEC-WEBSOCKET-VERSION', '13'),
('SEC-WEBSOCKET-KEY', key)]
if protocols:
hdrs += [('SEC-WEBSOCKET-PROTOCOL', protocols)]
return hdrs, key
def test_handshake(self):
hdrs, sec_key = self.gen_ws_headers()
self.headers.extend(hdrs)
status, headers, parser, writer, protocol = websocket.do_handshake(
self.message.method, self.message.headers, self.transport)
self.assertEqual(status, 101)
self.assertIsNone(protocol)
key = base64.b64encode(
hashlib.sha1(sec_key.encode() + websocket.WS_KEY).digest())
headers = dict(headers)
self.assertEqual(headers['SEC-WEBSOCKET-ACCEPT'], key.decode())
def test_handshake_protocol(self):
'''Tests if one protocol is returned by do_handshake'''
proto = 'chat'
self.headers.extend(self.gen_ws_headers(proto)[0])
_, resp_headers, _, _, protocol = websocket.do_handshake(
self.message.method, self.message.headers, self.transport,
protocols=[proto])
self.assertEqual(protocol, proto)
# also test if we reply with the protocol
resp_headers = dict(resp_headers)
self.assertEqual(resp_headers['SEC-WEBSOCKET-PROTOCOL'], proto)
def test_handshake_protocol_agreement(self):
'''Tests if the right protocol is selected given multiple'''
best_proto = 'worse_proto'
wanted_protos = ['best', 'chat', 'worse_proto']
server_protos = 'worse_proto,chat'
self.headers.extend(self.gen_ws_headers(server_protos)[0])
_, resp_headers, _, _, protocol = websocket.do_handshake(
self.message.method, self.message.headers, self.transport,
protocols=wanted_protos)
self.assertEqual(protocol, best_proto)
@unittest.mock.patch('aiohttp.websocket.ws_logger.warning')
def test_handshake_protocol_unsupported(self, m_websocket_warn):
'''Tests if a protocol mismatch handshake warns and returns None'''
warn_called = False
def websocket_warn(msg, *fmts):
nonlocal warn_called
warn_called = True
m_websocket_warn.side_effect = websocket_warn
proto = 'chat'
self.headers.extend(self.gen_ws_headers('test')[0])
_, _, _, _, protocol = websocket.do_handshake(
self.message.method, self.message.headers, self.transport,
protocols=[proto])
self.assertTrue(warn_called, 'protocol mismatch didn’t warn')
self.assertIsNone(protocol)
| {
"content_hash": "b7e414deb84f02f8cb7b1e019f7f9a58",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 79,
"avg_line_length": 34.86991869918699,
"alnum_prop": 0.580729773840056,
"repo_name": "saghul/aiohttp",
"id": "f1be1fd112d6f11e8b2e1866ed0def34b6c5cf2a",
"size": "17158",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_websocket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7854"
},
{
"name": "Python",
"bytes": "629196"
},
{
"name": "Shell",
"bytes": "6703"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_bestine_capitol02.iff"
result.attribute_template_id = 9
result.stfName("npc_name","twilek_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "192fb3188d5a2bd7c05e3cd1421e98aa",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 23.692307692307693,
"alnum_prop": 0.7012987012987013,
"repo_name": "obi-two/Rebelion",
"id": "f3d63cad1a75eb1cee33ce7d4b446e6a5ae5f6bc",
"size": "453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_bestine_capitol02.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""Utilities relating to interaction with service plans
************************************************************************
FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
SHOULD ALSO BE APPLIED TO sdk_plan IN ANY OTHER PARTNER REPOS
************************************************************************
"""
import datetime
import logging
import retrying
from typing import Any, Dict, List, Optional, Union
import sdk_cmd
import sdk_tasks
TIMEOUT_SECONDS = 15 * 60
SHORT_TIMEOUT_SECONDS = 30
MAX_NEW_TASK_FAILURES = 10
class TaskFailuresExceededException(Exception):
pass
log = logging.getLogger(__name__)
def get_deployment_plan(
service_name: str, timeout_seconds: int = TIMEOUT_SECONDS
) -> Dict[str, Any]:
return get_plan(service_name=service_name, plan="deploy", timeout_seconds=timeout_seconds)
def get_recovery_plan(service_name: str, timeout_seconds: int = TIMEOUT_SECONDS) -> Dict[str, Any]:
return get_plan(service_name=service_name, plan="recovery", timeout_seconds=timeout_seconds)
def get_decommission_plan(
service_name: str, timeout_seconds: int = TIMEOUT_SECONDS
) -> Dict[str, Any]:
return get_plan(service_name=service_name, plan="decommission", timeout_seconds=timeout_seconds)
def list_plans(
service_name: str,
timeout_seconds: int = TIMEOUT_SECONDS,
multiservice_name: Optional[str] = None,
) -> List:
if multiservice_name is None:
path = "/v1/plans"
else:
path = "/v1/service/{}/plans".format(multiservice_name)
result = sdk_cmd.service_request(
"GET", service_name, path, timeout_seconds=timeout_seconds
).json()
assert isinstance(result, list)
return result
def get_plan_once(
service_name: str, plan: str, multiservice_name: Optional[str] = None
) -> Dict[str, Any]:
if multiservice_name is None:
path = "/v1/plans/{}".format(plan)
else:
path = "/v1/service/{}/plans/{}".format(multiservice_name, plan)
response = sdk_cmd.service_request("GET", service_name, path, retry=False, raise_on_error=False)
if (
response.status_code != 417
): # Plan has errors: Avoid throwing an exception, return plan as-is.
response.raise_for_status()
result = response.json()
assert isinstance(result, dict)
return result
def get_plan(
service_name: str,
plan: str,
timeout_seconds: int = TIMEOUT_SECONDS,
multiservice_name: Optional[str] = None,
) -> Dict[str, Any]:
@retrying.retry(wait_fixed=1000, stop_max_delay=timeout_seconds * 1000)
def wait_for_plan() -> Dict[str, Any]:
return get_plan_once(service_name, plan, multiservice_name)
result = wait_for_plan()
assert isinstance(result, dict)
return result
def start_plan(service_name: str, plan: str, parameters: Optional[Dict[str, Any]] = None) -> None:
sdk_cmd.service_request(
"POST",
service_name,
"/v1/plans/{}/start".format(plan),
json=parameters if parameters is not None else {},
)
def force_complete_step(service_name: str, plan: str, phase: str, step: str) -> None:
sdk_cmd.service_request(
"POST",
service_name,
"/v1/plans/{}/forceComplete?phase={}&step={}".format(plan, phase, step),
)
def wait_for_completed_recovery(
service_name: str,
timeout_seconds: int = TIMEOUT_SECONDS,
multiservice_name: Optional[str] = None,
) -> Dict[str, Any]:
return wait_for_completed_plan(service_name, "recovery", timeout_seconds, multiservice_name)
def wait_for_in_progress_recovery(
service_name: str, timeout_seconds: int = TIMEOUT_SECONDS
) -> Dict[str, Any]:
return wait_for_in_progress_plan(service_name, "recovery", timeout_seconds)
def wait_for_kicked_off_deployment(
service_name: str, timeout_seconds: int = TIMEOUT_SECONDS
) -> Dict[str, Any]:
return wait_for_kicked_off_plan(service_name, "deploy", timeout_seconds)
def wait_for_kicked_off_recovery(
service_name: str, timeout_seconds: int = TIMEOUT_SECONDS
) -> Dict[str, Any]:
return wait_for_kicked_off_plan(service_name, "recovery", timeout_seconds)
def wait_for_completed_deployment(
service_name: str,
timeout_seconds: int = TIMEOUT_SECONDS,
multiservice_name: Optional[str] = None,
) -> Dict[str, Any]:
return wait_for_completed_plan(service_name, "deploy", timeout_seconds, multiservice_name)
def wait_for_completed_plan(
service_name: str,
plan_name: str,
timeout_seconds: int = TIMEOUT_SECONDS,
multiservice_name: Optional[str] = None,
) -> Dict[str, Any]:
return wait_for_plan_status(
service_name, plan_name, "COMPLETE", timeout_seconds, multiservice_name
)
def wait_for_completed_phase(
service_name: str, plan_name: str, phase_name: str, timeout_seconds: int = TIMEOUT_SECONDS
) -> Dict[str, Any]:
return wait_for_phase_status(service_name, plan_name, phase_name, "COMPLETE", timeout_seconds)
def wait_for_completed_step(
service_name: str,
plan_name: str,
phase_name: str,
step_name: str,
timeout_seconds: int = TIMEOUT_SECONDS,
) -> Dict[str, Any]:
return wait_for_step_status(
service_name, plan_name, phase_name, step_name, "COMPLETE", timeout_seconds
)
def wait_for_kicked_off_plan(
service_name: str, plan_name: str, timeout_seconds: int = TIMEOUT_SECONDS
) -> Dict[str, Any]:
return wait_for_plan_status(
service_name, plan_name, ["PENDING", "STARTING", "IN_PROGRESS"], timeout_seconds
)
def wait_for_in_progress_plan(
service_name: str, plan_name: str, timeout_seconds: int = TIMEOUT_SECONDS
) -> Dict[str, Any]:
return wait_for_plan_status(service_name, plan_name, "IN_PROGRESS", timeout_seconds)
def wait_for_starting_plan(
service_name: str, plan_name: str, timeout_seconds: int = TIMEOUT_SECONDS
) -> Dict[str, Any]:
return wait_for_plan_status(service_name, plan_name, "STARTING", timeout_seconds)
def wait_for_plan_status(
service_name: str,
plan_name: str,
status: Union[List[str], str],
timeout_seconds: int = TIMEOUT_SECONDS,
multiservice_name: Optional[str] = None,
) -> Dict[str, Any]:
"""Wait for a plan to have one of the specified statuses"""
if isinstance(status, str):
statuses = [status]
else:
statuses = status
initial_failures = sdk_tasks.get_failed_task_count(service_name)
wait_start = datetime.datetime.utcnow()
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout_seconds * 1000,
retry_on_result=lambda res: not res,
retry_on_exception=lambda e: not isinstance(e, TaskFailuresExceededException),
)
def fn() -> Union[Dict[str, Any], bool]:
failures = sdk_tasks.get_failed_task_count(service_name)
if failures - initial_failures > MAX_NEW_TASK_FAILURES:
log.error(
"Tasks in service %s failed %d times since starting %ds ago to wait for %s to reach %s, aborting.",
service_name,
MAX_NEW_TASK_FAILURES,
(datetime.datetime.utcnow() - wait_start).total_seconds(),
plan_name,
statuses,
)
raise TaskFailuresExceededException("Service not recoverable: {}".format(service_name))
plan = get_plan(
service_name,
plan_name,
timeout_seconds=SHORT_TIMEOUT_SECONDS,
multiservice_name=multiservice_name,
)
log.info("Waiting for %s %s plan:\n%s", status, plan_name, plan_string(plan_name, plan))
if plan and plan["status"] in statuses:
return plan
else:
return False
result = fn()
assert isinstance(result, dict)
return result
def wait_for_phase_status(
service_name: str,
plan_name: str,
phase_name: str,
status: str,
timeout_seconds: int = TIMEOUT_SECONDS,
) -> Dict[str, Any]:
@retrying.retry(
wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res
)
def fn() -> Union[Dict[str, Any], bool]:
plan = get_plan(service_name, plan_name, SHORT_TIMEOUT_SECONDS)
phase = get_phase(plan, phase_name)
log.info(
"Waiting for {} {}.{} phase:\n{}".format(
status, plan_name, phase_name, plan_string(plan_name, plan)
)
)
if phase and phase["status"] == status:
return plan
else:
return False
result = fn()
assert isinstance(result, dict)
return result
def wait_for_step_status(
service_name: str,
plan_name: str,
phase_name: str,
step_name: str,
status: str,
timeout_seconds: int = TIMEOUT_SECONDS,
) -> Dict[str, Any]:
@retrying.retry(
wait_fixed=1000, stop_max_delay=timeout_seconds * 1000, retry_on_result=lambda res: not res
)
def fn() -> Union[Dict[str, Any], bool]:
plan = get_plan(service_name, plan_name, SHORT_TIMEOUT_SECONDS)
step = get_step(get_phase(plan, phase_name), step_name)
log.info(
"Waiting for {} {}.{}.{} step:\n{}".format(
status, plan_name, phase_name, step_name, plan_string(plan_name, plan)
)
)
if step and step["status"] == status:
return plan
else:
return False
result = fn()
assert isinstance(result, dict)
return result
def recovery_plan_is_empty(service_name: str) -> bool:
plan = get_recovery_plan(service_name)
return len(plan["phases"]) == 0 and len(plan["errors"]) == 0 and plan["status"] == "COMPLETE"
def get_phase(plan: Dict[str, Any], name: str) -> Any:
return get_child(plan, "phases", name)
def get_step(phase: Dict[str, Any], name: str) -> Any:
return get_child(phase, "steps", name)
def get_all_step_names(plan: Dict[str, Any]) -> List[str]:
steps: List[str] = []
for phase in plan["phases"]:
steps += [step["name"] for step in phase["steps"]]
return steps
def get_child(parent: Dict[str, Any], children_field: str, name: str) -> Any:
if parent is None:
return None
for child in parent[children_field]:
if child["name"] == name:
return child
return None
def plan_string(plan_name: str, plan: Dict[str, Any]) -> str:
if plan is None:
return "{}=NULL!".format(plan_name)
def phase_string(phase: Dict[str, Any]) -> str:
""" Formats the phase output as follows:
deploy STARTING:
- node-deploy STARTING: node-0:[server] = STARTING, node-1:[server] = PENDING, node-2:[server] = PENDING
- node-other PENDING: somestep=PENDING
- errors: foo, bar
"""
return "\n- {} ({}): {}".format(
phase["name"],
phase["status"],
", ".join("{}={}".format(step["name"], step["status"]) for step in phase["steps"]),
)
plan_str = "{} ({}):{}".format(
plan_name, plan["status"], "".join(phase_string(phase) for phase in plan["phases"])
)
if plan.get("errors", []):
plan_str += "\n- errors: {}".format(", ".join(plan["errors"]))
return plan_str
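# Illustrative sketch, not part of the original module: a typical test helper
# built on the utilities above. The service name and the expectations asserted
# here are hypothetical.
def example_wait_for_healthy_service(service_name: str = "my-kafka") -> None:
    # Block until the deploy plan finishes, then require an idle recovery plan.
    plan = wait_for_completed_deployment(service_name)
    log.info("deploy finished:\n%s", plan_string("deploy", plan))
    assert recovery_plan_is_empty(service_name), "unexpected recovery in progress"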
| {
"content_hash": "ffeceb30abc6abe1cf1317c2a841af17",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 115,
"avg_line_length": 31.471910112359552,
"alnum_prop": 0.6231702963227419,
"repo_name": "mesosphere/dcos-kafka-service",
"id": "e9821c25335cc9cde2477504611710bbe63cefe5",
"size": "11204",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testing/sdk_plan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "68"
},
{
"name": "Go",
"bytes": "37932"
},
{
"name": "HTML",
"bytes": "21038"
},
{
"name": "Java",
"bytes": "43823"
},
{
"name": "Python",
"bytes": "552827"
},
{
"name": "Shell",
"bytes": "49236"
}
],
"symlink_target": ""
} |
import os
import glob
import numpy as np
from datetime import datetime
from astropy.io import fits
from .conf import read_conf
from ._version import __version__
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from PIL import Image
except ImportError:
import Image
gexiv_available = True
try:
from gi.repository import GExiv2
except ImportError:
gexiv_available = False
pytz_available = True
try:
import pytz
except ImportError:
pytz_available = False
class PlateConverter:
"""
TIFF-to-FITS converter class
"""
def __init__(self):
self.tiff_dir = ''
self.write_fits_dir = ''
self.write_wedge_dir = ''
self.scan_exif_timezone = None
self.wedge_height = None
self.cut_wedge = False
def assign_conf(self, conf):
"""
Parse configuration and set class attributes.
"""
if isinstance(conf, str):
conf = read_conf(conf)
for attr in ['tiff_dir', 'write_fits_dir', 'write_wedge_dir']:
try:
setattr(self, attr, conf.get('Files', attr))
except configparser.Error:
pass
for attr in ['scan_exif_timezone']:
try:
setattr(self, attr, conf.get('Image', attr))
except configparser.Error:
pass
for attr in ['wedge_height']:
try:
setattr(self, attr, conf.getint('Image', attr))
except configparser.Error:
pass
for attr in ['cut_wedge']:
try:
setattr(self, attr, conf.getboolean('Image', attr))
except configparser.Error:
pass
def batch_tiff2fits(self):
"""
Convert all TIFF images in the TIFF directory to FITS.
"""
for fn_tiff in sorted(glob.glob(os.path.join(self.tiff_dir, '*.tif'))):
self.tiff2fits(os.path.basename(fn_tiff))
def tiff2fits(self, filename, cut_wedge=None, wedge_height=None):
"""
Convert TIFF image to FITS.
Parameters
----------
filename : str
Filename of the TIFF image
cut_wedge : bool
If True, a wedge image is separated from below plate image
wedge_height : int
Height of the wedge in pixels
"""
if cut_wedge is None and self.cut_wedge is not None:
cut_wedge = self.cut_wedge
if wedge_height is None and self.wedge_height is not None:
wedge_height = self.wedge_height
fn_tiff = os.path.join(self.tiff_dir, filename)
im_pil = Image.open(fn_tiff)
exif_datetime = None
if gexiv_available:
exif = GExiv2.Metadata(fn_tiff)
if 'Exif.Image.DateTime' in exif:
exif_datetime = exif['Exif.Image.DateTime']
elif 'Exif.Photo.DateTimeDigitized' in exif:
exif_datetime = exif['Exif.Photo.DateTimeDigitized']
else:
try:
exif_datetime = im_pil.tag[306]
except Exception:
pass
if exif_datetime:
if exif_datetime[4] == ':':
exif_datetime = '{} {}'.format(exif_datetime[:10]
.replace(':', '-'),
exif_datetime[11:])
if pytz_available and self.scan_exif_timezone:
dt = datetime.strptime(exif_datetime, '%Y-%m-%d %H:%M:%S')
try:
dt_local = (pytz.timezone(self.scan_exif_timezone)
.localize(dt))
exif_datetime = (dt_local.astimezone(pytz.utc)
.strftime('%Y-%m-%dT%H:%M:%S'))
except pytz.exceptions.UnknownTimeZoneError:
pass
im = np.array(im_pil.getdata(),
dtype=np.uint16).reshape(im_pil.size[1],-1)
imwidth = im.shape[1]
imheight = im.shape[0]
imblack = im.min()
imwhite = im.max()
# Cut wedge image if necessary
if not cut_wedge or wedge_height == 0:
im_plates = im
im_wedge = None
        elif cut_wedge and wedge_height is not None:
ycut_wedge = imheight - wedge_height
im_wedge = im[ycut_wedge:,:]
im_plates = im[:ycut_wedge,:]
else:
yedge = []
yedge_plate = []
for x in np.arange(100, imwidth-100, 10):
# Take column, reverse it and use pixels from the 101st to
# 80% of the image height.
colrev = im[::-1,x][100:int(0.8*imheight)]
# Find nearly white pixels
ind_white = np.where(colrev-imblack > 0.95*(imwhite-imblack))[0]
# If the first near-white pixel is significantly lighter than
# the first 500 pixels in colrev, then use it as an edge of the
# wedge.
if (ind_white.size > 0 and
colrev[ind_white[0]]-imblack
> 1.1*(np.median(colrev[:500])-imblack)):
yedge.append(imheight - 100 - ind_white[0])
else:
col = im[int(0.2*imheight):,x]
ind_white = np.where(col-imblack
> 0.95*(imwhite-imblack))[0]
if (ind_white.size > 0 and
col[ind_white[0]]-imblack
> 1.1*(np.median(col[:500])-imblack)):
yedge_plate.append(ind_white[0] + int(0.2*imheight))
if len(yedge) > 0.01*imwidth:
ycut_wedge = int(np.median(yedge))
im_wedge = im[ycut_wedge:,:]
im_plates = im[:ycut_wedge,:]
else:
try:
ycut_wedge = int(np.percentile(yedge_plate, 80))
im_wedge = im[ycut_wedge:,:]
im_plates = im[:ycut_wedge,:]
except ValueError:
print('Cannot separate wedge in {}'.format(fn_tiff))
im_wedge = None
im_plates = im
del im
history_line = ('TIFF image converted to FITS with '
'PyPlate v{} at {}'
.format(__version__, datetime.utcnow()
.strftime('%Y-%m-%dT%H:%M:%S')))
if im_wedge is not None:
hdu_wedge = fits.PrimaryHDU(np.flipud(im_wedge))
if exif_datetime:
hdu_wedge.header.set('DATESCAN', exif_datetime)
hdu_wedge.header.add_history(history_line)
# Create wedge output directory
if self.write_wedge_dir:
try:
os.makedirs(self.write_wedge_dir)
except OSError:
if not os.path.isdir(self.write_wedge_dir):
print('Could not create directory {}'
                          .format(self.write_wedge_dir))
raise
# Create FITS image output directory
if self.write_fits_dir:
try:
os.makedirs(self.write_fits_dir)
except OSError:
if not os.path.isdir(self.write_fits_dir):
print('Could not create directory {}'
                          .format(self.write_fits_dir))
raise
# If filename contains dash, assume that two plates have been scanned
# side by side.
if '-' in os.path.basename(fn_tiff):
xedge = []
for y in np.arange(100, im_plates.shape[0]-100, 10):
row = im_plates[y,:]
row_mid = row[int(0.25*row.size):int(0.75*row.size)]
if row_mid.max() > 1.1*np.median(row_mid):
xedge.append(np.argmax(row_mid)+int(0.25*row.size))
xcut = int(np.median(xedge))
im_left = im_plates[:,:xcut]
im_right = im_plates[:,xcut:]
del im_plates
fn_two = os.path.splitext(os.path.basename(fn_tiff))[0]
fn_parts = fn_two.split('-')
fn_left = '{}{}.fits'.format(fn_parts[0][:7], fn_parts[1])
fn_right = '{}.fits'.format(fn_parts[0])
# Store left-side plate FITS
hdu_left = fits.PrimaryHDU(np.flipud(im_left))
hdu_left.header.set('MINVAL', im_left.min())
hdu_left.header.set('MAXVAL', im_left.max())
if exif_datetime:
hdu_left.header.set('DATESCAN', exif_datetime)
hdu_left.header.add_history(history_line)
hdu_left.writeto(os.path.join(self.write_fits_dir, fn_left),
overwrite=True)
if im_wedge is not None:
fn_wedge = os.path.splitext(fn_left)[0] + '_w.fits'
hdu_wedge.writeto(os.path.join(self.write_wedge_dir, fn_wedge),
overwrite=True)
# Store right-side plate FITS
hdu_right = fits.PrimaryHDU(np.flipud(im_right))
hdu_right.header.set('MINVAL', im_right.min())
hdu_right.header.set('MAXVAL', im_right.max())
if exif_datetime:
hdu_right.header.set('DATESCAN', exif_datetime)
hdu_right.header.add_history(history_line)
hdu_right.writeto(os.path.join(self.write_fits_dir, fn_right),
overwrite=True)
if im_wedge is not None:
fn_wedge = os.path.splitext(fn_right)[0] + '_w.fits'
hdu_wedge.writeto(os.path.join(self.write_wedge_dir, fn_wedge),
overwrite=True)
else:
fn_plate = os.path.splitext(os.path.basename(fn_tiff))[0] + '.fits'
hdu_plate = fits.PrimaryHDU(np.flipud(im_plates))
hdu_plate.header.set('MINVAL', im_plates.min())
hdu_plate.header.set('MAXVAL', im_plates.max())
if exif_datetime:
hdu_plate.header.set('DATESCAN', exif_datetime)
hdu_plate.header.add_history(history_line)
hdu_plate.writeto(os.path.join(self.write_fits_dir, fn_plate),
overwrite=True)
if im_wedge is not None:
fn_wedge = os.path.splitext(fn_plate)[0] + '_w.fits'
hdu_wedge.writeto(os.path.join(self.write_wedge_dir, fn_wedge),
overwrite=True)
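# Illustrative sketch, not part of the original module. The directory paths and
# wedge settings below are hypothetical; they would normally come from a
# configuration file via assign_conf().
def _example_batch_conversion():
    converter = PlateConverter()
    converter.tiff_dir = '/data/scans/tiff'
    converter.write_fits_dir = '/data/scans/fits'
    converter.write_wedge_dir = '/data/scans/wedges'
    converter.cut_wedge = True
    converter.wedge_height = 800
    # Convert every *.tif in tiff_dir; wedge strips go to write_wedge_dir.
    converter.batch_tiff2fits()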
| {
"content_hash": "c077a409a9a3f8bdd9213dfe2e0c275f",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 80,
"avg_line_length": 34.73701298701299,
"alnum_prop": 0.5010748668099823,
"repo_name": "astrotuvi/pyplate",
"id": "e5c5083e32d4fcb87f306a7513acadc65b97b133",
"size": "10699",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyplate/image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "515342"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.relay import RelayAPI
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-relay
# USAGE
python relay_name_space_delete.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = RelayAPI(
credential=DefaultAzureCredential(),
subscription_id="ffffffff-ffff-ffff-ffff-ffffffffffff",
)
response = client.namespaces.begin_delete(
resource_group_name="SouthCentralUS",
namespace_name="example-RelayNamespace-5849",
).result()
print(response)
# x-ms-original-file: specification/relay/resource-manager/Microsoft.Relay/stable/2021-11-01/examples/NameSpaces/RelayNameSpaceDelete.json
if __name__ == "__main__":
main()
| {
"content_hash": "8e7bfda3dbee8aaf54e01b04a717e63e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 138,
"avg_line_length": 33.303030303030305,
"alnum_prop": 0.72975432211101,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b482aad2f50505cc73e0c7228a60a18ed4df88ae",
"size": "1567",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/relay/azure-mgmt-relay/generated_samples/relay_name_space_delete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
Turma.test_models
~~~~~~~~~~~~~~
    Tests things related to the model.
:copyright: (c) 2011 by Felipe Arruda Pontes.
"""
from django.test import TestCase
from model_mommy import mommy
from Materia.Turma.models import Turma
class TurmaTest(TestCase):
def setUp(self):
self.turma = mommy.make_one(Turma)
def test_turma_save(self):
" verifica se consegue salvar um turma "
self.turma.save()
self.assertEqual(self.turma.id, 1)
| {
"content_hash": "d01f5c1086f2dec514301c570421c362",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 49,
"avg_line_length": 19.653846153846153,
"alnum_prop": 0.6183953033268101,
"repo_name": "arruda/amao",
"id": "dc81c6d869d288f29215d4d1818f0a2c9ef2e92c",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AMAO/apps/Materia/Turma/tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18513"
},
{
"name": "C++",
"bytes": "2359"
},
{
"name": "CSS",
"bytes": "21310"
},
{
"name": "JavaScript",
"bytes": "3452"
},
{
"name": "Python",
"bytes": "389608"
},
{
"name": "Ruby",
"bytes": "520"
},
{
"name": "Shell",
"bytes": "13785"
}
],
"symlink_target": ""
} |
import httplib
import json
import socket
import time
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.nec.common import config
from neutron.plugins.nec.common import exceptions as nexc
LOG = logging.getLogger(__name__)
class OFCClient(object):
"""A HTTP/HTTPS client for OFC Drivers."""
def __init__(self, host="127.0.0.1", port=8888, use_ssl=False,
key_file=None, cert_file=None):
"""Creates a new client to some OFC.
:param host: The host where service resides
:param port: The port where service resides
:param use_ssl: True to use SSL, False to use HTTP
:param key_file: The SSL key file to use if use_ssl is true
:param cert_file: The SSL cert file to use if use_ssl is true
"""
self.host = host
self.port = port
self.use_ssl = use_ssl
self.key_file = key_file
self.cert_file = cert_file
self.connection = None
def get_connection(self):
"""Returns the proper connection."""
if self.use_ssl:
connection_type = httplib.HTTPSConnection
else:
connection_type = httplib.HTTPConnection
# Open connection and send request, handling SSL certs
certs = {'key_file': self.key_file, 'cert_file': self.cert_file}
certs = dict((x, certs[x]) for x in certs if certs[x] is not None)
if self.use_ssl and len(certs):
conn = connection_type(self.host, self.port, **certs)
else:
conn = connection_type(self.host, self.port)
return conn
def _format_error_message(self, status, detail):
detail = ' ' + detail if detail else ''
return (_("Operation on OFC failed: %(status)s%(msg)s") %
{'status': status, 'msg': detail})
def do_single_request(self, method, action, body=None):
action = config.OFC.path_prefix + action
LOG.debug(_("Client request: %(host)s:%(port)s "
"%(method)s %(action)s [%(body)s]"),
{'host': self.host, 'port': self.port,
'method': method, 'action': action, 'body': body})
if type(body) is dict:
body = json.dumps(body)
try:
conn = self.get_connection()
headers = {"Content-Type": "application/json"}
conn.request(method, action, body, headers)
res = conn.getresponse()
data = res.read()
LOG.debug(_("OFC returns [%(status)s:%(data)s]"),
{'status': res.status,
'data': data})
# Try to decode JSON data if possible.
try:
data = json.loads(data)
except (ValueError, TypeError):
pass
if res.status in (httplib.OK,
httplib.CREATED,
httplib.ACCEPTED,
httplib.NO_CONTENT):
return data
elif res.status == httplib.SERVICE_UNAVAILABLE:
retry_after = res.getheader('retry-after')
LOG.warning(_("OFC returns ServiceUnavailable "
"(retry-after=%s)"), retry_after)
raise nexc.OFCServiceUnavailable(retry_after=retry_after)
elif res.status == httplib.NOT_FOUND:
LOG.info(_("Specified resource %s does not exist on OFC "),
action)
raise nexc.OFCResourceNotFound(resource=action)
else:
LOG.warning(_("Operation on OFC failed: "
"status=%(status)s, detail=%(detail)s"),
{'status': res.status, 'detail': data})
params = {'reason': _("Operation on OFC failed"),
'status': res.status}
if isinstance(data, dict):
params['err_code'] = data.get('err_code')
params['err_msg'] = data.get('err_msg')
else:
params['err_msg'] = data
raise nexc.OFCException(**params)
except (socket.error, IOError) as e:
reason = _("Failed to connect OFC : %s") % e
LOG.error(reason)
raise nexc.OFCException(reason=reason)
def do_request(self, method, action, body=None):
max_attempts = config.OFC.api_max_attempts
for i in range(max_attempts, 0, -1):
try:
return self.do_single_request(method, action, body)
except nexc.OFCServiceUnavailable as e:
with excutils.save_and_reraise_exception() as ctxt:
try:
wait_time = int(e.retry_after)
except (ValueError, TypeError):
wait_time = None
if i > 1 and wait_time:
LOG.info(_("Waiting for %s seconds due to "
"OFC Service_Unavailable."), wait_time)
time.sleep(wait_time)
ctxt.reraise = False
continue
def get(self, action):
return self.do_request("GET", action)
def post(self, action, body=None):
return self.do_request("POST", action, body=body)
def put(self, action, body=None):
return self.do_request("PUT", action, body=body)
def delete(self, action):
return self.do_request("DELETE", action)
| {
"content_hash": "e5b0bd584c4c4973a01007731c4e2c95",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 75,
"avg_line_length": 40.194244604316545,
"alnum_prop": 0.5294433506354036,
"repo_name": "yamahata/neutron",
"id": "957f24483a89fd4b640185821d36fd2aa714ab16",
"size": "6286",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/plugins/nec/common/ofc_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8778164"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from collections import defaultdict
from Queue import Empty, Queue
import threading
import time
# project
from checks import AgentCheck
from checks.libs.thread_pool import Pool
from config import _is_affirmative
TIMEOUT = 180
DEFAULT_SIZE_POOL = 6
MAX_LOOP_ITERATIONS = 1000
FAILURE = "FAILURE"
class Status:
DOWN = "DOWN"
WARNING = "WARNING"
CRITICAL = "CRITICAL"
UP = "UP"
class EventType:
DOWN = "servicecheck.state_change.down"
UP = "servicecheck.state_change.up"
class NetworkCheck(AgentCheck):
SOURCE_TYPE_NAME = 'servicecheck'
SERVICE_CHECK_PREFIX = 'network_check'
STATUS_TO_SERVICE_CHECK = {
Status.UP : AgentCheck.OK,
Status.WARNING : AgentCheck.WARNING,
Status.CRITICAL : AgentCheck.CRITICAL,
Status.DOWN : AgentCheck.CRITICAL,
}
"""
Services checks inherits from this class.
This class should never be directly instanciated.
Work flow:
The main agent loop will call the check function for each instance for
each iteration of the loop.
The check method will make an asynchronous call to the _process method in
one of the thread initiated in the thread pool created in this class constructor.
The _process method will call the _check method of the inherited class
which will perform the actual check.
The _check method must return a tuple which first element is either
Status.UP or Status.DOWN.
The second element is a short error message that will be displayed
when the service turns down.
"""
def __init__(self, name, init_config, agentConfig, instances):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# A dictionary to keep track of service statuses
self.statuses = {}
self.notified = {}
self.nb_failures = 0
self.pool_started = False
# Make sure every instance has a name that we use as a unique key
# to keep track of statuses
names = []
for inst in instances:
if 'name' not in inst:
raise Exception("All instances should have a 'name' parameter,"
" error on instance: {0}".format(inst))
            if inst['name'] in names:
                raise Exception("Duplicate names for instances with name {0}"
                                .format(inst['name']))
            names.append(inst['name'])
def stop(self):
self.stop_pool()
self.pool_started = False
def start_pool(self):
# The pool size should be the minimum between the number of instances
# and the DEFAULT_SIZE_POOL. It can also be overridden by the 'threads_count'
# parameter in the init_config of the check
self.log.info("Starting Thread Pool")
default_size = min(self.instance_count(), DEFAULT_SIZE_POOL)
self.pool_size = int(self.init_config.get('threads_count', default_size))
self.pool = Pool(self.pool_size)
self.resultsq = Queue()
self.jobs_status = {}
self.jobs_results = {}
self.pool_started = True
def stop_pool(self):
self.log.info("Stopping Thread Pool")
if self.pool_started:
self.pool.terminate()
self.pool.join()
self.jobs_status.clear()
assert self.pool.get_nworkers() == 0
def restart_pool(self):
self.stop_pool()
self.start_pool()
def check(self, instance):
if not self.pool_started:
self.start_pool()
if threading.activeCount() > 5 * self.pool_size + 5: # On Windows the agent runs on multiple threads so we need to have an offset of 5 in case the pool_size is 1
raise Exception("Thread number (%s) is exploding. Skipping this check" % threading.activeCount())
self._process_results()
self._clean()
name = instance.get('name', None)
if name is None:
self.log.error('Each service check must have a name')
return
if name not in self.jobs_status:
# A given instance should be processed one at a time
self.jobs_status[name] = time.time()
self.jobs_results[name] = self.pool.apply_async(self._process, args=(instance,))
else:
self.log.error("Instance: %s skipped because it's already running." % name)
def _process(self, instance):
try:
statuses = self._check(instance)
if isinstance(statuses, tuple):
# Assume the check only returns one service check
status, msg = statuses
self.resultsq.put((status, msg, None, instance))
elif isinstance(statuses, list):
for status in statuses:
sc_name, status, msg = status
self.resultsq.put((status, msg, sc_name, instance))
except Exception:
result = (FAILURE, FAILURE, FAILURE, FAILURE)
self.resultsq.put(result)
def _process_results(self):
for i in xrange(MAX_LOOP_ITERATIONS):
try:
# We want to fetch the result in a non blocking way
status, msg, sc_name, instance = self.resultsq.get_nowait()
except Empty:
break
if status == FAILURE:
self.nb_failures += 1
if self.nb_failures >= self.pool_size - 1:
self.nb_failures = 0
self.restart_pool()
continue
self.report_as_service_check(sc_name, status, instance, msg)
# FIXME: 5.3, this has been deprecated before, get rid of events
# Don't create any event to avoid duplicates with server side
# service_checks
skip_event = _is_affirmative(instance.get('skip_event', False))
instance_name = instance['name']
if not skip_event:
self.warning("Using events for service checks is deprecated in favor of monitors and will be removed in future versions of the Datadog Agent.")
event = None
if instance_name not in self.statuses:
self.statuses[instance_name] = defaultdict(list)
self.statuses[instance_name][sc_name].append(status)
window = int(instance.get('window', 1))
if window > 256:
self.log.warning("Maximum window size (256) exceeded, defaulting it to 256")
window = 256
threshold = instance.get('threshold', 1)
if len(self.statuses[instance_name][sc_name]) > window:
self.statuses[instance_name][sc_name].pop(0)
nb_failures = self.statuses[instance_name][sc_name].count(Status.DOWN)
if nb_failures >= threshold:
if self.notified.get((instance_name, sc_name), Status.UP) != Status.DOWN:
event = self._create_status_event(sc_name, status, msg, instance)
self.notified[(instance_name, sc_name)] = Status.DOWN
else:
if self.notified.get((instance_name, sc_name), Status.UP) != Status.UP:
event = self._create_status_event(sc_name, status, msg, instance)
self.notified[(instance_name, sc_name)] = Status.UP
if event is not None:
self.events.append(event)
# The job is finished here, this instance can be re processed
if instance_name in self.jobs_status:
del self.jobs_status[instance_name]
# if an exception happened, log it
if instance_name in self.jobs_results:
ret = self.jobs_results[instance_name].get()
if isinstance(ret, Exception):
self.log.exception("Exception in worker thread: {0}".format(ret))
del self.jobs_results[instance_name]
def _check(self, instance):
"""This function should be implemented by inherited classes"""
raise NotImplementedError
def _clean(self):
now = time.time()
for name, start_time in self.jobs_status.iteritems():
if now - start_time > TIMEOUT:
self.log.critical("Restarting Pool. One check is stuck: %s" % name)
self.restart_pool()
break
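# Illustrative sketch (not part of the original file): a minimal subclass of the
# threaded service-check base class defined above. The base class name
# (NetworkCheck) is assumed from this module's context; Status.UP / Status.DOWN
# and the (status, message) return shape are the ones handled by _process()
# above. The TCP probe itself is purely hypothetical.
#
#     class TCPPortCheck(NetworkCheck):
#         def _check(self, instance):
#             import socket
#             host, port = instance['host'], int(instance['port'])
#             try:
#                 socket.create_connection((host, port), timeout=5).close()
#                 return Status.UP, "%s:%s is reachable" % (host, port)
#             except Exception as e:
#                 return Status.DOWN, "%s:%s is unreachable: %s" % (host, port, e)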
| {
"content_hash": "d2913a8a3bed9dbb0d8be6d3bd7ac29f",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 169,
"avg_line_length": 37.72,
"alnum_prop": 0.5852480263933074,
"repo_name": "mderomph-coolblue/dd-agent",
"id": "eb37f0b995fda913f7260c71d095d80fde235078",
"size": "8496",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "checks/network_checks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2717"
},
{
"name": "Go",
"bytes": "2389"
},
{
"name": "HTML",
"bytes": "8758"
},
{
"name": "Nginx",
"bytes": "3908"
},
{
"name": "PowerShell",
"bytes": "2665"
},
{
"name": "Python",
"bytes": "2051024"
},
{
"name": "Ruby",
"bytes": "98141"
},
{
"name": "Shell",
"bytes": "54709"
},
{
"name": "XSLT",
"bytes": "2222"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
BEFORE_EVOLUTIONS = [
'evolutions_app2',
('evolutions_app2', 'second_evolution'),
]
AFTER_EVOLUTIONS = [
'evolutions_app',
('evolutions_app', 'first_evolution'),
]
BEFORE_MIGRATIONS = [
('migrations_app2', '0002_add_field'),
]
AFTER_MIGRATIONS = [
('migrations_app', '0001_initial'),
]
MUTATIONS = []
| {
"content_hash": "f3ab196bc5527fa8f0b2ed28b41a62c6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 44,
"avg_line_length": 16.863636363636363,
"alnum_prop": 0.633423180592992,
"repo_name": "beanbaginc/django-evolution",
"id": "46aec3dfac34cec6edbdcc8a77b47487cc8c1249",
"size": "371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_evolution/tests/evolution_deps_app/evolutions/test_evolution.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1892034"
}
],
"symlink_target": ""
} |
"""Support for Satel Integra zone states- represented as binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
CONF_OUTPUTS,
CONF_ZONE_NAME,
CONF_ZONE_TYPE,
CONF_ZONES,
DATA_SATEL,
SIGNAL_OUTPUTS_UPDATED,
SIGNAL_ZONES_UPDATED,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Satel Integra binary sensor devices."""
if not discovery_info:
return
configured_zones = discovery_info[CONF_ZONES]
controller = hass.data[DATA_SATEL]
devices = []
for zone_num, device_config_data in configured_zones.items():
zone_type = device_config_data[CONF_ZONE_TYPE]
zone_name = device_config_data[CONF_ZONE_NAME]
device = SatelIntegraBinarySensor(
controller, zone_num, zone_name, zone_type, SIGNAL_ZONES_UPDATED
)
devices.append(device)
configured_outputs = discovery_info[CONF_OUTPUTS]
for zone_num, device_config_data in configured_outputs.items():
zone_type = device_config_data[CONF_ZONE_TYPE]
zone_name = device_config_data[CONF_ZONE_NAME]
device = SatelIntegraBinarySensor(
controller, zone_num, zone_name, zone_type, SIGNAL_OUTPUTS_UPDATED
)
devices.append(device)
async_add_entities(devices)
class SatelIntegraBinarySensor(BinarySensorEntity):
"""Representation of an Satel Integra binary sensor."""
def __init__(
self, controller, device_number, device_name, zone_type, react_to_signal
):
"""Initialize the binary_sensor."""
self._device_number = device_number
self._name = device_name
self._zone_type = zone_type
self._state = 0
self._react_to_signal = react_to_signal
self._satel = controller
async def async_added_to_hass(self):
"""Register callbacks."""
if self._react_to_signal == SIGNAL_OUTPUTS_UPDATED:
if self._device_number in self._satel.violated_outputs:
self._state = 1
else:
self._state = 0
else:
if self._device_number in self._satel.violated_zones:
self._state = 1
else:
self._state = 0
self.async_on_remove(
async_dispatcher_connect(
self.hass, self._react_to_signal, self._devices_updated
)
)
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def icon(self):
"""Icon for device by its type."""
if self._zone_type == "smoke":
return "mdi:fire"
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state == 1
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return self._zone_type
@callback
def _devices_updated(self, zones):
"""Update the zone's state, if needed."""
if self._device_number in zones and self._state != zones[self._device_number]:
self._state = zones[self._device_number]
self.async_write_ha_state()
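# Illustrative sketch (not part of the original integration): the shape of the
# discovery_info dict that async_setup_platform() above consumes. Zone/output
# numbers, names and types are arbitrary example values; the types double as
# binary_sensor device classes.
_EXAMPLE_DISCOVERY_INFO = {
    CONF_ZONES: {
        1: {CONF_ZONE_NAME: "Front door", CONF_ZONE_TYPE: "opening"},
        2: {CONF_ZONE_NAME: "Kitchen smoke", CONF_ZONE_TYPE: "smoke"},
    },
    CONF_OUTPUTS: {
        5: {CONF_ZONE_NAME: "Siren", CONF_ZONE_TYPE: "safety"},
    },
}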
| {
"content_hash": "361756d0d94660a6e0d6e6d6eda3d8f8",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 86,
"avg_line_length": 30.608695652173914,
"alnum_prop": 0.6167613636363637,
"repo_name": "titilambert/home-assistant",
"id": "19763903f2784547fbdff7f53f19005737520df6",
"size": "3520",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/satel_integra/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import os.path
import urllib.parse
from zipfile import ZipFile
from openelex.base.fetch import BaseFetcher
from openelex.us.wa.datasource import Datasource
class FetchResults(BaseFetcher):
def __init__(self):
super(FetchResults, self).__init__()
self._fetched = set()
# We need access to the state datasource to be able to retrieve
# mappings for a specific URL in the case of zip files since multiple
# extracted files will come from the same URL.
self._datasource = Datasource()
def fetch(self, url, fname=None, overwrite=False):
# We keep track of URLs we've already fetched in this run since
# there will be multiple output files mapped to a single zip
# file. If we've already fetched this URL, exit early.
if url in self._fetched:
return
if url.endswith('.zip'):
# Fetch the zip file, using the automatically generated filename
zip_fname = self._local_zip_file_name(url)
super(FetchResults, self).fetch(url, zip_fname, overwrite)
self._extract_zip(url, zip_fname, overwrite)
else:
super(FetchResults, self).fetch(url, fname, overwrite)
self._fetched.add(url)
def _local_zip_file_name(self, url):
"""
Return a normalized local file name for a results zip file.
We don't care too much about the format because we can delete the
zip file later.
"""
parsed = urllib.parse.urlsplit(url)
fname = parsed.path.split('/')[-1]
return os.path.join(self.cache.abspath, fname)
def _extract_zip(self, url, zip_fname=None, overwrite=False, remove=True):
if zip_fname is None:
zip_fname = self._local_zip_file_name(url)
with ZipFile(zip_fname, 'r') as zipf:
for mapping in self._datasource.mappings_for_url(url):
local_file_name = os.path.join(self.cache.abspath,
mapping['generated_filename'])
if overwrite or not os.path.exists(local_file_name):
if mapping['parent_zipfile']:
# The downloaded ZIP archive contains zip files. We
# need to extract the nested zip file.
zipf.extract(mapping['parent_zipfile'],
self.cache.abspath)
parent_zipfile_path = os.path.join(self.cache.abspath,
mapping['parent_zipfile'])
with ZipFile(parent_zipfile_path, 'r') as parent_zipf:
parent_zipf.extract(mapping['raw_extracted_filename'],
self.cache.abspath)
if remove:
# Remove the parent zipfile
os.remove(parent_zipfile_path)
parent_zipfile_dir = os.path.dirname(mapping['parent_zipfile'])
# If the extracted parent zipfile lives in a
# subdirectory, we'll want to remove the directory
# as well
if parent_zipfile_dir:
os.rmdir(os.path.join(self.cache.abspath,
parent_zipfile_dir))
else:
zipf.extract(mapping['raw_extracted_filename'],
self.cache.abspath)
extracted_file_name = os.path.join(self.cache.abspath,
mapping['raw_extracted_filename'])
os.rename(extracted_file_name, local_file_name)
print("Added to cache: %s" % local_file_name)
else:
print("File is cached: %s" % local_file_name)
if remove:
os.remove(zip_fname)
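# Illustrative usage sketch (not part of the original module). The URL below is
# a placeholder; real result URLs come from the WA Datasource mappings. Zip URLs
# are expanded into their mapped files, while plain files are cached as-is.
def _example_fetch_results():
    fetcher = FetchResults()
    fetcher.fetch('http://example.com/results/2012-general.zip')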
| {
"content_hash": "5b6cba1fff84fd36372c13a07825cf0b",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 91,
"avg_line_length": 45.03333333333333,
"alnum_prop": 0.5474956822107081,
"repo_name": "openelections/openelections-core",
"id": "4de101388268c4b69e51ee7aee15ea73efcd77d2",
"size": "4053",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "openelex/us/wa/fetch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "57395"
},
{
"name": "Python",
"bytes": "949426"
}
],
"symlink_target": ""
} |
"""Generic interfaces to perform BMC.
It defines the results (e.g. traces, status).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
import numpy as np
import six
class InvarStatus:
Safe, Unsafe, Unknown = range(3)
@six.add_metaclass(abc.ABCMeta)
class BMCSpec():
"""BMCSpec
Defines the spec for a BMC engine"""
#__metaclass__ = abc.ABCMeta
# supersedes previous ways
@abc.abstractmethod
def trace_generator(self):
"""Returns trace generator"""
raise NotImplementedError
@abc.abstractmethod
def check(self, depth):
"""Returns one value from InvarStatus"""
raise NotImplementedError
@abc.abstractmethod
def get_trace(self):
"""Returns the last trace found or None if no trace exists."""
raise NotImplementedError
@abc.abstractmethod
def gen_new_disc_trace(self):
raise NotImplementedError
@abc.abstractmethod
def get_pwa_trace(self):
raise NotImplementedError
@six.add_metaclass(abc.ABCMeta)
class TraceSimple(object):
"""Simple Trace: provides minimal functionality"""
@abc.abstractmethod
def __getitem__(self, idx):
raise NotImplementedError
@abc.abstractmethod
def __iter__(self):
raise NotImplementedError
@abc.abstractmethod
def to_array(self):
raise NotImplementedError
@abc.abstractmethod
def __len__(self):
raise NotImplementedError
@abc.abstractmethod
def __str__(self):
raise NotImplementedError
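# Illustrative sketch (not part of the original module): a minimal concrete
# TraceSimple backed by a numpy array, showing which methods a trace object
# must implement.
class ArrayTrace(TraceSimple):
    def __init__(self, points):
        self._arr = np.asarray(points)
    def __getitem__(self, idx):
        return self._arr[idx]
    def __iter__(self):
        return iter(self._arr)
    def to_array(self):
        return self._arr
    def __len__(self):
        return len(self._arr)
    def __str__(self):
        return 'ArrayTrace({})'.format(self._arr)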
| {
"content_hash": "b5d8533d65fa7d3a037d428a67c24c3c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 70,
"avg_line_length": 21.77027027027027,
"alnum_prop": 0.6691495965238982,
"repo_name": "zutshi/S3CAMR",
"id": "af2dab812b4d09497c6a638c92f13b423d3f6cce",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bmc/bmc_spec.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2970"
},
{
"name": "MATLAB",
"bytes": "14618"
},
{
"name": "Makefile",
"bytes": "232"
},
{
"name": "Python",
"bytes": "528716"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('gender', models.CharField(default=b'U', max_length=1, choices=[(b'F', 'Female'), (b'M', 'Male'), (b'U', 'Unknown gender')])),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('middle_name', models.CharField(max_length=50, blank=True)),
('maiden_name', models.CharField(max_length=50, blank=True)),
('birth_date', models.DateField()),
('status', models.CharField(default=b'P', max_length=1, choices=[(b'P', 'Pending'), (b'A', 'Active'), (b'I', 'Inactive')])),
('native_lang', models.CharField(max_length=2, verbose_name='Native language', choices=[(b'EN', 'English'), (b'FR', 'French'), (b'EF', 'English/French')])),
('com_lang', models.CharField(max_length=2, verbose_name='Language for communication', choices=[(b'EN', 'English'), (b'FR', 'French'), (b'EF', 'English/French')])),
('cdif_exd', models.BooleanField(default=False, verbose_name='Expressive difficulty')),
('cdif_hoh', models.BooleanField(default=False, verbose_name='Hard of hearing')),
('cdif_anl', models.BooleanField(default=False, verbose_name='Analphabete')),
('cdif_cog', models.BooleanField(default=False, verbose_name='Cognitive loss')),
('direct_contact', models.BooleanField(default=True, choices=[(True, 'Yes'), (False, 'No')])),
('directions', models.TextField(default=b'', max_length=256, blank=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Referral',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ref_date', models.DateField()),
('notes', models.TextField(default=b'', max_length=250, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ReferralReason',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('reason_fr', models.CharField(max_length=100)),
('reason_en', models.CharField(max_length=100)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Relationship',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('emergency', models.BooleanField(default=False, verbose_name='Emergency contact')),
('follow_up', models.BooleanField(default=False, verbose_name='Follow-up')),
('info', models.CharField(max_length=100, verbose_name='Additional information', blank=True)),
('client', models.ForeignKey(to='clients.Client')),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RelationType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('contact_type', models.CharField(default=b'N', max_length=1, choices=[(b'N', 'Next of kin'), (b'W', 'Case worker')])),
('type_en', models.CharField(max_length=20)),
('type_fr', models.CharField(max_length=20)),
],
options={
},
bases=(models.Model,),
),
]
| {
"content_hash": "e29f5557ce709e8b9588d4f8b09b97d5",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 180,
"avg_line_length": 54.27956989247312,
"alnum_prop": 0.5641838351822503,
"repo_name": "delphcf/sis",
"id": "e1e4d567febbfd1a7814efd2260bdfd6ea02d75b",
"size": "5072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sis/clients/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "238944"
},
{
"name": "HTML",
"bytes": "152294"
},
{
"name": "JavaScript",
"bytes": "615251"
},
{
"name": "Python",
"bytes": "273835"
}
],
"symlink_target": ""
} |
"""Operations to emit summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import getpass
import os
import re
import threading
import time
import six
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import profiler as _profiler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_summary_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import summary_op_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Name for graph collection of summary writer init ops, which is only exposed
# as a legacy API for tf.contrib.summary in TF 1.x.
_SUMMARY_WRITER_INIT_COLLECTION_NAME = "_SUMMARY_WRITER_V2"
_EXPERIMENT_NAME_PATTERNS = re.compile(r"^[^\x00-\x1F<>]{0,256}$")
_RUN_NAME_PATTERNS = re.compile(r"^[^\x00-\x1F<>]{0,512}$")
_USER_NAME_PATTERNS = re.compile(r"^[a-z]([-a-z0-9]{0,29}[a-z0-9])?$", re.I)
class _SummaryState(threading.local):
def __init__(self):
super(_SummaryState, self).__init__()
self.is_recording = None
# TODO(slebedev): why a separate flag for DS and is it on by default?
self.is_recording_distribution_strategy = True
self.writer = None
self.step = None
_summary_state = _SummaryState()
def _should_record_summaries_internal(default_state):
"""Returns boolean Tensor if summaries should/shouldn't be recorded.
  The summary condition is the logical AND of the conditions below. First, a
  summary writer must be set. Given that constraint is met, the result is
  ctx.summary_recording AND ctx.summary_recording_distribution_strategy.
  The former is usually set by the user, and the latter is controlled by
  DistributionStrategy (tf.distribute.ReplicaContext).
Args:
default_state: can be True or False. The default summary behavior when
summary writer is set and the user does not specify
ctx.summary_recording and ctx.summary_recording_distribution_strategy
is True.
"""
if _summary_state.writer is None:
return constant_op.constant(False)
if not callable(_summary_state.is_recording):
static_cond = tensor_util.constant_value(_summary_state.is_recording)
if static_cond is not None and not static_cond:
return constant_op.constant(False)
resolve = lambda x: x() if callable(x) else x
cond_distributed = resolve(_summary_state.is_recording_distribution_strategy)
cond = resolve(_summary_state.is_recording)
if cond is None:
cond = default_state
return math_ops.logical_and(cond_distributed, cond)
def _should_record_summaries_v2():
"""Returns boolean Tensor which is true if summaries should be recorded.
If no recording status has been set, this defaults to True, unlike the public
should_record_summaries().
"""
return _should_record_summaries_internal(default_state=True)
@tf_export("summary.should_record_summaries", v1=[])
def should_record_summaries():
"""Returns boolean Tensor which is true if summaries should be recorded."""
return _should_record_summaries_internal(default_state=False)
@tf_export("summary.record_if", v1=[])
@tf_contextlib.contextmanager
def record_if(condition):
"""Sets summary recording on or off per the provided boolean value.
The provided value can be a python boolean, a scalar boolean Tensor, or
  a callable providing such a value; if a callable is passed it will be
invoked on-demand to determine whether summary writing will occur.
Args:
condition: can be True, False, a bool Tensor, or a callable providing such.
Yields:
    A context manager that sets this value on enter and restores the
previous value on exit.
"""
old = _summary_state.is_recording
try:
_summary_state.is_recording = condition
yield
finally:
_summary_state.is_recording = old
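# Illustrative usage sketch (not part of the original module): gating summary
# writing with record_if(). Assumes eager execution (TF 2.x default); the
# logdir and the "every 100 steps" condition are arbitrary example values.
def _example_record_if_usage():  # documentation-only helper
  writer = create_file_writer_v2("/tmp/example_logs")
  step = constant_op.constant(100, dtypes.int64)
  with writer.as_default():
    with record_if(lambda: math_ops.equal(step % 100, 0)):
      write("loss", 0.25, step=step)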
# TODO(apassos) consider how to handle local step here.
def record_summaries_every_n_global_steps(n, global_step=None):
"""Sets the should_record_summaries Tensor to true if global_step % n == 0."""
if global_step is None:
global_step = training_util.get_or_create_global_step()
with ops.device("cpu:0"):
should = lambda: math_ops.equal(global_step % n, 0)
if not context.executing_eagerly():
should = should()
return record_if(should)
def always_record_summaries():
"""Sets the should_record_summaries Tensor to always true."""
return record_if(True)
def never_record_summaries():
"""Sets the should_record_summaries Tensor to always false."""
return record_if(False)
@tf_export("summary.experimental.get_step", v1=[])
def get_step():
"""Returns the default summary step for the current thread.
Returns:
The step set by `tf.summary.experimental.set_step()` if one has been set,
otherwise None.
"""
return _summary_state.step
@tf_export("summary.experimental.set_step", v1=[])
def set_step(step):
"""Sets the default summary step for the current thread.
For convenience, this function sets a default value for the `step` parameter
used in summary-writing functions elsewhere in the API so that it need not
be explicitly passed in every such invocation. The value can be a constant
or a variable, and can be retrieved via `tf.summary.experimental.get_step()`.
Note: when using this with @tf.functions, the step value will be captured at
the time the function is traced, so changes to the step outside the function
will not be reflected inside the function unless using a `tf.Variable` step.
Args:
step: An `int64`-castable default step value, or None to unset.
"""
_summary_state.step = step
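# Illustrative usage sketch (not part of the original module): setting a default
# step so later summary calls can omit the step argument. Assumes eager
# execution; the logdir and values are arbitrary examples.
def _example_set_step_usage():  # documentation-only helper
  set_step(42)
  writer = create_file_writer_v2("/tmp/example_logs")
  with writer.as_default():
    write("accuracy", 0.9)  # picks up the default step (42) set above
  set_step(None)  # clear the thread-local default again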
@tf_export("summary.SummaryWriter", v1=[])
@six.add_metaclass(abc.ABCMeta)
class SummaryWriter(object):
"""Interface representing a stateful summary writer object."""
@abc.abstractmethod
def set_as_default(self):
"""Enables this summary writer for the current thread."""
raise NotImplementedError()
@abc.abstractmethod
@tf_contextlib.contextmanager
def as_default(self):
"""Returns a context manager that enables summary writing."""
raise NotImplementedError()
def init(self):
"""Initializes the summary writer."""
raise NotImplementedError()
def flush(self):
"""Flushes any buffered data."""
raise NotImplementedError()
def close(self):
"""Flushes and closes the summary writer."""
raise NotImplementedError()
class ResourceSummaryWriter(SummaryWriter):
"""Implementation of SummaryWriter using a SummaryWriterInterface resource."""
def __init__(self,
shared_name,
init_op_fn,
name=None,
v2=False,
metadata=None):
self._resource = gen_summary_ops.summary_writer(
shared_name=shared_name, name=name)
# TODO(nickfelt): cache other constructed ops in graph mode
self._init_op_fn = init_op_fn
self._init_op = init_op_fn(self._resource)
self._v2 = v2
self._metadata = {} if metadata is None else metadata
self._closed = False
if context.executing_eagerly():
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device="cpu:0")
else:
ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, self._init_op)
def set_as_default(self):
"""Enables this summary writer for the current thread."""
if self._v2 and context.executing_eagerly() and self._closed:
raise RuntimeError("SummaryWriter is already closed")
_summary_state.writer = self
@tf_contextlib.contextmanager
def as_default(self):
"""Returns a context manager that enables summary writing."""
if self._v2 and context.executing_eagerly() and self._closed:
raise RuntimeError("SummaryWriter is already closed")
old = _summary_state.writer
try:
_summary_state.writer = self
yield self
# Flushes the summary writer in eager mode or in graph functions, but
# not in legacy graph mode (you're on your own there).
self.flush()
finally:
_summary_state.writer = old
def init(self):
"""Initializes the summary writer."""
if self._v2:
if context.executing_eagerly() and self._closed:
raise RuntimeError("SummaryWriter is already closed")
return self._init_op
# Legacy behavior allows re-initializing the resource.
return self._init_op_fn(self._resource)
def flush(self):
"""Flushes any buffered data."""
if self._v2 and context.executing_eagerly() and self._closed:
return
return _flush_fn(writer=self)
def close(self):
"""Flushes and closes the summary writer."""
if self._v2 and context.executing_eagerly() and self._closed:
return
try:
with ops.control_dependencies([self.flush()]):
with ops.device("cpu:0"):
return gen_summary_ops.close_summary_writer(self._resource)
finally:
if self._v2 and context.executing_eagerly():
self._closed = True
class NoopSummaryWriter(SummaryWriter):
"""A summary writer that does nothing, for create_noop_writer()."""
def set_as_default(self):
pass
@tf_contextlib.contextmanager
def as_default(self):
yield
def init(self):
pass
def flush(self):
pass
def close(self):
pass
@tf_export(v1=["summary.initialize"])
def initialize(
graph=None, # pylint: disable=redefined-outer-name
session=None):
"""Initializes summary writing for graph execution mode.
This operation is a no-op when executing eagerly.
This helper method provides a higher-level alternative to using
`tf.contrib.summary.summary_writer_initializer_op` and
`tf.contrib.summary.graph`.
Most users will also want to call `tf.compat.v1.train.create_global_step`
which can happen before or after this function is called.
Args:
graph: A `tf.Graph` or `tf.compat.v1.GraphDef` to output to the writer.
This function will not write the default graph by default. When
writing to an event log file, the associated step will be zero.
session: So this method can call `tf.Session.run`. This defaults
to `tf.compat.v1.get_default_session`.
Raises:
RuntimeError: If the current thread has no default
`tf.contrib.summary.SummaryWriter`.
ValueError: If session wasn't passed and no default session.
"""
if context.executing_eagerly():
return
if _summary_state.writer is None:
raise RuntimeError("No default tf.contrib.summary.SummaryWriter found")
if session is None:
session = ops.get_default_session()
if session is None:
raise ValueError("session must be passed if no default session exists")
session.run(summary_writer_initializer_op())
if graph is not None:
data = _serialize_graph(graph)
x = array_ops.placeholder(dtypes.string)
session.run(_graph(x, 0), feed_dict={x: data})
@tf_export("summary.create_file_writer", v1=[])
def create_file_writer_v2(logdir,
max_queue=None,
flush_millis=None,
filename_suffix=None,
name=None):
"""Creates a summary file writer for the given log directory.
Args:
logdir: a string specifying the directory in which to write an event file.
max_queue: the largest number of summaries to keep in a queue; will
flush once the queue gets bigger than this. Defaults to 10.
flush_millis: the largest interval between flushes. Defaults to 120,000.
filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
name: a name for the op that creates the writer.
Returns:
A SummaryWriter object.
"""
if logdir is None:
raise ValueError("logdir cannot be None")
inside_function = ops.inside_function()
with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
# Run init inside an init_scope() to hoist it out of tf.functions.
with ops.init_scope():
if context.executing_eagerly():
_check_create_file_writer_args(
inside_function,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix)
logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
if max_queue is None:
max_queue = constant_op.constant(10)
if flush_millis is None:
flush_millis = constant_op.constant(2 * 60 * 1000)
if filename_suffix is None:
filename_suffix = constant_op.constant(".v2")
# Prepend the PID and a process-local UID to the filename suffix to avoid
# filename collisions within the machine (the filename already contains
# the hostname to avoid cross-machine collisions).
unique_prefix = constant_op.constant(".%s.%s" % (os.getpid(), ops.uid()))
filename_suffix = unique_prefix + filename_suffix
# Use a unique shared_name to prevent resource sharing.
if context.executing_eagerly():
shared_name = context.shared_name()
else:
shared_name = ops.name_from_scope_name(scope) # pylint: disable=protected-access
return ResourceSummaryWriter(
shared_name=shared_name,
init_op_fn=functools.partial(
gen_summary_ops.create_summary_file_writer,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix),
name=name,
v2=True,
metadata={"logdir": logdir})
def create_file_writer(logdir,
max_queue=None,
flush_millis=None,
filename_suffix=None,
name=None):
"""Creates a summary file writer in the current context under the given name.
Args:
logdir: a string, or None. If a string, creates a summary file writer
which writes to the directory named by the string. If None, returns
a mock object which acts like a summary writer but does nothing,
useful to use as a context manager.
max_queue: the largest number of summaries to keep in a queue; will
flush once the queue gets bigger than this. Defaults to 10.
flush_millis: the largest interval between flushes. Defaults to 120,000.
filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
name: Shared name for this SummaryWriter resource stored to default
Graph. Defaults to the provided logdir prefixed with `logdir:`. Note: if a
summary writer resource with this shared name already exists, the returned
SummaryWriter wraps that resource and the other arguments have no effect.
Returns:
Either a summary writer or an empty object which can be used as a
summary writer.
"""
if logdir is None:
return NoopSummaryWriter()
logdir = str(logdir)
with ops.device("cpu:0"):
if max_queue is None:
max_queue = constant_op.constant(10)
if flush_millis is None:
flush_millis = constant_op.constant(2 * 60 * 1000)
if filename_suffix is None:
filename_suffix = constant_op.constant(".v2")
if name is None:
name = "logdir:" + logdir
return ResourceSummaryWriter(
shared_name=name,
init_op_fn=functools.partial(
gen_summary_ops.create_summary_file_writer,
logdir=logdir,
max_queue=max_queue,
flush_millis=flush_millis,
filename_suffix=filename_suffix))
def create_db_writer(db_uri,
experiment_name=None,
run_name=None,
user_name=None,
name=None):
"""Creates a summary database writer in the current context.
This can be used to write tensors from the execution graph directly
to a database. Only SQLite is supported right now. This function
will create the schema if it doesn't exist. Entries in the Users,
Experiments, and Runs tables will be created automatically if they
don't already exist.
Args:
db_uri: For example "file:/tmp/foo.sqlite".
experiment_name: Defaults to YYYY-MM-DD in local time if None.
Empty string means the Run will not be associated with an
Experiment. Can't contain ASCII control characters or <>. Case
sensitive.
run_name: Defaults to HH:MM:SS in local time if None. Empty string
means a Tag will not be associated with any Run. Can't contain
ASCII control characters or <>. Case sensitive.
user_name: Defaults to system username if None. Empty means the
Experiment will not be associated with a User. Must be valid as
both a DNS label and Linux username.
name: Shared name for this SummaryWriter resource stored to default
`tf.Graph`.
Returns:
A `tf.summary.SummaryWriter` instance.
"""
with ops.device("cpu:0"):
if experiment_name is None:
experiment_name = time.strftime("%Y-%m-%d", time.localtime(time.time()))
if run_name is None:
run_name = time.strftime("%H:%M:%S", time.localtime(time.time()))
if user_name is None:
user_name = getpass.getuser()
experiment_name = _cleanse_string(
"experiment_name", _EXPERIMENT_NAME_PATTERNS, experiment_name)
run_name = _cleanse_string("run_name", _RUN_NAME_PATTERNS, run_name)
user_name = _cleanse_string("user_name", _USER_NAME_PATTERNS, user_name)
return ResourceSummaryWriter(
shared_name=name,
init_op_fn=functools.partial(
gen_summary_ops.create_summary_db_writer,
db_uri=db_uri,
experiment_name=experiment_name,
run_name=run_name,
user_name=user_name))
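# Illustrative usage sketch (not part of the original module): creating a SQLite
# database writer. The URI and the experiment/run/user names are arbitrary
# example values that satisfy the validation patterns above.
def _example_create_db_writer_usage():  # documentation-only helper
  return create_db_writer(
      "file:/tmp/example.sqlite",
      experiment_name="mnist",
      run_name="run-1",
      user_name="alice")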
@tf_export("summary.create_noop_writer", v1=[])
def create_noop_writer():
"""Returns a summary writer that does nothing.
This is useful as a placeholder in code that expects a context manager.
"""
return NoopSummaryWriter()
def _cleanse_string(name, pattern, value):
if isinstance(value, six.string_types) and pattern.search(value) is None:
raise ValueError("%s (%s) must match %s" % (name, value, pattern.pattern))
return ops.convert_to_tensor(value, dtypes.string)
def _nothing():
"""Convenient else branch for when summaries do not record."""
return constant_op.constant(False)
@tf_export(v1=["summary.all_v2_summary_ops"])
def all_v2_summary_ops():
"""Returns all V2-style summary ops defined in the current default graph.
This includes ops from TF 2.0 tf.summary and TF 1.x tf.contrib.summary (except
for `tf.contrib.summary.graph` and `tf.contrib.summary.import_event`), but
does *not* include TF 1.x tf.summary ops.
Returns:
List of summary ops, or None if called under eager execution.
"""
if context.executing_eagerly():
return None
return ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
def summary_writer_initializer_op():
"""Graph-mode only. Returns the list of ops to create all summary writers.
Returns:
The initializer ops.
Raises:
RuntimeError: If in Eager mode.
"""
if context.executing_eagerly():
raise RuntimeError(
"tf.contrib.summary.summary_writer_initializer_op is only "
"supported in graph mode.")
return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)
_INVALID_SCOPE_CHARACTERS = re.compile(r"[^-_/.A-Za-z0-9]")
@tf_export("summary.experimental.summary_scope", v1=[])
@tf_contextlib.contextmanager
def summary_scope(name, default_name="summary", values=None):
"""Experimental context manager for use when defining a custom summary op.
This behaves similarly to `tf.name_scope`, except that it returns a generated
summary tag in addition to the scope name. The tag is structurally similar to
the scope name - derived from the user-provided name, prefixed with enclosing
name scopes if any - but we relax the constraint that it be uniquified, as
well as the character set limitation (so the user-provided name can contain
characters not legal for scope names; in the scope name these are removed).
This makes the summary tag more predictable and consistent for the user.
For example, to define a new summary op called `my_op`:
```python
def my_op(name, my_value, step):
with tf.summary.summary_scope(name, "MyOp", [my_value]) as (tag, scope):
my_value = tf.convert_to_tensor(my_value)
return tf.summary.write(tag, my_value, step=step)
```
Args:
name: string name for the summary.
default_name: Optional; if provided, used as default name of the summary.
values: Optional; passed as `values` parameter to name_scope.
Yields:
A tuple `(tag, scope)` as described above.
"""
name = name or default_name
current_scope = ops.get_name_scope()
tag = current_scope + "/" + name if current_scope else name
# Strip illegal characters from the scope name, and if that leaves nothing,
# use None instead so we pick up the default name.
name = _INVALID_SCOPE_CHARACTERS.sub("", name) or None
with ops.name_scope(name, default_name, values, skip_on_eager=False) as scope:
yield tag, scope
@tf_export("summary.write", v1=[])
def write(tag, tensor, step=None, metadata=None, name=None):
"""Writes a generic summary to the default SummaryWriter if one exists.
This exists primarily to support the definition of type-specific summary ops
like scalar() and image(), and is not intended for direct use unless defining
a new type-specific summary op.
Args:
tag: string tag used to identify the summary (e.g. in TensorBoard), usually
generated with `tf.summary.summary_scope`
tensor: the Tensor holding the summary data to write or a callable that
returns this Tensor. If a callable is passed, it will only be called when
a default SummaryWriter exists and the recording condition specified by
`record_if()` is met.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
metadata: Optional SummaryMetadata, as a proto or serialized bytes
name: Optional string name for this op.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
with ops.name_scope(name, "write_summary") as scope:
if _summary_state.writer is None:
return constant_op.constant(False)
if step is None:
step = get_step()
if step is None:
raise ValueError("No step set via 'step' argument or "
"tf.summary.experimental.set_step()")
if metadata is None:
serialized_metadata = b""
elif hasattr(metadata, "SerializeToString"):
serialized_metadata = metadata.SerializeToString()
else:
serialized_metadata = metadata
def record():
"""Record the actual summary and return True."""
# Note the identity to move the tensor to the CPU.
with ops.device("cpu:0"):
summary_tensor = tensor() if callable(tensor) else array_ops.identity(
tensor)
write_summary_op = gen_summary_ops.write_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
step,
summary_tensor,
tag,
serialized_metadata,
name=scope)
with ops.control_dependencies([write_summary_op]):
return constant_op.constant(True)
op = smart_cond.smart_cond(
_should_record_summaries_v2(), record, _nothing, name="summary_cond")
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
return op
@tf_export("summary.experimental.write_raw_pb", v1=[])
def write_raw_pb(tensor, step=None, name=None):
"""Writes a summary using raw `tf.compat.v1.Summary` protocol buffers.
Experimental: this exists to support the usage of V1-style manual summary
writing (via the construction of a `tf.compat.v1.Summary` protocol buffer)
with the V2 summary writing API.
Args:
tensor: the string Tensor holding one or more serialized `Summary` protobufs
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
name: Optional string name for this op.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
with ops.name_scope(name, "write_raw_pb") as scope:
if _summary_state.writer is None:
return constant_op.constant(False)
if step is None:
step = get_step()
if step is None:
raise ValueError("No step set via 'step' argument or "
"tf.summary.experimental.set_step()")
def record():
"""Record the actual summary and return True."""
# Note the identity to move the tensor to the CPU.
with ops.device("cpu:0"):
raw_summary_op = gen_summary_ops.write_raw_proto_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
step,
array_ops.identity(tensor),
name=scope)
with ops.control_dependencies([raw_summary_op]):
return constant_op.constant(True)
with ops.device("cpu:0"):
op = smart_cond.smart_cond(
_should_record_summaries_v2(), record, _nothing, name="summary_cond")
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
return op
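# Illustrative usage sketch (not part of the original module): writing a
# manually built `tf.compat.v1.Summary` proto through the V2 API. The tag,
# value and logdir are arbitrary example values.
def _example_write_raw_pb_usage():  # documentation-only helper
  pb = summary_pb2.Summary()
  pb.value.add(tag="manual/loss", simple_value=0.5)
  writer = create_file_writer_v2("/tmp/example_logs")
  with writer.as_default():
    write_raw_pb(pb.SerializeToString(), step=0)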
def summary_writer_function(name, tensor, function, family=None):
"""Helper function to write summaries.
Args:
name: name of the summary
tensor: main tensor to form the summary
function: function taking a tag and a scope which writes the summary
family: optional, the summary's family
Returns:
The result of writing the summary.
"""
name_scope = ops.get_name_scope()
if name_scope:
# Add a slash to allow reentering the name scope.
name_scope += "/"
def record():
with ops.name_scope(name_scope), summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
with ops.control_dependencies([function(tag, scope)]):
return constant_op.constant(True)
if _summary_state.writer is None:
return control_flow_ops.no_op()
with ops.device("cpu:0"):
op = smart_cond.smart_cond(
should_record_summaries(), record, _nothing, name="")
if not context.executing_eagerly():
ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) # pylint: disable=protected-access
return op
def generic(name, tensor, metadata=None, family=None, step=None):
"""Writes a tensor summary if possible."""
def function(tag, scope):
if metadata is None:
serialized_metadata = constant_op.constant("")
elif hasattr(metadata, "SerializeToString"):
serialized_metadata = constant_op.constant(metadata.SerializeToString())
else:
serialized_metadata = metadata
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
array_ops.identity(tensor),
tag,
serialized_metadata,
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def scalar(name, tensor, family=None, step=None):
"""Writes a scalar summary if possible.
Unlike `tf.contrib.summary.generic` this op may change the dtype
depending on the writer, for both practical and efficiency concerns.
Args:
name: An arbitrary name for this summary.
tensor: A `tf.Tensor` Must be one of the following types:
`float32`, `float64`, `int32`, `int64`, `uint8`, `int16`,
`int8`, `uint16`, `half`, `uint32`, `uint64`.
family: Optional, the summary's family.
step: The `int64` monotonic step variable, which defaults
to `tf.compat.v1.train.get_global_step`.
Returns:
The created `tf.Operation` or a `tf.no_op` if summary writing has
not been enabled for this context.
"""
def function(tag, scope):
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_scalar_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
tag,
array_ops.identity(tensor),
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def histogram(name, tensor, family=None, step=None):
"""Writes a histogram summary if possible."""
def function(tag, scope):
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_histogram_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
tag,
array_ops.identity(tensor),
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def image(name, tensor, bad_color=None, max_images=3, family=None, step=None):
"""Writes an image summary if possible."""
def function(tag, scope):
bad_color_ = (constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
if bad_color is None else bad_color)
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_image_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
tag,
array_ops.identity(tensor),
bad_color_,
max_images,
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def audio(name, tensor, sample_rate, max_outputs, family=None, step=None):
"""Writes an audio summary if possible."""
def function(tag, scope):
# Note the identity to move the tensor to the CPU.
return gen_summary_ops.write_audio_summary(
_summary_state.writer._resource, # pylint: disable=protected-access
_choose_step(step),
tag,
array_ops.identity(tensor),
sample_rate=sample_rate,
max_outputs=max_outputs,
name=scope)
return summary_writer_function(name, tensor, function, family=family)
def graph(param, step=None, name=None):
"""Writes a TensorFlow graph to the summary interface.
The graph summary is, strictly speaking, not a summary. Conditions
like `tf.summary.should_record_summaries` do not apply. Only
a single graph can be associated with a particular run. If multiple
graphs are written, then only the last one will be considered by
TensorBoard.
When not using eager execution mode, the user should consider passing
the `graph` parameter to `tf.compat.v1.summary.initialize` instead of
calling this function. Otherwise special care needs to be taken when
using the graph to record the graph.
Args:
param: A `tf.Tensor` containing a serialized graph proto. When
eager execution is enabled, this function will automatically
coerce `tf.Graph`, `tf.compat.v1.GraphDef`, and string types.
step: The global step variable. This doesn't have useful semantics
for graph summaries, but is used anyway, due to the structure of
event log files. This defaults to the global step.
name: A name for the operation (optional).
Returns:
The created `tf.Operation` or a `tf.no_op` if summary writing has
not been enabled for this context.
Raises:
TypeError: If `param` isn't already a `tf.Tensor` in graph mode.
"""
if not context.executing_eagerly() and not isinstance(param, ops.Tensor):
raise TypeError("graph() needs a tf.Tensor (e.g. tf.placeholder) in graph "
"mode, but was: %s" % type(param))
writer = _summary_state.writer
if writer is None:
return control_flow_ops.no_op()
with ops.device("cpu:0"):
if isinstance(param, (ops.Graph, graph_pb2.GraphDef)):
tensor = ops.convert_to_tensor(_serialize_graph(param), dtypes.string)
else:
tensor = array_ops.identity(param)
return gen_summary_ops.write_graph_summary(
writer._resource, _choose_step(step), tensor, name=name) # pylint: disable=protected-access
_graph = graph # for functions with a graph parameter
def import_event(tensor, name=None):
"""Writes a `tf.compat.v1.Event` binary proto.
This can be used to import existing event logs into a new summary writer sink.
Please note that this is lower level than the other summary functions and
will ignore the `tf.summary.should_record_summaries` setting.
Args:
tensor: A `tf.Tensor` of type `string` containing a serialized
`tf.compat.v1.Event` proto.
name: A name for the operation (optional).
Returns:
The created `tf.Operation`.
"""
return gen_summary_ops.import_event(
_summary_state.writer._resource, tensor, name=name) # pylint: disable=protected-access
@tf_export("summary.flush", v1=[])
def flush(writer=None, name=None):
"""Forces summary writer to send any buffered data to storage.
This operation blocks until that finishes.
Args:
    writer: The `tf.summary.SummaryWriter` resource to flush.
      The thread default will be used if this parameter is None; if there is
      no default writer either, a `tf.no_op` is returned.
name: A name for the operation (optional).
Returns:
The created `tf.Operation`.
"""
if writer is None:
writer = _summary_state.writer
if writer is None:
return control_flow_ops.no_op()
if isinstance(writer, ResourceSummaryWriter):
resource = writer._resource # pylint: disable=protected-access
else:
# Assume we were passed a raw resource tensor.
resource = writer
with ops.device("cpu:0"):
return gen_summary_ops.flush_summary_writer(resource, name=name)
_flush_fn = flush # for within SummaryWriter.flush()
def eval_dir(model_dir, name=None):
"""Construct a logdir for an eval summary writer."""
return os.path.join(model_dir, "eval" if not name else "eval_" + name)
@deprecation.deprecated(date=None,
instructions="Renamed to create_file_writer().")
def create_summary_file_writer(*args, **kwargs):
"""Please use `tf.contrib.summary.create_file_writer`."""
logging.warning("Deprecation Warning: create_summary_file_writer was renamed "
"to create_file_writer")
return create_file_writer(*args, **kwargs)
def _serialize_graph(arbitrary_graph):
if isinstance(arbitrary_graph, ops.Graph):
return arbitrary_graph.as_graph_def(add_shapes=True).SerializeToString()
else:
return arbitrary_graph.SerializeToString()
def _choose_step(step):
if step is None:
return training_util.get_or_create_global_step()
if not isinstance(step, ops.Tensor):
return ops.convert_to_tensor(step, dtypes.int64)
return step
def _check_create_file_writer_args(inside_function, **kwargs):
"""Helper to check the validity of arguments to a create_file_writer() call.
Args:
inside_function: whether the create_file_writer() call is in a tf.function
**kwargs: the arguments to check, as kwargs to give them names.
Raises:
ValueError: if the arguments are graph tensors.
"""
for arg_name, arg in kwargs.items():
if not isinstance(arg, ops.EagerTensor) and tensor_util.is_tensor(arg):
if inside_function:
raise ValueError(
"Invalid graph Tensor argument \"%s=%s\" to create_file_writer() "
"inside an @tf.function. The create call will be lifted into the "
"outer eager execution context, so it cannot consume graph tensors "
"defined inside the function body." % (arg_name, arg))
else:
raise ValueError(
"Invalid graph Tensor argument \"%s=%s\" to eagerly executed "
"create_file_writer()." % (arg_name, arg))
def run_metadata(name, data, step=None):
"""Writes entire RunMetadata summary.
A RunMetadata can contain DeviceStats, partition graphs, and function graphs.
Please refer to the proto for definition of each field.
Args:
name: A name for this summary. The summary tag used for TensorBoard will be
this name prefixed by any active name scopes.
data: A RunMetadata proto to write.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = summary_pb2.SummaryMetadata()
# Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
# the rationale.
summary_metadata.plugin_data.plugin_name = "graph_run_metadata"
# version number = 1
summary_metadata.plugin_data.content = b"1"
with summary_scope(name,
"graph_run_metadata_summary",
[data, step]) as (tag, _):
with ops.device("cpu:0"):
tensor = constant_op.constant(data.SerializeToString(),
dtype=dtypes.string)
return write(
tag=tag,
tensor=tensor,
step=step,
metadata=summary_metadata)
def run_metadata_graphs(name, data, step=None):
"""Writes graphs from a RunMetadata summary.
Args:
name: A name for this summary. The summary tag used for TensorBoard will be
this name prefixed by any active name scopes.
data: A RunMetadata proto to write.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
Returns:
True on success, or false if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = summary_pb2.SummaryMetadata()
# Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
# the rationale.
summary_metadata.plugin_data.plugin_name = "graph_run_metadata_graph"
# version number = 1
summary_metadata.plugin_data.content = b"1"
data = config_pb2.RunMetadata(
function_graphs=data.function_graphs,
partition_graphs=data.partition_graphs)
with summary_scope(name,
"graph_run_metadata_graph_summary",
[data, step]) as (tag, _):
with ops.device("cpu:0"):
tensor = constant_op.constant(data.SerializeToString(),
dtype=dtypes.string)
return write(
tag=tag,
tensor=tensor,
step=step,
metadata=summary_metadata)
def keras_model(name, data, step=None):
"""Writes a Keras model as JSON to as a Summary.
Writing the Keras model configuration allows the TensorBoard graph plugin to
render a conceptual graph, as opposed to graph of ops. In case the model fails
to serialize as JSON, it ignores and returns False.
Args:
name: A name for this summary. The summary tag used for TensorBoard will be
this name prefixed by any active name scopes.
data: A Keras Model to write.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
Returns:
True on success, or False if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = summary_pb2.SummaryMetadata()
# Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
# the rationale.
summary_metadata.plugin_data.plugin_name = "graph_keras_model"
# version number = 1
summary_metadata.plugin_data.content = b"1"
try:
json_string = data.to_json()
except Exception as exc: # pylint: disable=broad-except
# An exception should not break a model code.
logging.warn("Model failed to serialize as JSON. Ignoring... %s" % exc)
return False
with summary_scope(name, "graph_keras_model", [data, step]) as (tag, _):
with ops.device("cpu:0"):
tensor = constant_op.constant(json_string, dtype=dtypes.string)
return write(
tag=tag,
tensor=tensor,
step=step,
metadata=summary_metadata)
_TraceContext = collections.namedtuple("TraceContext", ("graph", "profiler"))
_current_trace_context_lock = threading.Lock()
_current_trace_context = None
@tf_export("summary.trace_on", v1=[])
def trace_on(graph=True, profiler=False): # pylint: disable=redefined-outer-name
"""Starts a trace to record computation graphs and profiling information.
Must be invoked in eager mode.
  When enabled, the TensorFlow runtime will collect information that can later be
exported and consumed by TensorBoard. The trace is activated across the entire
TensorFlow runtime and affects all threads of execution.
To stop the trace and export the collected information, use
`tf.summary.trace_export`. To stop the trace without exporting, use
`tf.summary.trace_off`.
Args:
graph: If True, enables collection of executed graphs. It includes ones from
tf.function invocation and ones from the legacy graph mode. The default
is True.
profiler: If True, enables the advanced profiler. Enabling profiler
implicitly enables the graph collection. The profiler may incur a high
memory overhead. The default is False.
"""
if ops.inside_function():
logging.warn("Cannot enable trace inside a tf.function.")
return
if not context.context().executing_eagerly():
logging.warn("Must enable trace in eager mode.")
return
global _current_trace_context
with _current_trace_context_lock:
if _current_trace_context:
logging.warn("Trace already enabled")
return
if graph and not profiler:
context.context().enable_graph_collection()
if profiler:
context.context().enable_run_metadata()
_profiler.start()
_current_trace_context = _TraceContext(graph=graph, profiler=profiler)
@tf_export("summary.trace_export", v1=[])
def trace_export(name, step=None, profiler_outdir=None):
"""Stops and exports the active trace as a Summary and/or profile file.
Stops the trace and exports all metadata collected during the trace to the
default SummaryWriter, if one has been set.
Args:
name: A name for the summary to be written.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
profiler_outdir: Output directory for profiler. This is only used when the
profiler was enabled when the trace was started. In that case, if there is
a logdir-based default SummaryWriter, this defaults to the same directory,
but otherwise the argument must be passed.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
global _current_trace_context
if ops.inside_function():
logging.warn("Cannot export trace inside a tf.function.")
return
if not context.context().executing_eagerly():
logging.warn("Can only export trace while executing eagerly.")
return
with _current_trace_context_lock:
if _current_trace_context is None:
raise ValueError("Must enable trace before export.")
graph, profiler = _current_trace_context # pylint: disable=redefined-outer-name
if profiler_outdir is None \
and isinstance(_summary_state.writer, ResourceSummaryWriter):
logdir = _summary_state.writer._metadata.get("logdir") # pylint: disable=protected-access
if logdir is not None:
profiler_outdir = logdir
if profiler and profiler_outdir is None:
raise ValueError("Must set profiler_outdir or "
"enable summary writer with logdir.")
run_meta = context.context().export_run_metadata()
if graph and not profiler:
run_metadata_graphs(name, run_meta, step)
else:
run_metadata(name, run_meta, step)
if profiler:
_profiler.save(profiler_outdir, _profiler.stop())
trace_off()
@tf_export("summary.trace_off", v1=[])
def trace_off():
"""Stops the current trace and discards any collected information."""
global _current_trace_context
with _current_trace_context_lock:
_current_trace_context = None
# Disabling run_metadata disables graph collection as well.
context.context().disable_run_metadata()
# profiler only has start and stop. One needs to stop in order to export
# and stopping when it is not running will raise an error.
try:
_profiler.stop()
except _profiler.ProfilerNotRunningError:
pass
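# ---------------------------------------------------------------------------
# Usage sketch (not part of the module; the writer path and traced function
# are illustrative assumptions). The trace API above is typically driven from
# user code roughly like this:
#
#   writer = tf.summary.create_file_writer("/tmp/logdir")
#
#   @tf.function
#   def square(x):
#     return x * x
#
#   tf.summary.trace_on(graph=True, profiler=False)
#   square(tf.constant(2.0))              # run the computation to be traced
#   with writer.as_default():
#     tf.summary.trace_export(name="square_trace", step=0)
#   # ...or call tf.summary.trace_off() to discard the collected data instead.
# ---------------------------------------------------------------------------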
| {
"content_hash": "fda768ad3d892b522915bf474486cd3d",
"timestamp": "",
"source": "github",
"line_count": 1264,
"max_line_length": 104,
"avg_line_length": 36.55537974683544,
"alnum_prop": 0.6912738605375925,
"repo_name": "gunan/tensorflow",
"id": "91b8e61b3411f6d3dd97d731c48e19800207cce2",
"size": "46896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/summary_ops_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45924"
},
{
"name": "C",
"bytes": "774953"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "77908225"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "104215"
},
{
"name": "Go",
"bytes": "1841471"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "962443"
},
{
"name": "Jupyter Notebook",
"bytes": "556650"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1479029"
},
{
"name": "Makefile",
"bytes": "58603"
},
{
"name": "Objective-C",
"bytes": "104667"
},
{
"name": "Objective-C++",
"bytes": "297830"
},
{
"name": "PHP",
"bytes": "23994"
},
{
"name": "Pascal",
"bytes": "3739"
},
{
"name": "Pawn",
"bytes": "17039"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "39476740"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "650007"
},
{
"name": "Smarty",
"bytes": "34649"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import re
import struct
from .util import open_if_filename, tryint
from .genres import genre_by_index
HEADER_SIZE = 8
re_atom_type = re.compile(r'[A-Za-z0-9\-©]{4}')
def read_atom_header(readfunc, offset):
header = readfunc(offset, HEADER_SIZE)
if len(header) == HEADER_SIZE:
size, byte_type = struct.unpack('!i4s', header)
str_type = str(byte_type, 'latin-1')
return (size, str_type)
else:
return ()
def is_valid_atom_type(atom_type):
    # A valid atom type is exactly four characters from the allowed set.
    return re_atom_type.match(atom_type) is not None
# Base atom classes *****************************************
class Atom:
cls_data_model = ''
def __init__(self, parent, start_offset, header=None):
"""parent is anything that has a read method"""
self.parent = parent
self.start_offset = start_offset
self.size = 0
self.type = ''
self._valid = False
self._data = None
if header is None:
header = read_atom_header(self.read, -HEADER_SIZE)
if header:
self.size, self.type = header
self._valid = True
#--- Protected
def _get_data_model(self):
return self.cls_data_model
def _read_atom_data(self):
dm = '!' + self._get_data_model()
if '*s' in dm:
prevsize = struct.calcsize(dm.replace('*s', ''))
dm = dm.replace('*s', '%ds' % (self.content_size - prevsize), 1).replace('*s', '')
self._datasize = struct.calcsize(dm)
data = self.read(0, self._datasize)
if len(data) < self._datasize:
data = data.ljust(self._datasize)
return struct.unpack(dm, data)
#--- Public
def read(self, startat=0, readcount=-1):
if readcount < 0:
readcount = self.content_size
return self.parent.read(self.start_offset + HEADER_SIZE + startat, readcount)
#--- Properties
@property
def content_size(self):
return self.size - HEADER_SIZE
@property
def data(self):
if self._data is None:
self._data = self._read_atom_data()
return self._data
@property
def valid(self):
return self._valid
class AtomBox(Atom):
def __init__(self, parent, start_offset, header=None):
Atom.__init__(self, parent, start_offset, header)
self._children = None
#--- Protected
def _read_children(self):
children = []
self.data #pre-read data
#self.data[-1] is the data of the children
startat = self._datasize
while startat < self.content_size:
header = read_atom_header(self.read, startat)
if not header:
break
if header[0] == 0: #when size is zero, it takes the rest of the atom
header = (self.content_size - startat, header[1])
if header[0] < HEADER_SIZE: #safeguard
header = (HEADER_SIZE, header[1])
if is_valid_atom_type(header[1]):
subatom = self._get_atom_class(header[1])(self, startat, header)
children.append(subatom)
startat += header[0]
return tuple(children)
def _get_atom_class(self, type):
return ATOM_SPECS.get(type, Atom)
#--- Public
def find(self, atom_type):
gotta_find = atom_type[:4]
# You'd think that iterating through atoms is slow and that there should be a {type:atom}
        # mapping, but the tests I've done on real data show that doing so is in fact slower.
# I think this is because most atoms have only a few subatoms.
for atom in self.atoms:
if atom.type == gotta_find:
if len(atom_type) >= 9:
return atom.find(atom_type[5:])
else:
return atom
#--- Properties
@property
def atoms(self):
if self._children is None:
self._children = self._read_children()
return self._children
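# Usage note (illustrative): AtomBox.find() takes a dot-separated path of
# 4-character atom types, e.g. box.find('moov.udta.meta.ilst') walks down the
# hierarchy one level per segment and returns the matching atom, or None if
# any segment is missing.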
#Specific atoms *************************************************************
class AttributeAtom(AtomBox):
def _get_atom_class(self, type):
return AttributeDataAtom
@property
def attr_data(self):
try:
return self.atoms[0].attr_data
except IndexError:
# For some reason, our attribute atom has no data sub-atom, no biggie, just return nothing.
return ''
class AttributeDataAtom(Atom):
def _get_data_model(self, integer_type='i'):
[data_type] = struct.unpack('!i', self.read(0, 4))
return '2i' + (integer_type if data_type == 0 else '*s')
def _read_atom_data(self):
result = Atom._read_atom_data(self)
#Convert to unicode if needed
if isinstance(result[2], bytes):
result = list(result)
result[2] = result[2].decode('utf-8', 'ignore')
result = tuple(result)
return result
@property
def attr_data(self):
return self.data[2]
class EsdsAtom(Atom):
cls_data_model = '26si'
@property
def bitrate(self):
return self.data[1]
class GnreAtom(AttributeAtom):
def _get_atom_class(self, type):
return GnreDataAtom
class GnreDataAtom(AttributeDataAtom):
def _get_data_model(self):
return AttributeDataAtom._get_data_model(self, 'H')
class MetaAtom(AtomBox):
cls_data_model = 'i'
class MdhdAtom(Atom):
def _get_data_model(self):
[version] = struct.unpack('B', self.read(0, 1))
return '20s2i' if version > 0 else '12s2i'
@property
def sample_rate(self):
return self.data[1]
@property
def duration(self):
return self.data[2]
class StsdAtom(AtomBox):
def _get_data_model(self):
[version] = struct.unpack('4s', self.read(12, 4))
if version in (b'mp4v', b'avc1', b'encv', b's263'):
            return '94s'
elif version in (b'mp4a', b'drms', b'enca', b'samr', b'sawb'):
return '44s'
else:
return '24s'
ATOM_SPECS = {
'©nam': AttributeAtom,
'©ART': AttributeAtom,
'©wrt': AttributeAtom,
'©alb': AttributeAtom,
'©too': AttributeAtom,
'©day': AttributeAtom,
'©cmt': AttributeAtom,
'©gen': AttributeAtom,
'data': AttributeDataAtom,
'esds': EsdsAtom,
'gnre': GnreAtom,
'ilst': AtomBox,
'mdhd': MdhdAtom,
'mdia': AtomBox,
'meta': MetaAtom,
'minf': AtomBox,
'moov': AtomBox,
'stbl': AtomBox,
'stsd': StsdAtom,
'trak': AtomBox,
'trkn': AttributeAtom,
'udta': AtomBox,
}
# Mp4 File **********************************************************
class File(AtomBox):
def __init__(self, infile):
self._fp, self._shouldclose = open_if_filename(infile, 'rb')
self._fp.seek(0, 2)
AtomBox.__init__(self, None, 0, (self._fp.tell(), 'root'))
def _get_attr(self, path):
atom = self.find(path)
return atom.attr_data if atom else ''
def close(self):
if self._fp and self._shouldclose:
self._fp.close()
self._fp = None
def read(self, startat=0, readcount=-1):
if startat < 0:
startat = 0
self._fp.seek(startat)
return self._fp.read(readcount)
@property
def album(self):
return self._get_attr('moov.udta.meta.ilst.©alb')
@property
def artist(self):
return self._get_attr('moov.udta.meta.ilst.©ART')
@property
def audio_offset(self):
atoms = [a for a in self.atoms if (a.size > 8) and (a.type == 'mdat')]
return atoms[0].start_offset if atoms else 0
@property
def audio_size(self):
atoms = [a for a in self.atoms if (a.size > 8) and (a.type == 'mdat')]
return atoms[0].size if atoms else 0
@property
def bitrate(self):
atom = self.find('moov.trak.mdia.minf.stbl.stsd.esds')
return atom.bitrate // 1000 if atom else 0
@property
def comment(self):
return self._get_attr('moov.udta.meta.ilst.©cmt')
@property
def duration(self):
atom = self.find('moov.trak.mdia.mdhd')
return atom.duration // self.sample_rate if atom else 0
@property
def genre(self):
data = self._get_attr('moov.udta.meta.ilst.gnre')
if not data:
data = self._get_attr('moov.udta.meta.ilst.©gen')
if isinstance(data, str):
return data
elif isinstance(data, int):
return genre_by_index(data - 1)
else:
return ''
@property
def sample_rate(self):
atom = self.find('moov.trak.mdia.mdhd')
return atom.sample_rate if atom else 0
@property
def title(self):
return self._get_attr('moov.udta.meta.ilst.©nam')
@property
def track(self):
return tryint(self._get_attr('moov.udta.meta.ilst.trkn'))
@property
def valid(self):
return self.find('mdat') is not None
@property
def year(self):
return self._get_attr('moov.udta.meta.ilst.©day')[:4]
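# ---------------------------------------------------------------------------
# Usage sketch (the file name is an illustrative assumption):
#
#   from hsaudiotag import mp4
#
#   myfile = mp4.File('song.m4a')
#   if myfile.valid:
#       print(myfile.artist, myfile.title, myfile.duration, myfile.bitrate)
#   myfile.close()
# ---------------------------------------------------------------------------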
| {
"content_hash": "ccfda70829856908f8385e4904579d21",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 103,
"avg_line_length": 28.68944099378882,
"alnum_prop": 0.5513098073176013,
"repo_name": "jmtchllrx/pyMuse",
"id": "be79587936bb18a8d26804afd88643b22b86a829",
"size": "9587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hsaudiotag/mp4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "123941"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from mox import IsA # noqa
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class SwiftApiTests(test.APITestCase):
def test_swift_get_containers(self):
containers = self.containers.list()
cont_data = [c._apidict for c in containers]
swift_api = self.stub_swiftclient()
swift_api.get_account(limit=1001,
marker=None,
full_listing=True).AndReturn([{}, cont_data])
self.mox.ReplayAll()
(conts, more) = api.swift.swift_get_containers(self.request)
self.assertEqual(len(containers), len(conts))
self.assertFalse(more)
def test_swift_get_container_with_data(self):
container = self.containers.first()
objects = self.objects.list()
swift_api = self.stub_swiftclient()
swift_api.get_object(container.name, "") \
.AndReturn((container, objects))
self.mox.ReplayAll()
cont = api.swift.swift_get_container(self.request, container.name)
self.assertEqual(container.name, cont.name)
self.assertEqual(len(objects), len(cont.data))
def test_swift_get_container_without_data(self):
container = self.containers.first()
swift_api = self.stub_swiftclient()
swift_api.head_container(container.name).AndReturn(container)
self.mox.ReplayAll()
cont = api.swift.swift_get_container(self.request,
container.name,
with_data=False)
self.assertEqual(cont.name, container.name)
self.assertIsNone(cont.data)
def test_swift_create_duplicate_container(self):
metadata = {'is_public': False}
container = self.containers.first()
headers = api.swift._metadata_to_header(metadata=(metadata))
swift_api = self.stub_swiftclient()
# Check for existence, then create
exc = self.exceptions.swift
swift_api.head_container(container.name).AndRaise(exc)
swift_api.put_container(container.name, headers=headers) \
.AndReturn(container)
self.mox.ReplayAll()
# Verification handled by mox, no assertions needed.
api.swift.swift_create_container(self.request,
container.name,
metadata=(metadata))
def test_swift_create_container(self):
metadata = {'is_public': True}
container = self.containers.first()
swift_api = self.stub_swiftclient()
swift_api.head_container(container.name).AndReturn(container)
self.mox.ReplayAll()
# Verification handled by mox, no assertions needed.
with self.assertRaises(exceptions.AlreadyExists):
api.swift.swift_create_container(self.request,
container.name,
metadata=(metadata))
def test_swift_update_container(self):
metadata = {'is_public': True}
container = self.containers.first()
swift_api = self.stub_swiftclient()
headers = api.swift._metadata_to_header(metadata=(metadata))
swift_api.post_container(container.name, headers=headers)\
.AndReturn(container)
self.mox.ReplayAll()
# Verification handled by mox, no assertions needed.
api.swift.swift_update_container(self.request,
container.name,
metadata=(metadata))
def test_swift_get_objects(self):
container = self.containers.first()
objects = self.objects.list()
swift_api = self.stub_swiftclient()
swift_api.get_container(container.name,
limit=1001,
marker=None,
prefix=None,
delimiter='/',
full_listing=True).AndReturn([{}, objects])
self.mox.ReplayAll()
(objs, more) = api.swift.swift_get_objects(self.request,
container.name)
self.assertEqual(len(objects), len(objs))
self.assertFalse(more)
def test_swift_get_object_with_data_non_chunked(self):
container = self.containers.first()
object = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.get_object(
container.name, object.name, resp_chunk_size=None
).AndReturn([object, object.data])
self.mox.ReplayAll()
obj = api.swift.swift_get_object(self.request, container.name,
object.name, resp_chunk_size=None)
self.assertEqual(object.name, obj.name)
def test_swift_get_object_with_data_chunked(self):
container = self.containers.first()
object = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.get_object(
container.name, object.name, resp_chunk_size=api.swift.CHUNK_SIZE
).AndReturn([object, object.data])
self.mox.ReplayAll()
obj = api.swift.swift_get_object(
self.request, container.name, object.name)
self.assertEqual(object.name, obj.name)
def test_swift_get_object_without_data(self):
container = self.containers.first()
object = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.head_object(container.name, object.name) \
.AndReturn(object)
self.mox.ReplayAll()
obj = api.swift.swift_get_object(self.request,
container.name,
object.name,
with_data=False)
self.assertEqual(object.name, obj.name)
self.assertIsNone(obj.data)
def test_swift_upload_object(self):
container = self.containers.first()
obj = self.objects.first()
fake_name = 'fake_object.jpg'
class FakeFile(object):
def __init__(self):
self.name = fake_name
self.data = obj.data
self.size = len(obj.data)
headers = {'X-Object-Meta-Orig-Filename': fake_name}
swift_api = self.stub_swiftclient()
test_file = FakeFile()
swift_api.put_object(container.name,
obj.name,
IsA(FakeFile),
content_length=test_file.size,
headers=headers)
self.mox.ReplayAll()
api.swift.swift_upload_object(self.request,
container.name,
obj.name,
test_file)
def test_swift_upload_object_without_file(self):
container = self.containers.first()
obj = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.put_object(container.name,
obj.name,
None,
content_length=0,
headers={})
self.mox.ReplayAll()
response = api.swift.swift_upload_object(self.request,
container.name,
obj.name,
None)
self.assertEqual(0, response['bytes'])
def test_swift_object_exists(self):
container = self.containers.first()
obj = self.objects.first()
swift_api = self.stub_swiftclient()
swift_api.head_object(container.name, obj.name).AndReturn(container)
exc = self.exceptions.swift
swift_api.head_object(container.name, obj.name).AndRaise(exc)
self.mox.ReplayAll()
args = self.request, container.name, obj.name
self.assertTrue(api.swift.swift_object_exists(*args))
# Again, for a "non-existent" object
self.assertFalse(api.swift.swift_object_exists(*args))
| {
"content_hash": "3188f23cba5cc8fcfe7e2f49e447c7a6",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 77,
"avg_line_length": 38.91121495327103,
"alnum_prop": 0.551579200192146,
"repo_name": "mandeepdhami/horizon",
"id": "3f244dc30bc71baf271af06e75e44222fb5c7985",
"size": "9091",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/api_tests/swift_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "94677"
},
{
"name": "HTML",
"bytes": "475954"
},
{
"name": "JavaScript",
"bytes": "807606"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4687618"
},
{
"name": "Shell",
"bytes": "18657"
}
],
"symlink_target": ""
} |
"""
isotropic_turbulent_suspension_with_settling_and_bleaching.py
Example of a continuous-time, stochastic, pair-based cellular automaton model,
which simulates the diffusion of suspended particles in a turbulent fluid.
Particles start with an accumulated luminescence signal L = 1, and are bleached
by exposure to light at a rate that depends on distance below the upper surface.
Written by Greg Tucker, July 2015
"""
from __future__ import print_function  # for both python 2 and 3 compatibility
import time
import matplotlib
from pylab import figure, show, clf
from numpy import where, exp, amin
from landlab import RasterModelGrid, ModelParameterDictionary
from landlab.plot.imshow import imshow_node_grid
from landlab.components.cellular_automata.celllab_cts import Transition, CAPlotter
from landlab.components.cellular_automata.oriented_raster_cts import OrientedRasterCTS
class TurbulentSuspensionAndBleachingModel(OrientedRasterCTS):
"""
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 4
... model_grid_column__count: number of columns in grid
... 4
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 2.0
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.node_state
array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
>>> tsbm.grid.at_node['osl']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.,
0., 0., 0.])
>>> tsbm.n_xn
array([0, 1, 1, 0, 0, 1, 1, 0])
>>> tsbm.fluid_surface_height
3.5
"""
def __init__(self, input_stream):
"""
Reads in parameters and initializes the model.
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 4
... model_grid_column__count: number of columns in grid
... 4
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 2.0
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.node_state
array([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
>>> tsbm.grid.at_node['osl']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.,
0., 0., 0.])
>>> tsbm.n_xn
array([0, 1, 1, 0, 0, 1, 1, 0])
>>> tsbm.fluid_surface_height
3.5
"""
# Get a source for input parameters.
params = ModelParameterDictionary(input_stream)
# Read user-defined parameters
nr = params.read_int('model_grid_row__count') # number of rows (CSDMS Standard Name [CSN])
nc = params.read_int('model_grid_column__count') # number of cols (CSN)
self.plot_interval = params.read_float('plot_interval') # interval for plotting output, s
self.run_duration = params.read_float('model__run_time') # duration of run, sec (CSN)
self.report_interval = params.read_float('model__report_interval') # report interval, in real-time seconds
self.bleach_T0 = params.read_float('surface_bleaching_time_scale') # time scale for bleaching at fluid surface, s
self.zstar = params.read_float('light_attenuation_length') # length scale for light attenuation in fluid, CELLS
# Derived parameters
self.fluid_surface_height = nr-0.5
# Calculate when we next want to report progress.
self.next_report = time.time() + self.report_interval
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'fluid', 1 : 'particle' }
xn_list = self.setup_transition_list()
# Create the node-state array and attach it to the grid
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int)
# For visual display purposes, set all boundary nodes to fluid
node_state_grid[mg.closed_boundary_nodes] = 0
# Initialize the node-state array: here, the initial condition is a pile of
# resting grains at the bottom of a container.
bottom_rows = where(mg.node_y<0.4*nr)[0]
node_state_grid[bottom_rows] = 1
# Create a data array for bleaching.
# Here, osl=optically stimulated luminescence, normalized to the original
# signal (hence, initially all unity). Over time this signal may get
# bleached out due to exposure to light.
self.osl = mg.add_zeros('node', 'osl')
self.osl[bottom_rows] = 1.0
self.osl_display = mg.add_zeros('node', 'osl_display')
self.osl_display[bottom_rows] = 1.0
# We'll need an array to track the last time any given node was
# updated, so we can figure out the duration of light exposure between
# update events
self.last_update_time = mg.add_zeros('node','last_update_time')
# Call the base class (RasterCTS) init method
super(TurbulentSuspensionAndBleachingModel, \
self).__init__(mg, ns_dict, xn_list, node_state_grid, prop_data=self.osl)
# Set up plotting (if plotting desired)
if self.plot_interval <= self.run_duration:
self.initialize_plotting()
def initialize_plotting(self):
"""
Creates a CA plotter object, sets its colormap, and plots the initial
model state.
"""
# Set up some plotting information
grain = '#5F594D'
bleached_grain = '#CC0000'
fluid = '#D0E4F2'
clist = [fluid,bleached_grain,grain]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
self.ca_plotter = CAPlotter(self, cmap=my_cmap)
# Plot the initial grid
self.ca_plotter.update_plot()
# Make a colormap for use in showing the bleaching of each grain
clist = [(0.0, (1.0, 1.0, 1.0)), (0.49, (0.8, 0.8, 0.8)), (1.0, (0.0, 0.0, 0.0))]
self.cmap_for_osl = matplotlib.colors.LinearSegmentedColormap.from_list('osl_cmap', clist)
def setup_transition_list(self):
"""
Creates and returns a list of Transition() objects to represent state
transitions for a biased random walk, in which the rate of downward
motion is greater than the rate in the other three directions.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain, tea leaf, or dissolved heavy particle).
The states and transitions are as follows:
Pair state Transition to Process Rate (cells/s)
========== ============= ======= ==============
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left motion 10.0
2 (1-0) 1 (0-1) right motion 10.0
3 (1-1) (none) - -
4 (0-0) (none) - -
5 (0-1) 2 (1-0) down motion 10.55
6 (1-0) 1 (0-1) up motion 9.45
7 (1-1) (none) - -
"""
# Create an empty transition list
xn_list = []
# Append four transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left cell, right cell, orientation [0=horizontal])
# - Tuple representing new pair state
# (bottom cell, top cell, orientation [1=vertical])
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
# - Flag indicating that the transition involves an exchange of properties
# - Function to be called after each transition, to update a property
# (in this case, to simulate bleaching of the luminescence signal)
xn_list.append( Transition((0,1,0), (1,0,0), 10., 'left motion', True, self.update_bleaching) )
xn_list.append( Transition((1,0,0), (0,1,0), 10., 'right motion', True, self.update_bleaching) )
xn_list.append( Transition((0,1,1), (1,0,1), 10.55, 'down motion', True, self.update_bleaching) )
xn_list.append( Transition((1,0,1), (0,1,1), 9.45, 'up motion', True, self.update_bleaching) )
return xn_list
def bleach_grain(self, node, dt):
"""
Updates the luminescence signal at node.
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.bleach_grain(10, 1.0)
>>> int(tsbm.prop_data[tsbm.propid[10]]*1000)
858
"""
depth = self.fluid_surface_height - self.grid.node_y[node]
T_bleach = self.bleach_T0*exp( depth/self.zstar)
self.prop_data[self.propid[node]] *= exp( -dt/T_bleach )
def update_bleaching(self, ca_unused, node1, node2, time_now):
"""
Updates the luminescence signal at a pair of nodes that have just
undergone a transition, if either or both nodes is a grain.
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.update_bleaching(tsbm, 10, 13, 1.0)
>>> int(tsbm.prop_data[tsbm.propid[10]]*1000)
858
>>> tsbm.prop_data[tsbm.propid[13]]
0.0
"""
if self.node_state[node1]==1:
dt = time_now - self.last_update_time[self.propid[node1]]
self.bleach_grain(node1, dt)
self.last_update_time[self.propid[node1]] = time_now
if self.node_state[node2]==1:
dt = time_now - self.last_update_time[self.propid[node2]]
self.bleach_grain(node2, dt)
self.last_update_time[self.propid[node2]] = time_now
def synchronize_bleaching(self, sync_time):
"""
Brings all nodes up to the same time, sync_time, by applying bleaching
up to this time, and updating last_update_time.
Notes
-----
In a CellLab-CTS model, the "time" is usually different for each node:
some will have only just recently undergone a transition and had their
properties (in this case, OSL bleaching) updated, while others will
have last been updated a long time ago, and some may never have had a
transition. If we want to plot the properties at a consistent time, we
need to bring all node properties (again, in this case, OSL) up to
date. This method does so.
We multiply elapsed time (between last update and "sync time") by
the node state, because we only want to update the solid particles---
because the state of a particle is 1 and fluid 0, this multiplication
masks out the fluid nodes.
We don't call bleach_grain(), because we want to take advantage of
numpy array operations rather than calling a method for each node.
Example
-------
>>> from six import StringIO
>>> p = StringIO('''
... model_grid_row__count: number of rows in grid
... 10
... model_grid_column__count: number of columns in grid
... 3
... plot_interval: interval for plotting to display, s
... 2.0
... model__run_time: duration of model run, s
... 1.0
... model__report_interval: time interval for reporting progress, real-time seconds
... 1.0e6
... surface_bleaching_time_scale: time scale for OSL bleaching, s
... 2.42
... light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
... 6.5
... ''')
>>> tsbm = TurbulentSuspensionAndBleachingModel(p)
>>> tsbm.synchronize_bleaching(1.0)
>>> int(tsbm.osl[10]*100000)
85897
"""
dt = (sync_time - self.last_update_time[self.propid])*self.node_state
assert (amin(dt)>=0.0), 'sync_time must be >= 0 everywhere'
depth = self.fluid_surface_height - self.grid.node_y
T_bleach = self.bleach_T0*exp( depth/self.zstar)
self.prop_data[self.propid] *= exp( -dt/T_bleach )
self.last_update_time[self.propid] = sync_time*self.node_state
def go(self):
"""
Runs the model.
"""
# RUN
while self.current_time < self.run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= self.next_report:
print('Current sim time',self.current_time,'(',100*self.current_time/self.run_duration,'%)')
self.next_report = current_real_time + self.report_interval
# Run the model forward in time until the next output step
self.run(self.current_time+self.plot_interval, self.node_state,
plot_each_transition=False)
self.current_time += self.plot_interval
self.synchronize_bleaching(self.current_time)
if self.plot_interval <= self.run_duration:
# Plot the current grid
self.ca_plotter.update_plot()
# Display the OSL content of grains
figure(3)
clf()
self.osl_display[:] = self.osl[self.propid]+self.node_state
imshow_node_grid(self.grid, 'osl_display', limits=(0.0, 2.0),
cmap=self.cmap_for_osl)
show()
figure(1)
def finalize(self):
# FINALIZE
# Plot
self.ca_plotter.finalize()
# If user runs this file, activate the main() function.
if __name__ == "__main__":
# Parse command-line argument, if any
import sys
if len(sys.argv)>1:
input_file_name = sys.argv[1]
else:
input_file_name = 'tsbm_inputs.txt'
# Instantiate the model
ca_model = TurbulentSuspensionAndBleachingModel(input_file_name)
# Run the model
ca_model.go()
# Clean up
ca_model.finalize()
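# ---------------------------------------------------------------------------
# Example input file: a sketch of what 'tsbm_inputs.txt' could contain,
# mirroring the parameter names read in __init__ (the values are arbitrary):
#
#   model_grid_row__count: number of rows in grid
#   40
#   model_grid_column__count: number of columns in grid
#   40
#   plot_interval: interval for plotting to display, s
#   2.0
#   model__run_time: duration of model run, s
#   20.0
#   model__report_interval: time interval for reporting progress, real-time seconds
#   10.0
#   surface_bleaching_time_scale: time scale for OSL bleaching, s
#   2.42
#   light_attenuation_length: length scale for light attenuation, cells (1 cell = 1 mm)
#   2.0
# ---------------------------------------------------------------------------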
| {
"content_hash": "cd08eafed3554ba5519a9133b3cbb73d",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 122,
"avg_line_length": 39.81042654028436,
"alnum_prop": 0.5791071428571428,
"repo_name": "laijingtao/landlab",
"id": "3192053bfa0cce5e8407068931198d4fdbd2eba1",
"size": "16819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "landlab/ca/examples/turbulent_suspension_with_settling_and_bleaching.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1452"
},
{
"name": "PowerShell",
"bytes": "7128"
},
{
"name": "Python",
"bytes": "3084328"
},
{
"name": "Shell",
"bytes": "3175"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SubscriptionPlan.theme_change'
db.add_column('subscriptions_subscriptionplan', 'theme_change', self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=11, decimal_places=2), keep_default=False)
# Adding field 'Feature.theme_change'
db.add_column('subscriptions_feature', 'theme_change', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'SubscriptionPlan.theme_change'
db.delete_column('subscriptions_subscriptionplan', 'theme_change')
# Deleting field 'Feature.theme_change'
db.delete_column('subscriptions_feature', 'theme_change')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'charge_on_card_as': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'default': "'contact@yourstore.com'", 'max_length': '75'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'shops.shop': {
'Meta': {'object_name': 'Shop'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'subscriptions.feature': {
'Meta': {'object_name': 'Feature'},
'auctions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'credit_card': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'custom_dns': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'google_analytics': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'google_checkout': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'manual_payment': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'paypal': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'show_attendance': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'theme_change': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'wishlist': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'subscriptions.featurepayment': {
'Meta': {'object_name': 'FeaturePayment'},
'feature': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'subscriptions.subscription': {
'Meta': {'object_name': 'Subscription'},
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subscriptions.SubscriptionPlan']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '1'}),
'subscription_id': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'subscriptions.subscriptioncancelation': {
'Meta': {'object_name': 'SubscriptionCancelation'},
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subscriptions.Subscription']"})
},
'subscriptions.subscriptionpayment': {
'Meta': {'object_name': 'SubscriptionPayment'},
'amount': ('django.db.models.fields.FloatField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subscriptions.Subscription']"})
},
'subscriptions.subscriptionplan': {
'Meta': {'object_name': 'SubscriptionPlan'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'additional_payment_price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'additional_shipping_price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'admin_accounts': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'auto_tax_integration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'billing_period': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'collect_emails': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'community_support_in_forums': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'community_wish_list': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'concurrent_auction_items': ('django.db.models.fields.PositiveIntegerField', [], {}),
'concurrent_store_items': ('django.db.models.fields.PositiveIntegerField', [], {}),
'create_auctions': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'custom_domain_name_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'custom_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'customizable_shipping_rates': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'description': ('django.db.models.fields.TextField', [], {}),
'email_support_price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'google_analytics_support': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'listings_per_month': ('django.db.models.fields.PositiveIntegerField', [], {}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'online_help_center': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'payment_methods': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'pictures_per_item': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'pictures_per_lot': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'plan_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'secret_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'shopping_cart': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'show_attendance': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'theme_change': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'}),
'total_data_transfer': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'total_mbs_storage': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'total_store_revenue': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
'trial_period': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'voice_support_price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '11', 'decimal_places': '2'})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'photo': ('core.thumbs.ImageWithThumbsField', [], {'max_length': '100'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['subscriptions']
| {
"content_hash": "9e2ab166fec143b8fe07a4f90b70a289",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 186,
"avg_line_length": 82.5421052631579,
"alnum_prop": 0.5635401390040171,
"repo_name": "StephenPower/CollectorCity-Market-Place",
"id": "37d57016b229c737d0bd274616b0d6e56f984923",
"size": "15701",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stores/apps/subscriptions/migrations/0007_auto__add_field_subscriptionplan_theme_change__add_field_feature_theme.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "796501"
},
{
"name": "Python",
"bytes": "1860719"
},
{
"name": "Shell",
"bytes": "1174"
}
],
"symlink_target": ""
} |
__author__ = 'Kevin'
#Running on python2.7
import ConfigParser
config = ConfigParser.ConfigParser()
cfg_file = open('config.ini', 'r+')
config.readfp(cfg_file)
#
def check_input():
y_or_n = raw_input()
if y_or_n == 'y':
return True
elif y_or_n != 'n':
print("Input error")
raise ValueError("Needed to input 'y' or 'n'")
else:
return False
#Sets the reddit_username of the user and also creates the [Subs]
#section in the .ini file
def set_user(cfg_file, config):
is_correct = False
while is_correct == False:
username = raw_input('Please enter your Reddit username: ')
print 'Is', username, 'correct? (y/n)'
is_correct = check_input()
config.add_section('User')
config.set('User','Reddit_Username',username)
config.add_section('Subs')
config.write(cfg_file)
if len(config.sections()) == 0:
print("Setting Reddit Username...")
set_user(cfg_file, config)
def show_current_config(cfg_file, config):
print('Your current subreddit configuration:')
print ''
print '{0:30}'.format('Subreddit'),'Upvotes_Required'
options_list = config.options('Subs')
for option in options_list:
print '{0:30}'.format(option), config.get('Subs',option)
print ''
show_current_config(cfg_file, config)
#Iterates through all current subs and asks the user if they want them
#removed
def remove_subs(config):
for option in config.options('Subs'):
print 'Would you like to remove', option, '? (y/n)'
should_remove = check_input()
if should_remove:
config.remove_option('Subs',option)
#TODO not displaying correctly. config.write is causing data to duplicate
empty_file = open('config.ini', 'r+')
empty_file.truncate()
config.write(empty_file)
def add_subs(config):
is_done = False
while is_done == False:
is_correct = False
while (is_correct == False):
            print 'What is the sub you would like to add? (Enter just the subreddit name, without the /r/)'
sub_name = raw_input()
print 'is', sub_name, 'correct? (y/n) '
is_correct = check_input()
if (is_correct):
is_correct = False
upvotes = -1
while is_correct == False:
upvotes = int(raw_input('How many upvotes should it take for a post to qualify for your Summary? (Integers only)'))
print 'is', upvotes, 'correct? (y/n) '
is_correct = check_input()
config.set('Subs',sub_name, upvotes)
print 'Are you done adding subs? (y/n)'
is_done = check_input()
empty_file = open('config.ini', 'r+')
empty_file.truncate()
config.write(empty_file)
#Iterates through current results, and then asks
#if more should be added on
def modify_subs(cfg_file, config):
#TODO iterate through current results, and then allow for more
#to be added on
print 'Would you like to remove current subs? (y/n) '
to_remove = check_input()
if to_remove:
cfg_file.close()
remove_subs(config)
cfg_file = open('config.ini', 'r+')
#Ask to add new subs
print 'Would you like to add new subs? (y/n) '
to_add = check_input()
if to_add:
cfg_file.close()
add_subs(config)
cfg_file = open('config.ini', 'r+')
print 'Would you like to modify your subs? (y/n) '
need_to_change = check_input()
if (need_to_change):
modify_subs(cfg_file, config)
cfg_file.close()
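# ---------------------------------------------------------------------------
# Sketch of the config.ini this script produces (usernames, sub names and
# upvote thresholds are hypothetical; ConfigParser lowercases option names):
#
#   [User]
#   reddit_username = some_redditor
#
#   [Subs]
#   python = 100
#   learnprogramming = 50
# ---------------------------------------------------------------------------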
| {
"content_hash": "9fe3786b8dd386af3ce296ad44122d7e",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 135,
"avg_line_length": 29.741666666666667,
"alnum_prop": 0.6066124964976184,
"repo_name": "KevinConti/Summary",
"id": "2928d6760339c753aa3f71027173c56918f4a61a",
"size": "3569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3656"
}
],
"symlink_target": ""
} |
import os
import uuid
import pytest
import sqlalchemy
import tink
from snippets.cloud_kms_env_aead import init_tink_env_aead
from snippets.cloud_sql_connection_pool import init_db
from snippets.encrypt_and_insert_data import encrypt_and_insert_data
from snippets.query_and_decrypt_data import query_and_decrypt_data
table_name = f"votes_{uuid.uuid4().hex}"
@pytest.fixture(name="pool")
def setup_pool() -> sqlalchemy.engine.Engine:
try:
db_user = os.environ["SQLSERVER_USER"]
db_pass = os.environ["SQLSERVER_PASSWORD"]
db_name = os.environ["SQLSERVER_DATABASE"]
db_host = os.environ["SQLSERVER_HOST"]
except KeyError:
raise Exception(
"The following env variables must be set to run these tests:"
"SQLSERVER_USER, SQLSERVER_PASSWORD, SQLSERVER_DATABASE, SQLSERVER_HOST")
else:
pool = init_db(
db_user=db_user,
db_pass=db_pass,
db_name=db_name,
table_name=table_name,
db_host=db_host,
)
yield pool
with pool.connect() as conn:
conn.execute(f"DROP TABLE IF EXISTS {table_name}")
@pytest.fixture(name="env_aead")
def setup_key() -> tink.aead.KmsEnvelopeAead:
credentials = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "")
key_uri = "gcp-kms://" + os.environ["CLOUD_KMS_KEY"]
env_aead = init_tink_env_aead(key_uri, credentials)
yield env_aead
def test_query_and_decrypt_data(
capsys: pytest.CaptureFixture,
pool: sqlalchemy.engine.Engine,
env_aead: tink.aead.KmsEnvelopeAead
) -> None:
# Insert data into table before testing
encrypt_and_insert_data(
pool,
env_aead,
table_name,
"SPACES",
"hello@example.com")
query_and_decrypt_data(pool, env_aead, table_name)
captured = capsys.readouterr()
assert "Team\tEmail\tTime Cast" in captured.out
assert "hello@example.com" in captured.out
| {
"content_hash": "04b52d57429d3d963223fbdf501dbec8",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 85,
"avg_line_length": 28.15714285714286,
"alnum_prop": 0.6519533231861999,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "2dada45fb0ee872590fd725255af28bfaf14a5b4",
"size": "2546",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cloud-sql/sql-server/client-side-encryption/snippets/query_and_decrypt_data_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
} |
"""
File backends with small tweaks to work with gunicorn + eventlet async
workers. These should eventually become unnecessary as the supporting libraries
continue to improve.
"""
import eventlet
from django.utils.deconstruct import deconstructible
from athumb.backends.s3boto import S3BotoStorage, S3BotoStorage_AllPublic
def eventlet_workaround(bytes_transmitted, bytes_remaining):
"""
Stinks we have to do this, but calling this at intervals keeps gunicorn
eventlet async workers from hanging and expiring.
"""
eventlet.sleep(0)
@deconstructible
class EventletS3BotoStorage(S3BotoStorage):
"""
Modified standard S3BotoStorage class to play nicely with large file
uploads and eventlet gunicorn workers.
"""
def __init__(self, *args, **kwargs):
super(EventletS3BotoStorage, self).__init__(*args, **kwargs)
# Use the workaround as Boto's set_contents_from_file() callback.
self.s3_callback_during_upload = eventlet_workaround
@deconstructible
class EventletS3BotoStorage_AllPublic(S3BotoStorage_AllPublic):
"""
Modified standard S3BotoStorage_AllPublic class to play nicely with large
file uploads and eventlet gunicorn workers.
"""
def __init__(self, *args, **kwargs):
super(EventletS3BotoStorage_AllPublic, self).__init__(*args, **kwargs)
# Use the workaround as Boto's set_contents_from_file() callback.
self.s3_callback_during_upload = eventlet_workaround
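# ---------------------------------------------------------------------------
# Usage sketch (bucket name, model and constructor arguments are assumptions,
# not confirmed by this module): these classes are ordinary Django storage
# backends, so they can be attached to a file field like any other storage.
#
#   from athumb.backends.s3boto_gunicorn_eventlet import (
#       EventletS3BotoStorage_AllPublic)
#
#   PUBLIC_MEDIA = EventletS3BotoStorage_AllPublic(bucket='my-public-bucket')
#
#   class Photo(models.Model):
#       image = models.ImageField(upload_to='photos', storage=PUBLIC_MEDIA)
# ---------------------------------------------------------------------------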
| {
"content_hash": "093faaf7847a372912f95657dbe745c3",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 35.11904761904762,
"alnum_prop": 0.7315254237288136,
"repo_name": "bdabrowski/django-athumb",
"id": "c5600a36756943f7473688481585695e5952f5c5",
"size": "1475",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "athumb/backends/s3boto_gunicorn_eventlet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "58869"
}
],
"symlink_target": ""
} |
import json
import urllib2
import urllib
import sys
import hashlib
import subprocess
import os
#You can put the version info somewhere in your program
currentVersion = 1.0
#MD5 check helper function
def md5(fileName):
    """Compute md5 hash of the specified file"""
    m = hashlib.md5()
    try:
        fd = open(fileName, "rb")
    except IOError:
        print "Problem reading file:", fileName
        return
    x = fd.read()
    fd.close()
    m.update(x)
    return m.hexdigest()
#Start of Program
print '[LASS-OTA]OTA Begin: Retrieve Version Info...'
#Update info URL(this must in code or somewhere you store it)
url = 'https://dl.dropboxusercontent.com/u/15099413/version.info'
# Download the version info and parse the JSON response into a Python dict
data = json.load(urllib2.urlopen(url))
netversion = float(data['VERSION'])
#Check Versions
print '[LASS-OTA]Check Versions...'
need_update=0
if netversion > currentVersion:
print '[LASS-OTA]Need Update!\nOld Version:',currentVersion,'====>>>> New Version:',data['VERSION']
print '[LASS-OTA]Start Download New Firmware:',data['linkurl']
print '[LASS-OTA]Start Download New Firmware:',data['linkurl2']
need_update=1
elif netversion == currentVersion:
print '[LASS-OTA]You Have the Correct Version(Maybe)'
sys.exit(0)
#Download
downloaded=0
if need_update:
cmd='curl '+data['linkurl']+' -o smart7688.hex'
print cmd
subprocess.call(cmd, shell=True)
cmd='curl '+data['linkurl2']+' -o main_new.py'
print cmd
subprocess.call(cmd, shell=True)
downloaded = 1
#Check Download MD5
MD5checked=0
if downloaded:
md5checksum = md5("smart7688.hex")
print "[LASS-OTA]Downloaded file's MD5 is %s With File Name %s" % (md5checksum, "smart7688.hex")
if data['MD5']==md5checksum:
MD5checked = 1
print '[LASS-OTA]MD5 Pattern Matched!...'
else:
print '[LASS-OTA]MD5 Dismatch!...Abort'
sys.exit(0)
if downloaded and MD5checked:
md5checksum = md5("main_new.py")
print "[LASS-OTA]Downloaded file's MD5 is %s With File Name %s" % (md5checksum, "main_new.py")
if data['MD5_2']==md5checksum:
MD5checked = 1
print '[LASS-OTA]MD5 Pattern Matched!...'
else:
print '[LASS-OTA]MD5 Dismatch!...Abort'
sys.exit(0)
#Burnning Hex in ATMEGA32U2
retcode =0
if MD5checked:
cmd='avrdude -c linuxgpio -C /etc/avrdude.conf -p m32u4 -U flash:w:smart7688.hex -Uflash:w:$1 $2'
retcode = subprocess.call(cmd, shell=True)
if retcode != 1:
print '[LASS-OTA]Avrdude Failed...'
sys.exit(retcode)
else:
print '[LASS-OTA]Avrdude Success...'
os.remove("smart7688.hex")
#Restart Python Process and Reboot
if MD5checked and retcode == 0:
command = "killall python main.py"
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
print '[LASS-OTA]Remove Main.py'
os.remove("main.py")
print '[LASS-OTA]Rename Main_new.py'
os.rename("main_new.py","main.py")
    print '[LASS-OTA]OTA Completed, Rebooting System....'
os.system("reboot")
| {
"content_hash": "4f13a8163227ff87cf8f0af7488fe47f",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 100,
"avg_line_length": 27.914285714285715,
"alnum_prop": 0.7048788809280109,
"repo_name": "LinkItONEDevGroup/LASS",
"id": "6df44f3e75c6d8139589f7a41bfdc59a63af36e9",
"size": "2950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Device_LinkItSmart7688Duo/python_version/Example/OTA_RODODO/ota.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "9857"
},
{
"name": "C",
"bytes": "5326619"
},
{
"name": "C++",
"bytes": "2643352"
},
{
"name": "CMake",
"bytes": "5007"
},
{
"name": "CSS",
"bytes": "158490"
},
{
"name": "HTML",
"bytes": "1658027"
},
{
"name": "Hack",
"bytes": "5364"
},
{
"name": "Java",
"bytes": "16636"
},
{
"name": "JavaScript",
"bytes": "1970939"
},
{
"name": "Makefile",
"bytes": "908"
},
{
"name": "Objective-C",
"bytes": "24111"
},
{
"name": "PHP",
"bytes": "2973502"
},
{
"name": "Processing",
"bytes": "8909"
},
{
"name": "Python",
"bytes": "223297"
},
{
"name": "R",
"bytes": "915"
},
{
"name": "Ruby",
"bytes": "1230"
},
{
"name": "Shell",
"bytes": "12087"
},
{
"name": "TSQL",
"bytes": "4938"
}
],
"symlink_target": ""
} |
class Sample(object):
"""Sample"""
def __init__(self, name, duration):
self.name = name
self.duration = duration
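# Hedged usage sketch (the attribute names come from the class above; the
# sample name and duration values are made up for illustration):
#     loop = Sample('loop_amen', 1.753)
#     print(loop.name, loop.duration)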
| {
"content_hash": "6202f91f6324950fe5ad17fd9783691f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 39,
"avg_line_length": 23,
"alnum_prop": 0.5652173913043478,
"repo_name": "gkvoelkl/python-sonic",
"id": "b53120d17b83eacd8aa33c8ccc69de3612dface7",
"size": "138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psonic/samples/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "34643"
},
{
"name": "Python",
"bytes": "30653"
}
],
"symlink_target": ""
} |
from animal import Animal
from ..ai.state import State
from location import Location
from math import pi
import random
import nodebox.graphics as ng
class Dog(Animal):
def __init__(self, world):
Animal.__init__(self, world)
self.speed = 4.0
self.angle = 0.0
def reset(self):
self.loc = self.randomLocation()
def randomLocation(self):
# x, y = random.choice((self.world.border, self.world.width-self.world.border)), random.choice((self.world.border, self.world.height-self.world.border))
x, y = random.randint(self.world.border, self.world.width-self.world.border), random.randint(self.world.border, self.world.height-self.world.border)
return Location(x, y)
# Get move from AI
def getMove(self):
state = self.getState()
self.action = self.world.ai.getAction(state)
# target centric
if self.action == 'towards':
self.angle = state.target_a
elif self.action == 'away-l':
self.angle = state.target_a + pi
elif self.action == 'left':
self.angle = state.target_a + 0.40 * pi
elif self.action == 'right':
self.angle = state.target_a - 0.40 * pi
# save for evaluate()
self.old_state = state
def getState(self):
return State(self, self.world.ai)
# Evaluate
def evaluate(self):
self.new_state = self.getState()
self.world.ai.evaluate(self.old_state, self.action, self.new_state)
# An update is moving at certain speed in certain direction
def update(self):
self.loc = self.loc.move(self.world.speed * self.speed, self.angle)
self.saveHistory()
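# Hedged usage sketch (the World and AI objects are assumed to come from
# elsewhere in the package; this is not part of the original module):
#     dog = Dog(world)
#     dog.reset()
#     # per simulation tick:
#     dog.getMove(); dog.update(); dog.evaluate()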
| {
"content_hash": "6150d4c15f3a14492585541d739a26aa",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 160,
"avg_line_length": 31.537037037037038,
"alnum_prop": 0.6236054022313564,
"repo_name": "schmit/sheepherding",
"id": "0e9d63a7a8f3be831d30e0d2961fd9ae7f354717",
"size": "1703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sheepherding/world/dog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43915"
}
],
"symlink_target": ""
} |
__author__ = 'Daan Wierstra and Tom Schaul'
import pickle
from scipy import dot, argmax
from random import shuffle
from trainer import Trainer
from pybrain.utilities import fListToString
from pybrain.auxiliary import GradientDescent
class BackpropTrainer(Trainer):
"""Trainer that trains the parameters of a module according to a
supervised dataset (potentially sequential) by backpropagating the errors
(through time)."""
def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
momentum=0., verbose=False, batchlearning=False,
weightdecay=0.):
"""Create a BackpropTrainer to train the specified `module` on the
specified `dataset`.
        The learning rate gives the ratio by which parameters are changed in
        the direction of the gradient. The learning rate decays by `lrdecay`,
        which is used to multiply the learning rate after each training
step. The parameters are also adjusted with respect to `momentum`, which
is the ratio by which the gradient of the last timestep is used.
If `batchlearning` is set, the parameters are updated only at the end of
each epoch. Default is False.
`weightdecay` corresponds to the weightdecay rate, where 0 is no weight
decay at all.
"""
Trainer.__init__(self, module)
self.setData(dataset)
self.verbose = verbose
self.batchlearning = batchlearning
self.weightdecay = weightdecay
self.epoch = 0
self.totalepochs = 0
# set up gradient descender
self.descent = GradientDescent()
self.descent.alpha = learningrate
self.descent.momentum = momentum
self.descent.alphadecay = lrdecay
self.descent.init(module.params)
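    # Hedged usage sketch (not part of the original trainer; the network and
    # dataset construction are assumed, and buildNetwork is the usual pybrain
    # shortcut rather than something defined in this file):
    #     net = buildNetwork(ds.indim, 3, ds.outdim)
    #     trainer = BackpropTrainer(net, dataset=ds, learningrate=0.01, momentum=0.1)
    #     trainer.trainUntilConvergence(maxEpochs=100)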
def train(self):
"""Train the associated module for one epoch."""
assert len(self.ds) > 0, "Dataset cannot be empty."
self.module.resetDerivatives()
errors = 0
ponderation = 0.
shuffledSequences = []
for seq in self.ds._provideSequences():
shuffledSequences.append(seq)
shuffle(shuffledSequences)
for seq in shuffledSequences:
e, p = self._calcDerivs(seq)
errors += e
ponderation += p
if not self.batchlearning:
gradient = self.module.derivs - self.weightdecay * self.module.params
new = self.descent(gradient, errors)
if new is not None:
self.module.params[:] = new
self.module.resetDerivatives()
if self.verbose:
print "Total error:", errors / ponderation
if self.batchlearning:
self.module._setParameters(self.descent(self.module.derivs))
self.epoch += 1
self.totalepochs += 1
return errors / ponderation
def _calcDerivs(self, seq):
"""Calculate error function and backpropagate output errors to yield
the gradient."""
self.module.reset()
for sample in seq:
self.module.activate(sample[0])
error = 0
ponderation = 0.
for offset, sample in reversed(list(enumerate(seq))):
# need to make a distinction here between datasets containing
# importance, and others
target = sample[1]
outerr = target - self.module.outputbuffer[offset]
if len(sample) > 2:
importance = sample[2]
error += 0.5 * dot(importance, outerr ** 2)
ponderation += sum(importance)
self.module.backActivate(outerr * importance)
else:
error += 0.5 * sum(outerr ** 2)
ponderation += len(target)
# FIXME: the next line keeps arac from producing NaNs. I don't
# know why that is, but somehow the __str__ method of the
                # ndarray class fixes something.
str(outerr)
self.module.backActivate(outerr)
return error, ponderation
def _checkGradient(self, dataset=None, silent=False):
"""Numeric check of the computed gradient for debugging purposes."""
if dataset:
self.setData(dataset)
res = []
for seq in self.ds._provideSequences():
self.module.resetDerivatives()
self._calcDerivs(seq)
e = 1e-6
analyticalDerivs = self.module.derivs.copy()
numericalDerivs = []
for p in range(self.module.paramdim):
storedoldval = self.module.params[p]
self.module.params[p] += e
righterror, dummy = self._calcDerivs(seq)
self.module.params[p] -= 2 * e
lefterror, dummy = self._calcDerivs(seq)
approxderiv = (righterror - lefterror) / (2 * e)
self.module.params[p] = storedoldval
numericalDerivs.append(approxderiv)
r = zip(analyticalDerivs, numericalDerivs)
res.append(r)
if not silent:
print r
return res
def testOnData(self, dataset=None, verbose=False):
"""Compute the MSE of the module performance on the given dataset.
If no dataset is supplied, the one passed upon Trainer initialization is
used."""
if dataset == None:
dataset = self.ds
dataset.reset()
if verbose:
print '\nTesting on data:'
errors = []
importances = []
ponderatedErrors = []
for seq in dataset._provideSequences():
self.module.reset()
e, i = dataset._evaluateSequence(self.module.activate, seq, verbose)
importances.append(i)
errors.append(e)
ponderatedErrors.append(e / i)
if verbose:
print 'All errors:', ponderatedErrors
assert sum(importances) > 0
avgErr = sum(errors) / sum(importances)
if verbose:
print 'Average error:', avgErr
print ('Max error:', max(ponderatedErrors), 'Median error:',
sorted(ponderatedErrors)[len(errors) / 2])
return avgErr
def testOnClassData(self, dataset=None, verbose=False,
return_targets=False):
"""Return winner-takes-all classification output on a given dataset.
If no dataset is given, the dataset passed during Trainer
initialization is used. If return_targets is set, also return
corresponding target classes.
"""
if dataset == None:
dataset = self.ds
dataset.reset()
out = []
targ = []
for seq in dataset._provideSequences():
self.module.reset()
for input, target in seq:
res = self.module.activate(input)
out.append(argmax(res))
targ.append(argmax(target))
if return_targets:
return out, targ
else:
return out
def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
continueEpochs=10, validationProportion=0.25, outFile=None):
"""Train the module on the dataset until it converges.
Return the module with the parameters that gave the minimal validation
error.
If no dataset is given, the dataset passed during Trainer
initialization is used. validationProportion is the ratio of the dataset
that is used for the validation dataset.
If maxEpochs is given, at most that many epochs
are trained. Each time validation error hits a minimum, try for
continueEpochs epochs to find a better one."""
epochs = 0
if dataset == None:
dataset = self.ds
if verbose == None:
verbose = self.verbose
# Split the dataset randomly: validationProportion of the samples for
# validation.
trainingData, validationData = (
dataset.splitWithProportion(1 - validationProportion))
if not (len(trainingData) > 0 and len(validationData)):
raise ValueError("Provided dataset too small to be split into training " +
"and validation sets with proportion " + str(validationProportion))
self.ds = trainingData
bestweights = self.module.params.copy()
bestverr = self.testOnData(validationData)
trainingErrors = []
validationErrors = [bestverr]
while True:
trainingErrors.append(self.train())
validationErrors.append(self.testOnData(validationData))
if epochs == 0 or validationErrors[-1] < bestverr:
# one update is always done
bestverr = validationErrors[-1]
bestweights = self.module.params.copy()
if maxEpochs != None and epochs >= maxEpochs:
self.module.params[:] = bestweights
break
epochs += 1
if outFile != None :
pfile = open(outFile + '.net', 'w')
pickle.dump(self.module, pfile)
f = open(outFile, 'a')
f.write(str(epochs) + '\n')
f.write('train-errors:' + fListToString(trainingErrors, 6) + '\n')
f.write('valid-errors:' + fListToString(validationErrors, 6) + '\n')
f.close()
if len(validationErrors) >= continueEpochs * 2:
# have the validation errors started going up again?
# compare the average of the last few to the previous few
old = validationErrors[-continueEpochs * 2:-continueEpochs]
new = validationErrors[-continueEpochs:]
if min(new) > max(old):
self.module.params[:] = bestweights
break
trainingErrors.append(self.testOnData(trainingData))
self.ds = dataset
if verbose:
print 'train-errors:', fListToString(trainingErrors, 6)
print 'valid-errors:', fListToString(validationErrors, 6)
return trainingErrors, validationErrors
| {
"content_hash": "b9f3d5c8bc2be5181dd79edafc97b671",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 96,
"avg_line_length": 40.66403162055336,
"alnum_prop": 0.5867029548989113,
"repo_name": "hassaanm/stock-trading",
"id": "061bb6d0fd809a992d8d866ff3372458f3a63e62",
"size": "10288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pybrain/supervised/trainers/backprop.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "822"
},
{
"name": "C++",
"bytes": "18096"
},
{
"name": "JavaScript",
"bytes": "19227"
},
{
"name": "Perl",
"bytes": "1924"
},
{
"name": "Python",
"bytes": "2461668"
},
{
"name": "Shell",
"bytes": "3384"
}
],
"symlink_target": ""
} |
import os
from heat.engine.resources.hwcloud.hws_service.hws_client import HWSClient
from heat.engine.resources.cloudmanager.util.retry_decorator import RetryDecorator
from heat.openstack.common import log as logging
from heat.engine.resources.cloudmanager.util.cloud_manager_exception import *
from heat.engine.resources.cloudmanager.exception import *
import time
import heat.engine.resources.cloudmanager.constant as constant
from heat.engine.resources.cloudmanager.commonutils import *
RSP_STATUS = "status"
RSP_BODY = "body"
RSP_STATUS_OK = "2"
MAX_RETRY = 50
#unit=second
SLEEP_TIME = 3
MAX_CHECK_TIMES = 2000
LOG = logging.getLogger(__name__)
def start_hws_gateway(host_ip, user, passwd):
execute_cmd_without_stdout(
host=host_ip, user=user, password=passwd,
cmd='cd %(dis)s; sh %(script)s start'
% {"dis": constant.PatchesConstant.REMOTE_HWS_SCRIPTS_DIR,
"script":
constant.PatchesConstant.START_HWS_GATEWAY_SCRIPT}
)
def stop_hws_gateway(host_ip, user, password):
LOG.info("start hws java gateway ...")
execute_cmd_without_stdout(
host=host_ip, user=user, password=password,
cmd='cd %(dis)s; sh %(script)s stop'
% {"dis": constant.PatchesConstant.REMOTE_HWS_SCRIPTS_DIR,
"script":
constant.PatchesConstant.START_HWS_GATEWAY_SCRIPT}
)
class HwsInstaller(object):
def __init__(self, ak, sk, region, protocol, host, port, project_id):
self.hws_client = HWSClient(ak, sk, region, protocol, host, port)
self.project_id = project_id
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="create vm"))
def create_vm(self, image_ref, flavor_ref, name, vpcid, nics_subnet_list, root_volume_type,availability_zone,
personality_path=None, personality_contents=None, adminPass=None, public_ip_id=None, count=None,
data_volumes=None, security_groups=None, key_name=None):
result = self.hws_client.ecs.create_server(self.project_id, image_ref, flavor_ref, name, vpcid, nics_subnet_list, root_volume_type,
availability_zone, personality_path, personality_contents, adminPass, public_ip_id, count,
data_volumes, security_groups, key_name)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create cascaded vm")
return result[RSP_BODY]["job_id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=UninstallCascadedFailed(
current_step="delete vm"))
def delete_vm(self, server_id_list, delete_public_ip, delete_volume):
result = self.hws_client.ecs.delete_server\
(self.project_id, server_id_list, delete_public_ip, delete_volume)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="delete cascaded vm")
return result[RSP_BODY]["job_id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="create vm"))
def create_vpc(self, name, cidr):
result = self.hws_client.vpc.create_vpc(self.project_id, name, cidr)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create vpc")
return result[RSP_BODY]["vpc"]["id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=UninstallCascadedFailed(
current_step="delete vpc"))
def delete_vpc(self, vpc_id):
result = self.hws_client.vpc.delete_vpc(self.project_id, vpc_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="delete vpc")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="create subnet"))
def create_subnet(self, name, cidr, availability_zone, gateway_ip, vpc_id,
dhcp_enable=None, primary_dns=None, secondary_dns=None):
result = self.hws_client.vpc.create_subnet(self.project_id, name, cidr,
availability_zone, gateway_ip, vpc_id,
dhcp_enable, primary_dns, secondary_dns)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create subnet")
return result[RSP_BODY]["subnet"]["id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=UninstallCascadedFailed(
current_step="delete subnet"))
def delete_subnet(self, vpc_id, subnet_id):
result = self.hws_client.vpc.delete_subnet(self.project_id, vpc_id, subnet_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="delete subnet")
return result[RSP_BODY]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get job detail"))
def get_job_detail(self, job_id):
result = self.hws_client.vpc.get_job_detail(self.project_id, job_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get job detail")
return result[RSP_BODY]
def block_until_delete_resource_success(self, job_id):
for i in range(MAX_CHECK_TIMES):
result = self.get_job_detail(job_id)
status = result[RSP_STATUS]
if status == "FAILED":
raise InstallCascadedFailed(current_step="delete resource")
elif status == "SUCCESS":
return
else:
time.sleep(3)
pass
def block_until_create_vm_success(self, job_id):
server_id = None
for i in range(MAX_CHECK_TIMES):
result = self.get_job_detail(job_id)
status = result[RSP_STATUS]
if status == "FAILED":
break
elif status == "SUCCESS":
server_id = result['entities']['sub_jobs'][0]["entities"]["server_id"]
break
else:
time.sleep(SLEEP_TIME)
if server_id is None:
raise InstallCascadedFailed(current_step="create vm")
return server_id
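    # Hedged usage sketch of the create-then-poll pattern used by callers of
    # this class (all identifiers below are placeholders, not real values):
    #     installer = HwsInstaller(ak, sk, region, "https", host, port, project_id)
    #     job_id = installer.create_vm(image_ref, flavor_ref, "cascaded", vpc_id,
    #                                  nics, "SATA", availability_zone)
    #     server_id = installer.block_until_create_vm_success(job_id)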
def block_until_create_nic_success(self, job_id):
nic_id = None
for i in range(MAX_CHECK_TIMES):
result = self.get_job_detail(job_id)
status = result[RSP_STATUS]
if status == "FAILED":
break
elif status == "SUCCESS":
nic_id = result['entities']['sub_jobs'][0]["entities"]["nic_id"]
break
else:
time.sleep(3)
if nic_id is None:
raise InstallCascadedFailed(current_step="create nic")
return nic_id
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get server nics info"))
def get_all_nics(self, server_id):
result = self.hws_client.ecs.get_all_nics(self.project_id, server_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="get server incs info")
return result[RSP_BODY]["interfaceAttachments"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get server ips"))
def get_server_ips(self, server_id):
result = self.hws_client.ecs.get_server_ips(self.project_id, server_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="get server ips")
return result[RSP_BODY]["interfaceAttachments"]["fixed_ips"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get free public ip"))
def alloc_public_ip(self, name):
result = self.hws_client.vpc.list_public_ips(self.project_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get free public ip")
free_ip = None
public_ips = result[RSP_BODY]["publicips"]
for ip in public_ips:
if ip["status"] == "DOWN":
free_ip = ip
return free_ip
if free_ip is None:
publicip = dict()
bandwidth = dict()
publicip["type"]="5_bgp"
bandwidth["name"]=name
bandwidth["size"]=100
bandwidth["share_type"]="PER"
bandwidth["charge_mode"]= "traffic"
result = self.hws_client.vpc.create_public_ip(self.project_id, publicip, bandwidth)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create public ip")
free_ip = result[RSP_BODY]["publicip"]
return free_ip
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=UninstallCascadedFailed(
current_step="release public ip"))
def release_public_ip(self, public_ip_id):
result = self.hws_client.vpc.delete_public_ip(self.project_id, public_ip_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise UninstallCascadedFailed(current_step="release public ip")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get security group"))
def get_security_group(self, vpc_id):
opts = dict()
opts["vpc_id"] = vpc_id
result = self.hws_client.vpc.list_security_groups(self.project_id,opts = opts)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get security group")
security_groups = result[RSP_BODY]["security_groups"]
for security_group in security_groups:
if security_group["name"] == "default":
return security_group["id"]
return security_groups[0]["id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="create security group rule"))
def create_security_group_rule(self, security_group_id, direction, ethertype):
result = self.hws_client.vpc.create_security_group_rule(
self.project_id, security_group_id, direction, ethertype)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="create security group rule")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get_external_api_port_id"))
def get_external_api_port_id(self, server_id, external_api_nic_id):
result = self.hws_client.ecs.get_nic_info(self.project_id, server_id, external_api_nic_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get_external_api_port_id")
interfaceAttachment = result[RSP_BODY]["interfaceAttachment"]
return interfaceAttachment["port_id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="bind public ip to cascaded"))
def bind_public_ip(self, public_ip_id, port_id):
result = self.hws_client.vpc.bind_public_ip(
self.project_id, public_ip_id, port_id)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="bind public ip to cascaded")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="add nics to vm"))
def add_nics(self, server_id, subnet_id, security_groups, ip_address = None):
nic = dict()
nic["subnet_id"] = subnet_id
nic["security_groups"] = security_groups
if ip_address:
nic["ip_address"] = ip_address
nics = [nic]
result = self.hws_client.ecs.add_nics(
self.project_id, server_id, nics)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="add nics to cascaded")
return result[RSP_BODY]["job_id"]
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="reboot cascaded"))
def reboot(self, server_id, type):
result = self.hws_client.ecs.reboot(self.project_id, server_id, type)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="reboot cascaded")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="unbound vpn ip-mac"))
def unbound_ip_mac(self, port_id, mac_address):
allowed_address_pairs = []
#allow all ip_addresses to access
pair1={"ip_address":"0.0.0.1/1",
"mac_address":mac_address}
pair2={"ip_address":"128.0.0.0/1",
"mac_address":mac_address}
allowed_address_pairs.append(pair1)
allowed_address_pairs.append(pair2)
result = self.hws_client.vpc.update_port(port_id, allowed_address_pairs=allowed_address_pairs)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="unbound vpn ip-mac")
@RetryDecorator(max_retry_count=MAX_RETRY,
raise_exception=InstallCascadedFailed(
current_step="get image id"))
def get_image_id(self, name):
result = self.hws_client.ims.list(name=name)
status = str(result[RSP_STATUS])
if not status.startswith(RSP_STATUS_OK):
LOG.error(result)
raise InstallCascadedFailed(current_step="get image id")
image_id = result[RSP_BODY]["images"][0]["id"]
        return image_id
| {
"content_hash": "9ea32a326efb0f50ecd9f8913c6a4f08",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 139,
"avg_line_length": 43.148459383753504,
"alnum_prop": 0.6195793300441443,
"repo_name": "Hybrid-Cloud/orchard",
"id": "da32de801d1b89c0d799c7c5a5b972bac3b73cbf",
"size": "15404",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "code/cloudmanager/install/hws/hws_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1067396"
},
{
"name": "Shell",
"bytes": "49859"
}
],
"symlink_target": ""
} |
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create some random points in the unit cube centered at (.5,.5,.5)
#
math = vtk.vtkMath()
points = vtk.vtkPoints()
i = 0
while i < 25:
points.InsertPoint(i,math.Random(0,1),math.Random(0,1),math.Random(0,1))
i = i + 1
profile = vtk.vtkPolyData()
profile.SetPoints(points)
# triangulate them
#
del1 = vtk.vtkDelaunay3D()
del1.SetInputData(profile)
del1.BoundingTriangulationOn()
del1.SetTolerance(0.01)
del1.SetAlpha(0.2)
del1.BoundingTriangulationOff()
shrink = vtk.vtkShrinkFilter()
shrink.SetInputConnection(del1.GetOutputPort())
shrink.SetShrinkFactor(0.9)
map = vtk.vtkDataSetMapper()
map.SetInputConnection(shrink.GetOutputPort())
triangulation = vtk.vtkActor()
triangulation.SetMapper(map)
triangulation.GetProperty().SetColor(1,0,0)
# Create graphics stuff
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(triangulation)
ren1.SetBackground(1,1,1)
renWin.SetSize(250,250)
renWin.Render()
cam1 = ren1.GetActiveCamera()
cam1.Zoom(1.5)
# render the image
#
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
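# A hedged note, not in the original test: an interactive run would normally
# call iren.Initialize() and iren.Start() here; the regression test leaves the
# event loop out on purpose.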
| {
"content_hash": "26db629f5986f7b316b40bd69d63ce98",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 76,
"avg_line_length": 27.346153846153847,
"alnum_prop": 0.7369901547116737,
"repo_name": "timkrentz/SunTracker",
"id": "ed7831c9b897fc01067943923ac7a7f23841d02f",
"size": "1445",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/Filters/Core/Testing/Python/Delaunay3D.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
} |
"""Add ``password`` column to ``user`` table
Revision ID: 561833c1c74b
Revises: 40e67319e3a9
Create Date: 2015-11-30 06:51:25.872557
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '561833c1c74b'
down_revision = '40e67319e3a9'
branch_labels = None
depends_on = None
airflow_version = '1.6.2'
def upgrade():
op.add_column('user', sa.Column('password', sa.String(255)))
def downgrade():
op.drop_column('user', 'password')
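# Hedged usage note (not part of the original migration): with Alembic on the
# path, this revision can be applied directly, e.g.
#     alembic upgrade 561833c1c74b
# although Airflow normally drives its migrations through its own db upgrade command.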
| {
"content_hash": "17416511b3aae3242ac38d330f5fba3e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 64,
"avg_line_length": 20.23076923076923,
"alnum_prop": 0.7129277566539924,
"repo_name": "nathanielvarona/airflow",
"id": "3578fd1e7bdb0a0f2c4c08a190ac2dad47c194ae",
"size": "1313",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/migrations/versions/0010_1_6_2_add_password_column_to_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
} |
from softmax import Softmax
from mlp import MLP
from cnn import CNN
from vae import VAE
from sbn import SBN
from adgm import ADGM
from hdgm import HDGM
from dadgm import DADGM
from convvae import ConvVAE
from convadgm import ConvADGM
# load some models that require the latest version of Lasagne
try:
from resnet import Resnet
except:
print 'WARNING: Could not import the Resnet model'
try:
from dcgan import DCGAN
except:
print 'WARNING: Could not import the DCGAN model'
try:
from ssadgm import SSADGM
except:
    print 'WARNING: Could not import the SSADGM model'
| {
"content_hash": "30efa5aaa8278db20559a4ba971b17a8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 61,
"avg_line_length": 22.346153846153847,
"alnum_prop": 0.7814113597246127,
"repo_name": "kuleshov/deep-learning-models",
"id": "cb5c7dd6166fa90ab2d9e214de5267cbe1467cb5",
"size": "581",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "750"
},
{
"name": "Python",
"bytes": "172373"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
} |
from tkFont import Font
from Tkinter import (Button, Frame, IntVar, Label,
Listbox, Menu, Scrollbar, Tk)
from nltk.draw.util import CanvasFrame, ShowText
from nltk.util import in_idle
from nltk.tag import RegexpTagger
from nltk.parse import MaltParser
from nltk.sem.logic import Variable
from nltk.sem.drt import DrsDrawer, DrtVariableExpression
from nltk.sem.glue import DrtGlue
class DrtGlueDemo(object):
def __init__(self, examples):
# Set up the main window.
self._top = Tk()
self._top.title('DRT Glue Demo')
# Set up key bindings.
self._init_bindings()
        # Initialize the fonts.
self._init_fonts(self._top)
self._examples = examples
self._readingCache = [None for example in examples]
# The user can hide the grammar.
self._show_grammar = IntVar(self._top)
self._show_grammar.set(1)
# Set the data to None
self._curExample = -1
self._readings = []
self._drs = None
self._drsWidget = None
self._error = None
self._init_glue()
# Create the basic frames.
self._init_menubar(self._top)
self._init_buttons(self._top)
self._init_exampleListbox(self._top)
self._init_readingListbox(self._top)
self._init_canvas(self._top)
# Resize callback
self._canvas.bind('<Configure>', self._configure)
#########################################
## Initialization Helpers
#########################################
def _init_glue(self):
tagger = RegexpTagger(
[('^(David|Mary|John)$', 'NNP'),
('^(walks|sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$', 'VB'),
('^(go|order|vanish|find|approach)$', 'VB'),
('^(a)$', 'ex_quant'),
('^(every)$', 'univ_quant'),
('^(sandwich|man|dog|pizza|unicorn|cat|senator)$', 'NN'),
('^(big|gray|former)$', 'JJ'),
('^(him|himself)$', 'PRP')
])
depparser = MaltParser(tagger=tagger)
self._glue = DrtGlue(depparser=depparser, remove_duplicates=False)
def _init_fonts(self, root):
# See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
self._sysfont = Font(font=Button()["font"])
root.option_add("*Font", self._sysfont)
        # What's our font size (default=same as sysfont)
self._size = IntVar(root)
self._size.set(self._sysfont.cget('size'))
self._boldfont = Font(family='helvetica', weight='bold',
size=self._size.get())
self._font = Font(family='helvetica',
size=self._size.get())
if self._size.get() < 0: big = self._size.get()-2
else: big = self._size.get()+2
self._bigfont = Font(family='helvetica', weight='bold',
size=big)
def _init_exampleListbox(self, parent):
self._exampleFrame = listframe = Frame(parent)
self._exampleFrame.pack(fill='both', side='left', padx=2)
self._exampleList_label = Label(self._exampleFrame, font=self._boldfont,
text='Examples')
self._exampleList_label.pack()
self._exampleList = Listbox(self._exampleFrame, selectmode='single',
relief='groove', background='white',
foreground='#909090', font=self._font,
selectforeground='#004040',
selectbackground='#c0f0c0')
self._exampleList.pack(side='right', fill='both', expand=1)
for example in self._examples:
self._exampleList.insert('end', (' %s' % example))
self._exampleList.config(height=min(len(self._examples), 25), width=40)
# Add a scrollbar if there are more than 25 examples.
if len(self._examples) > 25:
listscroll = Scrollbar(self._exampleFrame,
orient='vertical')
self._exampleList.config(yscrollcommand = listscroll.set)
listscroll.config(command=self._exampleList.yview)
listscroll.pack(side='left', fill='y')
# If they select a example, apply it.
self._exampleList.bind('<<ListboxSelect>>', self._exampleList_select)
def _init_readingListbox(self, parent):
self._readingFrame = listframe = Frame(parent)
self._readingFrame.pack(fill='both', side='left', padx=2)
self._readingList_label = Label(self._readingFrame, font=self._boldfont,
text='Readings')
self._readingList_label.pack()
self._readingList = Listbox(self._readingFrame, selectmode='single',
relief='groove', background='white',
foreground='#909090', font=self._font,
selectforeground='#004040',
selectbackground='#c0f0c0')
self._readingList.pack(side='right', fill='both', expand=1)
# Add a scrollbar if there are more than 25 examples.
listscroll = Scrollbar(self._readingFrame,
orient='vertical')
self._readingList.config(yscrollcommand = listscroll.set)
listscroll.config(command=self._readingList.yview)
listscroll.pack(side='right', fill='y')
self._populate_readingListbox()
def _populate_readingListbox(self):
# Populate the listbox with integers
self._readingList.delete(0, 'end')
for i in range(len(self._readings)):
self._readingList.insert('end', (' %s' % (i+1)))
self._readingList.config(height=min(len(self._readings), 25), width=5)
# If they select a example, apply it.
self._readingList.bind('<<ListboxSelect>>', self._readingList_select)
def _init_bindings(self):
# Key bindings are a good thing.
self._top.bind('<Control-q>', self.destroy)
self._top.bind('<Control-x>', self.destroy)
self._top.bind('<Escape>', self.destroy)
self._top.bind('n', self.next)
self._top.bind('<space>', self.next)
self._top.bind('p', self.prev)
self._top.bind('<BackSpace>', self.prev)
def _init_buttons(self, parent):
# Set up the frames.
self._buttonframe = buttonframe = Frame(parent)
buttonframe.pack(fill='none', side='bottom', padx=3, pady=2)
Button(buttonframe, text='Prev',
background='#90c0d0', foreground='black',
command=self.prev,).pack(side='left')
Button(buttonframe, text='Next',
background='#90c0d0', foreground='black',
command=self.next,).pack(side='left')
def _configure(self, event):
self._autostep = 0
(x1, y1, x2, y2) = self._cframe.scrollregion()
y2 = event.height - 6
self._canvas['scrollregion'] = '%d %d %d %d' % (x1,y1,x2,y2)
self._redraw()
def _init_canvas(self, parent):
self._cframe = CanvasFrame(parent, background='white',
#width=525, height=250,
closeenough=10,
border=2, relief='sunken')
self._cframe.pack(expand=1, fill='both', side='top', pady=2)
canvas = self._canvas = self._cframe.canvas()
# Initially, there's no tree or text
self._tree = None
self._textwidgets = []
self._textline = None
def _init_menubar(self, parent):
menubar = Menu(parent)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label='Exit', underline=1,
command=self.destroy, accelerator='q')
menubar.add_cascade(label='File', underline=0, menu=filemenu)
actionmenu = Menu(menubar, tearoff=0)
actionmenu.add_command(label='Next', underline=0,
command=self.next, accelerator='n, Space')
actionmenu.add_command(label='Previous', underline=0,
command=self.prev, accelerator='p, Backspace')
menubar.add_cascade(label='Action', underline=0, menu=actionmenu)
optionmenu = Menu(menubar, tearoff=0)
optionmenu.add_checkbutton(label='Remove Duplicates', underline=0,
variable=self._glue.remove_duplicates,
command=self._toggle_remove_duplicates,
accelerator='r')
menubar.add_cascade(label='Options', underline=0, menu=optionmenu)
viewmenu = Menu(menubar, tearoff=0)
viewmenu.add_radiobutton(label='Tiny', variable=self._size,
underline=0, value=10, command=self.resize)
viewmenu.add_radiobutton(label='Small', variable=self._size,
underline=0, value=12, command=self.resize)
viewmenu.add_radiobutton(label='Medium', variable=self._size,
underline=0, value=14, command=self.resize)
viewmenu.add_radiobutton(label='Large', variable=self._size,
underline=0, value=18, command=self.resize)
viewmenu.add_radiobutton(label='Huge', variable=self._size,
underline=0, value=24, command=self.resize)
menubar.add_cascade(label='View', underline=0, menu=viewmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label='About', underline=0,
command=self.about)
menubar.add_cascade(label='Help', underline=0, menu=helpmenu)
parent.config(menu=menubar)
#########################################
## Main draw procedure
#########################################
def _redraw(self):
canvas = self._canvas
# Delete the old DRS, widgets, etc.
if self._drsWidget is not None:
self._drsWidget.clear()
if self._drs:
self._drsWidget = DrsWidget( self._canvas, self._drs )
self._drsWidget.draw()
if self._error:
self._drsWidget = DrsWidget( self._canvas, self._error )
self._drsWidget.draw()
#########################################
## Button Callbacks
#########################################
def destroy(self, *e):
self._autostep = 0
if self._top is None: return
self._top.destroy()
self._top = None
def prev(self, *e):
selection = self._readingList.curselection()
readingListSize = self._readingList.size()
# there are readings
if readingListSize > 0:
# if one reading is currently selected
if len(selection) == 1:
index = int(selection[0])
# if it's on (or before) the first item
if index <= 0:
self._select_previous_example()
else:
self._readingList_store_selection(index-1)
else:
#select its first reading
self._readingList_store_selection(readingListSize-1)
else:
self._select_previous_example()
def _select_previous_example(self):
#if the current example is not the first example
if self._curExample > 0:
self._exampleList_store_selection(self._curExample-1)
else:
#go to the last example
self._exampleList_store_selection(len(self._examples)-1)
def next(self, *e):
selection = self._readingList.curselection()
readingListSize = self._readingList.size()
# if there are readings
if readingListSize > 0:
# if one reading is currently selected
if len(selection) == 1:
index = int(selection[0])
# if it's on (or past) the last item
if index >= (readingListSize-1):
self._select_next_example()
else:
self._readingList_store_selection(index+1)
else:
#select its first reading
self._readingList_store_selection(0)
else:
self._select_next_example()
def _select_next_example(self):
#if the current example is not the last example
if self._curExample < len(self._examples)-1:
self._exampleList_store_selection(self._curExample+1)
else:
#go to the first example
self._exampleList_store_selection(0)
def about(self, *e):
ABOUT = ("NLTK Discourse Representation Theory (DRT) Glue Semantics Demo\n"+
"Written by Daniel H. Garrette")
TITLE = 'About: NLTK DRT Glue Demo'
try:
from tkMessageBox import Message
Message(message=ABOUT, title=TITLE).show()
except:
ShowText(self._top, TITLE, ABOUT)
def postscript(self, *e):
self._autostep = 0
self._cframe.print_to_file()
def mainloop(self, *args, **kwargs):
"""
Enter the Tkinter mainloop. This function must be called if
this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
the script completes.
"""
if in_idle(): return
self._top.mainloop(*args, **kwargs)
def resize(self, size=None):
if size is not None: self._size.set(size)
size = self._size.get()
self._font.configure(size=-(abs(size)))
self._boldfont.configure(size=-(abs(size)))
self._sysfont.configure(size=-(abs(size)))
self._bigfont.configure(size=-(abs(size+2)))
self._redraw()
def _toggle_remove_duplicates(self):
self._glue.remove_duplicates = not self._glue.remove_duplicates
self._exampleList.selection_clear(0, 'end')
self._readings = []
self._populate_readingListbox()
self._readingCache = [None for ex in self._examples]
self._curExample = -1
self._error = None
self._drs = None
self._redraw()
def _exampleList_select(self, event):
selection = self._exampleList.curselection()
if len(selection) != 1: return
self._exampleList_store_selection(int(selection[0]))
def _exampleList_store_selection(self, index):
self._curExample = index
example = self._examples[index]
self._exampleList.selection_clear(0, 'end')
if example:
cache = self._readingCache[index]
if cache:
if isinstance(cache, list):
self._readings = cache
self._error = None
else:
self._readings = []
self._error = cache
else:
try:
self._readings = self._glue.parse_to_meaning(example)
self._error = None
self._readingCache[index] = self._readings
except Exception as e:
self._readings = []
self._error = DrtVariableExpression(Variable('Error: ' + str(e)))
self._readingCache[index] = self._error
#add a star to the end of the example
self._exampleList.delete(index)
self._exampleList.insert(index, (' %s *' % example))
self._exampleList.config(height=min(len(self._examples), 25), width=40)
self._populate_readingListbox()
self._exampleList.selection_set(index)
self._drs = None
self._redraw()
def _readingList_select(self, event):
selection = self._readingList.curselection()
if len(selection) != 1: return
self._readingList_store_selection(int(selection[0]))
def _readingList_store_selection(self, index):
reading = self._readings[index]
self._readingList.selection_clear(0, 'end')
if reading:
self._readingList.selection_set(index)
self._drs = reading.simplify().normalize().resolve_anaphora()
self._redraw()
class DrsWidget(object):
def __init__(self, canvas, drs, **attribs):
self._drs = drs
self._canvas = canvas
canvas.font = Font(font=canvas.itemcget(canvas.create_text(0, 0, text=''), 'font'))
canvas._BUFFER = 3
self.bbox = (0, 0, 0, 0)
def draw(self):
        (right, bottom) = DrsDrawer(self._drs, canvas=self._canvas).draw()
self.bbox = (0, 0, right+1, bottom+1)
def clear(self):
self._canvas.create_rectangle(self.bbox, fill="white", width="0" )
def demo():
examples = ['John walks',
'David sees Mary',
'David eats a sandwich',
'every man chases a dog',
# 'every man believes a dog yawns',
# 'John gives David a sandwich',
'John chases himself',
# 'John persuades David to order a pizza',
# 'John tries to go',
# 'John tries to find a unicorn',
# 'John seems to vanish',
# 'a unicorn seems to approach',
# 'every big cat leaves',
# 'every gray cat leaves',
# 'every big gray cat leaves',
# 'a former senator leaves',
# 'John likes a cat',
# 'John likes every cat',
# 'he walks',
# 'John walks and he leaves'
]
DrtGlueDemo(examples).mainloop()
if __name__ == '__main__': demo()
| {
"content_hash": "1dfc035ada1b4c013d2c69d96c123a18",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 108,
"avg_line_length": 37.970338983050844,
"alnum_prop": 0.5431313469478852,
"repo_name": "Jaemu/haiku.py",
"id": "1bce103e63147d2ce0c3f170f7f192416a9ce137",
"size": "18232",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nltk/sem/drt_glue_demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6075"
},
{
"name": "HTML",
"bytes": "4905"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Makefile",
"bytes": "178"
},
{
"name": "OpenEdge ABL",
"bytes": "8895766"
},
{
"name": "Python",
"bytes": "4745673"
}
],
"symlink_target": ""
} |
import os, socket, binascii
from twisted.internet import reactor
from autobahn.websocket import WebSocketClientFactory, WebSocketClientProtocol
from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol
class BroadcastServerProtocol(WebSocketServerProtocol):
def onOpen(self):
self.factory.register(self)
def onClose(self, wasClean, code, reason):
self.factory.unregister(self)
def onMessage(self, msg, binary):
self.factory.broadcast(msg, binary)
class BroadcastServerFactory(WebSocketServerFactory):
protocol = BroadcastServerProtocol
def __init__(self, url, debug = False):
WebSocketServerFactory.__init__(self, url, debug = debug, debugCodePaths = debug)
def startFactory(self):
self.clients = set()
self.tickcount = 0
self.tick()
# def stopFactory(self):
# reactor.stop()
def register(self, client):
self.clients.add(client)
def unregister(self, client):
self.clients.discard(client)
def broadcast(self, msg, binary = False):
for c in self.clients:
c.sendMessage(msg, binary)
def tick(self):
self.tickcount += 1
self.broadcast("tick %d" % self.tickcount)
reactor.callLater(1, self.tick)
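# Hedged usage sketch (listenWS is assumed to be available from the same old
# autobahn.websocket API imported above; the URL is a placeholder):
#     factory = BroadcastServerFactory("ws://localhost:9000", debug=False)
#     listenWS(factory)
#     reactor.run()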
class BroadcastClientProtocol(WebSocketClientProtocol):
def sendHello(self):
self.sendMessage("hello from %s[%d]" % (socket.gethostname(), os.getpid()))
reactor.callLater(2, self.sendHello)
def onOpen(self):
self.sendHello()
def onMessage(self, msg, binary):
if binary:
print "received: ", binascii.b2a_hex(msg)
else:
print "received: ", msg
class BroadcastClientFactory(WebSocketClientFactory):
protocol = BroadcastClientProtocol
def __init__(self, url, debug = False):
WebSocketClientFactory.__init__(self, url, debug = debug, debugCodePaths = debug)
# def clientConnectionLost(self, connector, reason):
# reactor.stop()
| {
"content_hash": "08e8a408b5857cdb0b127640c260c56a",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 87,
"avg_line_length": 26.14666666666667,
"alnum_prop": 0.6930137684854666,
"repo_name": "normanmaurer/AutobahnTestSuite",
"id": "9d72ec66bee6eb4b00ee826f755973494329414f",
"size": "2732",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "autobahntestsuite/autobahntestsuite/broadcast.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "2806"
},
{
"name": "Python",
"bytes": "386581"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../python_code'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
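# A hedged note (not generated by sphinx-quickstart): with autodoc enabled and
# ../python_code added to sys.path above, the docs are typically built from
# this directory with `make html` or `sphinx-build -b html . _build/html`.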
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Cavro XP3000 GUI'
copyright = u'2014, Nikos Koukis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CavroXP3000GUIdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CavroXP3000GUI.tex', u'Cavro XP3000 GUI Documentation',
u'Nikos Koukis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cavroxp3000gui', u'Cavro XP3000 GUI Documentation',
[u'Nikos Koukis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CavroXP3000GUI', u'Cavro XP3000 GUI Documentation',
u'Nikos Koukis', 'CavroXP3000GUI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "c6ff8d5be671f2ce29decccb77d9d462",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 79,
"avg_line_length": 31.762096774193548,
"alnum_prop": 0.7058524819093563,
"repo_name": "bergercookie/Pump3000",
"id": "1c26be1d615b30e28c0e581abed62fb8f12904eb",
"size": "8306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "16424"
},
{
"name": "JavaScript",
"bytes": "54587"
},
{
"name": "Makefile",
"bytes": "8885"
},
{
"name": "Python",
"bytes": "240687"
},
{
"name": "Shell",
"bytes": "7265"
},
{
"name": "TeX",
"bytes": "93674"
}
],
"symlink_target": ""
} |
"""
argparse supplements
"""
# ----------------------------------------------------------------------------
from __future__ import absolute_import
import argparse
import os
import re
import shlex
__all__ = [
"action",
"type",
]
class _Registry(object):
pass
def _register(registry, name, value):
setattr(registry, name, value)
# Types ----------------------------------------------------------------------
type = _Registry()
def type_bool(string):
"""
A strict parser for bools
unlike Python's `bool()`, where `bool('False')` is `True`
This function can be passed as `type=` argument to argparse to parse values
passed to command line arguments.
"""
if string in ['0', 'false', 'False']:
return False
if string in ['1', 'true', 'True']:
return True
raise argparse.ArgumentTypeError("%r is not a boolean value" % string)
_register(type, 'bool', type_bool)
def type_shell_split(string):
"""
Parse and split shell arguments string into a list of shell arguments.
Recognize `,` as a separator as well as white spaces.
string: -BAR="foo bar" -BAZ='foo,bar',-QUX 42
into
['-BAR=foo bar', '-BAZ=foo,bar', "-QUX", "42"]
"""
lex = shlex.shlex(string, posix=True)
lex.whitespace_split = True
lex.whitespace += ','
return list(lex)
_register(type, 'shell_split', type_shell_split)
class CompilerVersion(object):
"""A typed representation of a compiler version."""
def __init__(self, string_representation, components):
self.string_representation = string_representation
self.components = components
def __str__(self):
return self.string_representation
def type_clang_compiler_version(string):
"""
Parse version string and split into a tuple of strings
(major, minor, patch)
Supports "MAJOR.MINOR.PATCH" and "MAJOR.MINOR.PATCH.PATCH" formats.
"""
m = re.match(r'^([0-9]+)\.([0-9]+)\.([0-9]+)(\.([0-9]+))?$', string)
if m is not None:
return CompilerVersion(
string_representation=string,
components=m.group(1, 2, 3, 5))
raise argparse.ArgumentTypeError(
"%r is an invalid version value, "
"must be 'MAJOR.MINOR.PATCH' or "
"'MAJOR.MINOR.PATCH.PATCH'" % string)
_register(type, 'clang_compiler_version', type_clang_compiler_version)
def type_swift_compiler_version(string):
"""
Parse version string and split into a tuple of strings
(major, minor, patch)
Supports "MAJOR.MINOR" and "MAJOR.MINOR.PATCH" formats.
"""
m = re.match(r'^([0-9]+)\.([0-9]+)(\.([0-9]+))?$', string)
if m is not None:
return CompilerVersion(
string_representation=string,
components=m.group(1, 2, 4))
raise argparse.ArgumentTypeError(
"%r is an invalid version value, "
"must be 'MAJOR.MINOR' or "
"'MAJOR.MINOR.PATCH'" % string)
_register(type, 'swift_compiler_version', type_swift_compiler_version)
def type_executable(string):
"""
Check the string is executable path string.
Convert it to absolute path.
"""
if os.path.isfile(string) and os.access(string, os.X_OK):
return os.path.abspath(string)
raise argparse.ArgumentTypeError(
"%r is not executable" % string)
_register(type, 'executable', type_executable)
# Actions --------------------------------------------------------------------
action = _Registry()
class _UnavailableAction(argparse.Action):
def __init__(self,
option_strings,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
nargs='?',
help=None):
super(_UnavailableAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=nargs,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
if option_string is not None:
arg = option_string
else:
arg = str(values)
parser.error('unknown argument: %s' % arg)
_register(action, 'unavailable', _UnavailableAction)
class _ConcatAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
old_val = getattr(namespace, self.dest)
if old_val is None:
val = values
else:
val = old_val + values
setattr(namespace, self.dest, val)
_register(action, 'concat', _ConcatAction)
class _OptionalBoolAction(argparse.Action):
def __init__(self,
option_strings,
dest,
default=False,
metavar="BOOL",
help=None):
super(_OptionalBoolAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
metavar=metavar,
nargs="?",
type=type.bool,
help=help,
const=True)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
_register(action, 'optional_bool', _OptionalBoolAction)
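A brief usage sketch may help tie the registered helpers together. This is an illustration rather than part of the original module: the option names are hypothetical, and the import assumes the package layout implied by the file's path (swift_build_support/arguments.py).
import argparse

from swift_build_support import arguments  # assumed import path

parser = argparse.ArgumentParser()
# strict boolean parsing: only 0/1/true/false/True/False are accepted
parser.add_argument('--assertions', action=arguments.action.optional_bool)
# comma- or whitespace-separated tokens become a list
parser.add_argument('--extra-cmake-options', type=arguments.type.shell_split)
# version strings are validated and split into components
parser.add_argument('--clang-version',
                    type=arguments.type.clang_compiler_version)

args = parser.parse_args([
    '--assertions', 'true',
    '--extra-cmake-options', '-DFOO=1,-DBAR=2',
    '--clang-version', '3.8.0',
])
assert args.assertions is True
assert args.extra_cmake_options == ['-DFOO=1', '-DBAR=2']
assert args.clang_version.components == ('3', '8', '0', None)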
| {
"content_hash": "a21336d2dc10f93d8bf6be77ea828718",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 27.45263157894737,
"alnum_prop": 0.5732361963190185,
"repo_name": "ben-ng/swift",
"id": "b8e3eaef3cd04576dc2c725b23bacac0beaa1174",
"size": "5722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/swift_build_support/swift_build_support/arguments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2144"
},
{
"name": "C",
"bytes": "48755"
},
{
"name": "C++",
"bytes": "21092897"
},
{
"name": "CMake",
"bytes": "323862"
},
{
"name": "DTrace",
"bytes": "3545"
},
{
"name": "Emacs Lisp",
"bytes": "54288"
},
{
"name": "LLVM",
"bytes": "56821"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "224929"
},
{
"name": "Objective-C++",
"bytes": "206065"
},
{
"name": "Perl",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "635604"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "184114"
},
{
"name": "Swift",
"bytes": "15885146"
},
{
"name": "VimL",
"bytes": "13393"
}
],
"symlink_target": ""
} |
"""Constants for the ViCare integration."""
import enum
DOMAIN = "vicare"
PLATFORMS = ["climate", "sensor", "binary_sensor", "water_heater"]
VICARE_DEVICE_CONFIG = "device_conf"
VICARE_API = "api"
VICARE_NAME = "name"
VICARE_CIRCUITS = "circuits"
CONF_CIRCUIT = "circuit"
CONF_HEATING_TYPE = "heating_type"
DEFAULT_SCAN_INTERVAL = 60
class HeatingType(enum.Enum):
"""Possible options for heating type."""
auto = "auto"
gas = "gas"
oil = "oil"
pellets = "pellets"
heatpump = "heatpump"
fuelcell = "fuelcell"
DEFAULT_HEATING_TYPE = HeatingType.auto
HEATING_TYPE_TO_CREATOR_METHOD = {
HeatingType.auto: "asAutoDetectDevice",
HeatingType.gas: "asGazBoiler",
HeatingType.fuelcell: "asFuelCell",
HeatingType.heatpump: "asHeatPump",
HeatingType.oil: "asOilBoiler",
HeatingType.pellets: "asPelletsBoiler",
}
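As a rough illustration (an assumption about the consuming code, not part of this constants module), the creator-method mapping above is typically resolved with getattr against a PyViCare device-config object:
def create_vicare_device(device_config, heating_type=DEFAULT_HEATING_TYPE):
    """Return a typed PyViCare device for the configured heating type.

    `device_config` is a hypothetical PyViCare device-config object exposing
    the method names listed in HEATING_TYPE_TO_CREATOR_METHOD.
    """
    creator_method = HEATING_TYPE_TO_CREATOR_METHOD[heating_type]
    return getattr(device_config, creator_method)()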
| {
"content_hash": "13d4b60cd40aad7765181265c90f18ab",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 66,
"avg_line_length": 22.128205128205128,
"alnum_prop": 0.6859791425260718,
"repo_name": "aronsky/home-assistant",
"id": "2336ef40eaacd01d5cb156dd1c1d8424d0ae6f85",
"size": "863",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/vicare/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import unittest
from problem.geocode.geocodio import GeocodeCache, GeocodioGeocoder
class GeocodioTest(unittest.TestCase):
def test_geocode(self):
with GeocodioGeocoder() as geo:
self.assertEqual(
dict(lat=38.884999,
lng=-77.094806,
formatted_addr='1020 N Highland St, Arlington, VA 22201'),
geo.code('1020 North Highland Street unit 1109'
' Arlington, Virginia 22201'))
def test_cache(self):
c = GeocodeCache()
with GeocodioGeocoder() as geo:
c.insert_text_files(geo.log_dir)
| {
"content_hash": "01ac429ae83cf5d716c2e957e24e20ca",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 31.9,
"alnum_prop": 0.5862068965517241,
"repo_name": "jhanley634/testing-tools",
"id": "a3a05eb7a7a4d81c2be55456ae3e2967f500ae2c",
"size": "1723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem/geocode/geocodio_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6462"
},
{
"name": "C++",
"bytes": "1183"
},
{
"name": "Java",
"bytes": "1280"
},
{
"name": "Julia",
"bytes": "12786"
},
{
"name": "Jupyter Notebook",
"bytes": "20233"
},
{
"name": "Makefile",
"bytes": "23635"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "443787"
},
{
"name": "R",
"bytes": "2161"
},
{
"name": "Rust",
"bytes": "3199"
},
{
"name": "Shell",
"bytes": "5724"
},
{
"name": "TeX",
"bytes": "129"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Recipe
admin.site.register(Recipe)
| {
"content_hash": "fec8f1abe5371dae2e0586fcebafacad",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 18,
"alnum_prop": 0.8111111111111111,
"repo_name": "nowackie/leaven",
"id": "ac80ab6554306240b407f9ea0a64e15f7aa92118",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leavenproject/recipes/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "11162"
},
{
"name": "Python",
"bytes": "9524"
}
],
"symlink_target": ""
} |
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
iMax = 5
distributionCollection = DistributionCollection()
distributionCollection.add(Laplace(1.0, 0.0))
distributionCollection.add(Logistic(0.0, 1.0))
distributionCollection.add(LogNormal(0.0, 1.0, 0.0))
distributionCollection.add(Normal(0.0, 1.0))
distributionCollection.add(Rayleigh(1.0))
distributionCollection.add(Student(22))
distributionCollection.add(Triangular(-1.0, 0.3, 1.0))
distributionCollection.add(Uniform(-1.0, 1.0))
distributionCollection.add(Weibull(1.0, 3.0))
for n in range(distributionCollection.getSize()):
distribution = distributionCollection[n]
name = distribution.getImplementation().getClassName()
polynomialFactory = StandardDistributionPolynomialFactory(distribution)
print "polynomialFactory(", name, "=", polynomialFactory, ")"
for i in range(iMax):
print name, " polynomial(", i, ")=", polynomialFactory.build(i)
roots = polynomialFactory.getRoots(iMax - 1)
print name, " polynomial(", iMax - 1, ") roots=", roots
nodes, weights = polynomialFactory.getNodesAndWeights(iMax - 1)
print name, " polynomial(", iMax - 1, ") nodes=", nodes, " and weights=", weights
except:
import sys
print "t_StandardDistributionPolynomialFactory_std.py", sys.exc_type, sys.exc_value
| {
"content_hash": "d18d3aef5a6eecfed3c94a56f63fa2b8",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 89,
"avg_line_length": 44.0625,
"alnum_prop": 0.6943262411347517,
"repo_name": "sofianehaddad/ot-svn",
"id": "2992ea7959ff44e8e4e7b9748a1e2ccbb17ed5fd",
"size": "1434",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/test/t_StandardDistributionPolynomialFactory_std.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6498"
},
{
"name": "C",
"bytes": "455749"
},
{
"name": "C++",
"bytes": "10021345"
},
{
"name": "CMake",
"bytes": "240050"
},
{
"name": "FORTRAN",
"bytes": "299"
},
{
"name": "Makefile",
"bytes": "12372"
},
{
"name": "NSIS",
"bytes": "26263"
},
{
"name": "Python",
"bytes": "1221927"
},
{
"name": "R",
"bytes": "11141"
},
{
"name": "Scilab",
"bytes": "2612"
},
{
"name": "Shell",
"bytes": "20403"
},
{
"name": "TeX",
"bytes": "4250"
},
{
"name": "Visual Basic",
"bytes": "3294"
}
],
"symlink_target": ""
} |
import os
from draco2.model.test.shopmodel import ShopModel
from draco2.database.test.support import DatabaseTest
class ModelTest(DatabaseTest):
"""Base class for model tests."""
def setup_method(cls, method):
super(ModelTest, cls).setup_method(method)
cls.model = ShopModel(cls.database)
cls.schema = cls.model.schema()
cls.schema.drop()
cls.schema.create()
def teardown_method(cls, method):
cls.model._finalize()
cls.schema.drop()
super(ModelTest, cls).teardown_method(method)
| {
"content_hash": "baea834618bf9829de9ad4ed84542b49",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 29.42105263157895,
"alnum_prop": 0.669051878354204,
"repo_name": "geertj/draco2",
"id": "e6329aa5ffec1a46917d7db539d3364568b63e0b",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "draco2/model/test/support.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "28408"
},
{
"name": "Python",
"bytes": "641313"
},
{
"name": "Shell",
"bytes": "285"
},
{
"name": "VimL",
"bytes": "1020"
}
],
"symlink_target": ""
} |
from decimal import Decimal
def assert_decimal(val):
if isinstance(val, Decimal):
return val
if isinstance(val, (int, str)):
return Decimal(val)
raise ValueError("value is required to be of type 'decimal', "
"but it is of type {!r}".format(type(val).__name__))
| {
"content_hash": "2f377f771c7ad153eb8250100fa40137",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 73,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.6019108280254777,
"repo_name": "jmelett/pyFxTrader",
"id": "4f3406e90687c202104482ffdab834fe6ebfe372",
"size": "314",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trader/utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79204"
}
],
"symlink_target": ""
} |
import sys
from socket import timeout
from .version import __version__
try:
import urllib.request as urllib_request
from urllib.parse import urlencode
from urllib.error import HTTPError
except ImportError: # Python 2
import urllib2 as urllib_request
from urllib2 import HTTPError
from urllib import urlencode
from .exceptions import SendGridClientError, SendGridServerError
class SendGridClient(object):
"""SendGrid API."""
def __init__(self, username, password, **opts):
"""
Construct SendGrid API object.
Args:
username: SendGrid username
password: SendGrid password
user: Send mail on behalf of this user (web only)
raise_errors: If set to False (default): in case of error, `.send`
method will return a tuple (http_code, error_message). If set
to True: `.send` will raise SendGridError. Note, from version
1.0.0, the default will be changed to True, so you are
                recommended to pass True for forward compatibility.
"""
self.username = username
self.password = password
self.useragent = 'sendgrid/' + __version__ + ';python'
self.host = opts.get('host', 'https://api.sendgrid.com')
self.port = str(opts.get('port', '443'))
self.endpoint = opts.get('endpoint', '/api/mail.send.json')
self.mail_url = self.host + ':' + self.port + self.endpoint
self._raise_errors = opts.get('raise_errors', False)
# urllib cannot connect to SSL servers using proxies
self.proxies = opts.get('proxies', None)
def _build_body(self, message):
if sys.version_info < (3, 0):
ks = ['from_email', 'from_name', 'subject',
'text', 'html', 'reply_to']
for k in ks:
v = getattr(message, k)
if isinstance(v, unicode):
setattr(message, k, v.encode('utf-8'))
values = {
'api_user': self.username,
'api_key': self.password,
'to[]': message.to,
'toname[]': message.to_name,
'cc[]': message.cc,
'bcc[]': message.bcc,
'from': message.from_email,
'fromname': message.from_name,
'subject': message.subject,
'text': message.text,
'html': message.html,
'replyto': message.reply_to,
'headers': message.headers,
'date': message.date,
'x-smtpapi': message.json_string()
}
for k in list(values.keys()):
if not values[k]:
del values[k]
for filename in message.files:
if message.files[filename]:
values['files[' + filename + ']'] = message.files[filename]
return values
def _make_request(self, message):
if self.proxies:
proxy_support = urllib_request.ProxyHandler(self.proxies)
opener = urllib_request.build_opener(proxy_support)
urllib_request.install_opener(opener)
data = urlencode(self._build_body(message), True).encode('utf-8')
req = urllib_request.Request(self.mail_url, data)
req.add_header('User-Agent', self.useragent)
response = urllib_request.urlopen(req, timeout=10)
body = response.read()
return response.getcode(), body
def send(self, message):
if self._raise_errors:
return self._raising_send(message)
else:
return self._legacy_send(message)
def _legacy_send(self, message):
try:
return self._make_request(message)
except HTTPError as e:
return e.code, e.read()
except timeout as e:
return 408, e
def _raising_send(self, message):
try:
return self._make_request(message)
except HTTPError as e:
if e.code in range(400, 500):
raise SendGridClientError(e.code, e.read())
elif e.code in range(500, 600):
raise SendGridServerError(e.code, e.read())
else:
assert False
except timeout as e:
raise SendGridClientError(408, 'Request timeout')
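A minimal, self-contained sketch of driving the client above. Rather than the package's Mail helper (not shown in this file), a small stand-in message object exposes just the attributes that `_build_body()` reads; the credentials and addresses are placeholders, and running this would attempt a real HTTPS request to the SendGrid API.
class _StubMessage(object):
    """Stand-in for the package's Mail object; only the fields read by
    _build_body() are provided."""
    to = ['recipient@example.com']
    to_name = ['Recipient']
    cc = []
    bcc = []
    from_email = 'sender@example.com'
    from_name = 'Sender'
    subject = 'Hello'
    text = 'Plain-text body'
    html = None
    reply_to = None
    headers = None
    date = None
    files = {}

    def json_string(self):
        return '{}'


client = SendGridClient('sg_username', 'sg_password', raise_errors=True)
try:
    status, body = client.send(_StubMessage())
except SendGridClientError as err:   # 4xx from the API
    print('client error: %s' % err)
except SendGridServerError as err:   # 5xx from the API
    print('server error: %s' % err)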
| {
"content_hash": "48566a84ec9ea5305b952499a68cb6ba",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 36.440677966101696,
"alnum_prop": 0.5674418604651162,
"repo_name": "rice-apps/petition-app",
"id": "c0afa10052a935a78450925543448be6b1dc8cc5",
"size": "4300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/sendgrid/sendgrid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553"
},
{
"name": "HTML",
"bytes": "35443"
},
{
"name": "JavaScript",
"bytes": "10514"
},
{
"name": "Python",
"bytes": "78526"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
from flexget import options
from flexget.event import event
from flexget.logger import console
from flexget.plugins.filter import seen
from flexget.utils.database import with_session
from flexget.utils.imdb import is_imdb_url, extract_id
def do_cli(manager, options):
if options.seen_action == 'forget':
seen_forget(manager, options)
elif options.seen_action == 'add':
seen_add(options)
elif options.seen_action == 'search':
seen_search(options)
def seen_forget(manager, options):
forget_name = options.forget_value
if is_imdb_url(forget_name):
imdb_id = extract_id(forget_name)
if imdb_id:
forget_name = imdb_id
count, fcount = seen.forget(forget_name)
console('Removed %s titles (%s fields)' % (count, fcount))
manager.config_changed()
def seen_add(options):
seen_name = options.add_value
if is_imdb_url(seen_name):
imdb_id = extract_id(seen_name)
if imdb_id:
seen_name = imdb_id
seen.add(seen_name, 'cli_add', {'cli_add': seen_name})
console('Added %s as seen. This will affect all tasks.' % seen_name)
@with_session
def seen_search(options, session=None):
search_term = '%' + options.search_term + '%'
seen_entries = seen.search(value=search_term, status=None, session=session)
for se in seen_entries.all():
console('ID: %s Name: %s Task: %s Added: %s' % (se.id, se.title, se.task, se.added.strftime('%c')))
for sf in se.fields:
console(' %s: %s' % (sf.field, sf.value))
console('')
if not seen_entries:
console('No results')
@event('options.register')
def register_parser_arguments():
parser = options.register_command('seen', do_cli, help='view or forget entries remembered by the seen plugin')
subparsers = parser.add_subparsers(dest='seen_action', metavar='<action>')
forget_parser = subparsers.add_parser('forget', help='forget entry or entire task from seen plugin database')
forget_parser.add_argument('forget_value', metavar='<value>',
help='title or url of entry to forget, or name of task to forget')
add_parser = subparsers.add_parser('add', help='add a title or url to the seen database')
add_parser.add_argument('add_value', metavar='<value>', help='the title or url to add')
search_parser = subparsers.add_parser('search', help='search text from the seen database')
search_parser.add_argument('search_term', metavar='<search term>')
| {
"content_hash": "1e1fd3982083aa4cf83226bfbaad1f9c",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 114,
"avg_line_length": 39.707692307692305,
"alnum_prop": 0.6629213483146067,
"repo_name": "Pretagonist/Flexget",
"id": "ba5284adb8ac4112ae56595f62e6f16cb013039c",
"size": "2581",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/plugins/cli/seen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6197"
},
{
"name": "HTML",
"bytes": "38747"
},
{
"name": "JavaScript",
"bytes": "64717"
},
{
"name": "Python",
"bytes": "2462608"
}
],
"symlink_target": ""
} |
from django.conf import settings
from sqjobs import create_sqs_broker, create_sqs_worker
def get_broker():
return create_sqs_broker(
access_key=settings.SQJOBS_SQS_ACCESS_KEY,
secret_key=settings.SQJOBS_SQS_SECRET_KEY,
region_name=settings.SQJOBS_SQS_REGION_NAME,
endpoint_url=getattr(settings, 'SQJOBS_SQS_ENDPOINT_URL', None),
)
def get_worker(queue_name):
return create_sqs_worker(
queue_name=queue_name,
access_key=settings.SQJOBS_SQS_ACCESS_KEY,
secret_key=settings.SQJOBS_SQS_SECRET_KEY,
region_name=settings.SQJOBS_SQS_REGION_NAME,
endpoint_url=getattr(settings, 'SQJOBS_SQS_ENDPOINT_URL', None),
)
def add_job(job_class, *args, **kwargs):
get_broker().add_job(job_class, *args, **kwargs)
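A hedged sketch of how this might be wired up in a Django project; the settings names are the ones read by the helpers above, while the job class, its module, and its arguments are hypothetical placeholders.
# settings.py (values are placeholders)
# SQJOBS_SQS_ACCESS_KEY = 'AKIA...'
# SQJOBS_SQS_SECRET_KEY = '...'
# SQJOBS_SQS_REGION_NAME = 'eu-west-1'
# SQJOBS_SQS_ENDPOINT_URL = 'http://localhost:4566'  # optional, e.g. a local SQS emulator

# somewhere in application code
from myapp.jobs import SendWelcomeEmail   # hypothetical sqjobs job class

add_job(SendWelcomeEmail, user_id=42)     # pushed onto the queue via the SQS broker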
| {
"content_hash": "e790d4d6d43429cce5bdf3fab8d3f76e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 30.576923076923077,
"alnum_prop": 0.680503144654088,
"repo_name": "gnufede/sqjobs",
"id": "f40c84321176a782962067dd3f2cf91faa70058f",
"size": "795",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sqjobs/contrib/django/djsqjobs/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "57752"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
} |
from gmusicapi import Mobileclient
import getpass
class GpmSession(object):
# Private Variables
# Public Variables
api = None
logged_in = False
songs = None
playlists = None
# Constructor with optionally passed credentials
# Omit credentials if you want to handle login, include for prompts from this module
def __init__(self, email=None, pw=None):
self.api = Mobileclient()
if not email and not pw:
email = input("Please enter an email address tied to a GPM account: ")
pw = getpass.getpass("Please enter the password associated with %s: " % email)
self.logged_in = self.api.login(email, pw, Mobileclient.FROM_MAC_ADDRESS) # As per api protocol
if self.logged_in:
print("Google Play Music login successful")
else:
print("Google Play Music login failed")
    def init(self, songs=True, playlists=True):
if songs:
self.songs = self.api.get_all_songs()
if playlists:
self.playlists = self.api.get_all_playlists()
def get_song_stream(self, title, artist=None):
if not self.songs:
self.init(True, False)
song = next(iter((track for track in self.songs if self._filter_condition(track, title, artist)) or []), None)
if song:
return self.api.get_stream_url(song["id"])
else:
return None
def _filter_condition(self, song_obj, search_title, search_artist):
result = True
if search_title:
result = result & (song_obj["title"].lower().strip() == search_title.lower().strip())
if search_artist:
result = result & (song_obj["artist"].lower().strip() == search_artist.lower().strip())
return result
def main():
session = GpmSession()
while not session.logged_in:
session = GpmSession()
session.init()
print(session.get_song_stream("Dirty Laundry", "Bitter Sweet"))
print(session.get_song_stream("1940"))
if __name__ == "__main__":
main()
| {
"content_hash": "fc925b803a8429419bd0b33c23fbb732",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 118,
"avg_line_length": 34.93333333333333,
"alnum_prop": 0.612118320610687,
"repo_name": "sethraymond/dootbot",
"id": "b9ba2cc55a0337eb956ecbd1ac27ddd34084f851",
"size": "2096",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/libs/GooglePlayMusicController.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6089"
}
],
"symlink_target": ""
} |
import uwsgi
uwsgi.log("I am uWSGI %s" % uwsgi.version)
def application(env, start_response):
start_response('200 OK', [('Content-Type','text/html')])
uwsgi.log(str(env))
if env['PATH_INFO'] == '/logme':
uwsgi.log_this_request()
return "log written"
| {
"content_hash": "8b3cd4e6bae161e49dd87beb02400ac4",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 57,
"avg_line_length": 23.545454545454547,
"alnum_prop": 0.6718146718146718,
"repo_name": "jyotikamboj/container",
"id": "530022dde89605ab87135a9fadc1bb5ff9523442",
"size": "259",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "uw-tests/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "130"
},
{
"name": "Assembly",
"bytes": "1050"
},
{
"name": "C",
"bytes": "6984025"
},
{
"name": "C#",
"bytes": "5011"
},
{
"name": "C++",
"bytes": "76346"
},
{
"name": "CSS",
"bytes": "332655"
},
{
"name": "Clojure",
"bytes": "4018"
},
{
"name": "CoffeeScript",
"bytes": "260"
},
{
"name": "Erlang",
"bytes": "693"
},
{
"name": "Go",
"bytes": "7990"
},
{
"name": "Java",
"bytes": "854529"
},
{
"name": "JavaScript",
"bytes": "1142584"
},
{
"name": "Lua",
"bytes": "4142"
},
{
"name": "Makefile",
"bytes": "6026"
},
{
"name": "Objective-C",
"bytes": "2621"
},
{
"name": "PHP",
"bytes": "6012"
},
{
"name": "Perl",
"bytes": "33126"
},
{
"name": "Perl6",
"bytes": "2994"
},
{
"name": "Python",
"bytes": "10715733"
},
{
"name": "Ruby",
"bytes": "654838"
},
{
"name": "Scala",
"bytes": "2829497"
},
{
"name": "Shell",
"bytes": "28228"
},
{
"name": "TeX",
"bytes": "7441"
},
{
"name": "VimL",
"bytes": "37498"
},
{
"name": "XSLT",
"bytes": "4275"
}
],
"symlink_target": ""
} |