text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
---|---|---|---|---|---|---|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.exception import SqlmapUnsupportedFeatureException
from plugins.generic.filesystem import Filesystem as GenericFilesystem
class Filesystem(GenericFilesystem):
    """Oracle-specific file-system takeover plugin.

    Neither read nor write access is implemented for Oracle back-ends, so
    both operations raise SqlmapUnsupportedFeatureException.
    """

    def __init__(self):
        GenericFilesystem.__init__(self)

    def readFile(self, rFile):
        # Downloading server-side files is not supported on Oracle.
        raise SqlmapUnsupportedFeatureException(
            "File system read access not yet implemented for Oracle")

    def writeFile(self, wFile, dFile, fileType=None, forceCheck=False):
        # Uploading files to the server is not supported on Oracle.
        raise SqlmapUnsupportedFeatureException(
            "File system write access not yet implemented for Oracle")
| goofwear/raspberry_pwn | src/pentest/sqlmap/plugins/dbms/oracle/filesystem.py | Python | gpl-3.0 | 792 | 0.001263 |
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from datetime import datetime
from django.http import HttpResponse
from GarageWarden import status, settingHelper, settingView, config, settings as gw_settings
import RPi.GPIO as GPIO
settings = None
settings_loaded = False
def reload_config():
    """(Re)load every "email"-prefixed setting into the module-level cache.

    Sets ``settings_loaded`` so get_setting() knows the cache is populated.
    """
    global settings, settings_loaded
    settings_loaded = True
    settings = settingHelper.values_for_prefix("email")

# Re-read the cached settings whenever the settings view saves changes.
settingView.reload_methods['notify'] = reload_config
def send_mail(subject, text, html=None):
    """Send a (plain + optional HTML) email via the configured SMTP server.

    Connection parameters and recipients come from the "email" settings via
    get_setting(). Does nothing when the 'enabled' setting is falsy.

    :param subject: message subject line
    :param text: plain-text body (attached when truthy)
    :param html: optional HTML alternative body
    """
    if not get_setting('enabled'):
        print('email not enabled')
        return

    encryption = (get_setting('encryption') or '').lower()
    host = get_setting('host')
    port = int(get_setting('port'))

    if encryption == 'ssl':
        smtp = smtplib.SMTP_SSL(host=host, port=port)
    else:
        smtp = smtplib.SMTP(host=host, port=port)

    # BUG fix: the connection was never closed; ensure quit() always runs.
    try:
        if encryption == 'tls':
            smtp.starttls()
        if get_setting('username') and get_setting('password'):
            smtp.login(get_setting('username'), get_setting('password'))

        _from = get_setting('from name') or 'GarageWarden'
        recipients = get_setting('recipients')

        msg = MIMEMultipart("alternative")
        msg['Subject'] = subject
        msg['From'] = _from
        msg['To'] = recipients
        if text:
            msg.attach(MIMEText(text, "plain"))
        if html:
            msg.attach(MIMEText(html, "html"))

        # Recipients setting is a comma-separated list; strip whitespace.
        smtp.sendmail(_from, [r.strip() for r in recipients.split(',') if r],
                      msg.as_string())
    finally:
        smtp.quit()
def send_state_change_mail(state, color, date):
    """Email a door state change, if status notifications are enabled."""
    if not get_setting('Status Notification'):
        print('status emails not enabled')
        return
    send_mail("Garage " + state, make_text(state, date), make_html(state, color, date))
def make_html(state, color, date):
    """Build the HTML body for a state-change email."""
    template = ("Garage was <span style='color: {c}'><strong>{s}</strong>"
                "</span> at <i>{d}</i>")
    return template.format(c=color, s=state, d=date)
def make_text(state, date):
    """Build the plain-text body for a state-change email."""
    return "Garage was {0} at {1}".format(state, date)
def state_change():
    """Callback fired when the garage door state changes.

    Reads the current sensor state and emails a notification when the door
    is fully open or fully closed (intermediate states are only logged).
    """
    now = datetime.now()
    now_str = now.strftime("%d-%b-%Y %H:%M:%S")
    opened = status.garage_is_full_open()
    closed = status.garage_is_full_close()
    # BUG fix: the original log line read '... at01-Jan-...' (missing space).
    print("State changed to opened: " + str(opened) + " closed: " + str(closed) + " at " + now_str)
    if opened:
        send_state_change_mail("Opened", "#f0ad4e", now_str)
    elif closed:
        send_state_change_mail("Closed", "#5cb85c", now_str)

# Register with the hardware-monitoring module to receive state changes.
config.state_change_callbacks['notify'] = state_change
def test_email(request):
    """Django view: send a test state-change email and report the outcome."""
    print('sending test emails')
    if not get_setting('enabled'):
        return HttpResponse("Email not enabled")
    now_str = datetime.now().strftime("%d-%b-%Y %H:%M:%S")
    send_state_change_mail("Test", "#5bc0de", now_str)
    return HttpResponse("Test email sent")
def get_setting(setting):
    """Return the cached value for *setting*, loading settings on first use.

    Uses dict.get so a missing key yields None (falsy) instead of raising
    KeyError — callers such as send_mail truth-test optional settings like
    'username'/'password', which previously crashed when the key was absent.
    """
    if not settings_loaded:
        reload_config()
    return settings.get(setting)
def start_beep():
    """Turn the beeper on by driving its GPIO pin high."""
    GPIO.output(gw_settings.BEEPER_PIN, True)
def stop_beep():
    """Turn the beeper off by driving its GPIO pin low."""
    GPIO.output(gw_settings.BEEPER_PIN, False)
| narcolepticsnowman/GarageWarden | GarageWarden/notify.py | Python | mit | 3,025 | 0.002314 |
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osglight"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgUtil
from osgpypp import osgViewer
# Translated from file 'osglight.cpp'
# OpenSceneGraph example, osglight.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osgViewer/Viewer>
#include <osg/Group>
#include <osg/Node>
#include <osg/Light>
#include <osg/LightSource>
#include <osg/StateAttribute>
#include <osg/Geometry>
#include <osg/Point>
#include <osg/MatrixTransform>
#include <osg/PositionAttitudeTransform>
#include <osgDB/Registry>
#include <osgDB/ReadFile>
#include <osgUtil/Optimizer>
#include <osgUtil/SmoothingVisitor>
#include "stdio.h"
# callback to make the loaded model oscilate up and down.
class ModelTransformCallback(osg.NodeCallback):
    """Update callback that spins the loaded model and bobs it up and down,
    one full cycle every ``_period`` seconds.

    Hand-fixed from the machine translation: the generated class body mixed
    C++ residue (``virtual void operator()``, ``dynamic_cast``) that was not
    valid Python.
    """

    def __init__(self, bs):
        # bs: bounding sphere of the model; its radius scales the bobbing.
        osg.NodeCallback.__init__(self)
        self._firstTime = 0.0   # simulation time at the first update
        self._period = 4.0      # seconds per full cycle
        self._range = bs.radius() * 0.5

    def __call__(self, node, nv):
        import math
        # The C++ code dynamic_casts to PositionAttitudeTransform; this
        # callback is only ever attached to one, so use the node directly.
        pat = node
        frameStamp = nv.getFrameStamp()
        if pat and frameStamp:
            if self._firstTime == 0.0:
                self._firstTime = frameStamp.getSimulationTime()
            phase = (frameStamp.getSimulationTime() - self._firstTime) / self._period
            phase -= math.floor(phase)
            phase *= (2.0 * math.pi)
            rotation = osg.Quat()
            rotation.makeRotate(phase, 1.0, 1.0, 1.0)
            pat.setAttitude(rotation)
            pat.setPosition(osg.Vec3(0.0, 0.0, math.sin(phase)) * self._range)
        # must traverse the Node's subgraph
        self.traverse(node, nv)
def createLights(bb, rootStateSet):
    """Build a group with a red spot light and an animated cyan point light.

    The point light (with a point-sprite marker) swings between the corners
    of bounding box *bb* on an AnimationPath. Fix from the auto-translation:
    setStateSetModes was called with a stray C++ dereference
    (``*rootStateSet``), which is invalid Python.

    :param bb: osg.BoundingBox sizing the scene
    :param rootStateSet: scene root StateSet to enable the lights on
    :return: osg.Group containing both light sources
    """
    lightGroup = osg.Group()
    modelSize = bb.radius()

    # create a spot light.
    myLight1 = osg.Light()
    myLight1.setLightNum(0)
    myLight1.setPosition(osg.Vec4(bb.corner(4), 1.0))
    myLight1.setAmbient(osg.Vec4(1.0, 0.0, 0.0, 1.0))
    myLight1.setDiffuse(osg.Vec4(1.0, 0.0, 0.0, 1.0))
    myLight1.setSpotCutoff(20.0)
    myLight1.setSpotExponent(50.0)
    myLight1.setDirection(osg.Vec3(1.0, 1.0, -1.0))

    lightS1 = osg.LightSource()
    lightS1.setLight(myLight1)
    lightS1.setLocalStateSetModes(osg.StateAttribute.ON)
    lightS1.setStateSetModes(rootStateSet, osg.StateAttribute.ON)
    lightGroup.addChild(lightS1)

    # create a local point light with distance attenuation.
    myLight2 = osg.Light()
    myLight2.setLightNum(1)
    myLight2.setPosition(osg.Vec4(0.0, 0.0, 0.0, 1.0))
    myLight2.setAmbient(osg.Vec4(0.0, 1.0, 1.0, 1.0))
    myLight2.setDiffuse(osg.Vec4(0.0, 1.0, 1.0, 1.0))
    myLight2.setConstantAttenuation(1.0)
    myLight2.setLinearAttenuation(2.0 / modelSize)
    myLight2.setQuadraticAttenuation(2.0 / osg.square(modelSize))

    lightS2 = osg.LightSource()
    lightS2.setLight(myLight2)
    lightS2.setLocalStateSetModes(osg.StateAttribute.ON)
    lightS2.setStateSetModes(rootStateSet, osg.StateAttribute.ON)

    mt = osg.MatrixTransform()
    # set up the animation path: visit each box corner, then return to 0.
    animationPath = osg.AnimationPath()
    for t, corner in enumerate((0, 1, 2, 3, 4, 5, 6, 7, 0)):
        animationPath.insert(float(t), osg.AnimationPath.ControlPoint(bb.corner(corner)))
    animationPath.setLoopMode(osg.AnimationPath.SWING)
    mt.setUpdateCallback(osg.AnimationPathCallback(animationPath))

    # create marker for point light.
    marker = osg.Geometry()
    vertices = osg.Vec3Array()
    vertices.push_back(osg.Vec3(0.0, 0.0, 0.0))
    marker.setVertexArray(vertices)
    marker.addPrimitiveSet(osg.DrawArrays(GL_POINTS, 0, 1))

    stateset = osg.StateSet()
    point = osg.Point()
    point.setSize(4.0)
    stateset.setAttribute(point)
    marker.setStateSet(stateset)

    markerGeode = osg.Geode()
    markerGeode.addDrawable(marker)

    mt.addChild(lightS2)
    mt.addChild(markerGeode)
    lightGroup.addChild(mt)
    return lightGroup
def createWall(v1, v2, v3, stateset):
    """Create a tessellated quad-strip wall spanning corner *v1* toward
    *v2* (x direction) and *v3* (y direction).

    Hand-fixed from the machine translation: the generated loops
    (``for(row=0row<noYSteps++row)``), C casts (``(float)noXSteps``) and
    dereferences (``(*colors)[0]``) were not valid Python.

    :return: osg.Geometry with smoothed normals and the given stateset
    """
    geom = osg.Geometry()
    geom.setStateSet(stateset)

    noXSteps = 100
    noYSteps = 100

    # Build a noXSteps x noYSteps grid of vertices.
    coords = osg.Vec3Array()
    coords.reserve(noXSteps * noYSteps)

    dx = (v2 - v1) / (float(noXSteps) - 1.0)
    dy = (v3 - v1) / (float(noYSteps) - 1.0)

    vRowStart = v1
    for row in range(noYSteps):
        v = vRowStart
        for col in range(noXSteps):
            coords.push_back(v)
            v += dx
        vRowStart += dy

    geom.setVertexArray(coords)

    colors = osg.Vec4Array(1)
    colors[0].set(1.0, 1.0, 1.0, 1.0)
    geom.setColorArray(colors, osg.Array.BIND_OVERALL)

    # One quad strip per row pair, stitching adjacent rows together.
    for row in range(noYSteps - 1):
        quadstrip = osg.DrawElementsUShort(osg.PrimitiveSet.QUAD_STRIP)
        quadstrip.reserve(noXSteps * 2)
        for col in range(noXSteps):
            quadstrip.push_back((row + 1) * noXSteps + col)
            quadstrip.push_back(row * noXSteps + col)
        geom.addPrimitiveSet(quadstrip)

    # create the normals.
    osgUtil.SmoothingVisitor.smooth(geom)
    return geom
def createRoom(loadedModel):
    """Build the scene: the (optional) animated model inside a six-walled
    room sized from its bounds, plus the animated lights.

    Fix from the auto-translation: ``bs.radius()*=1.5`` is not valid Python
    (augmented assignment to a call); the sphere is rebuilt 1.5x larger.

    :param loadedModel: osg.Node or None
    :return: osg.Group scene root
    """
    # default scale for this model.
    bs = osg.BoundingSphere(osg.Vec3(0.0, 0.0, 0.0), 1.0)

    root = osg.Group()

    if loadedModel:
        loaded_bs = loadedModel.getBound()

        pat = osg.PositionAttitudeTransform()
        pat.setPivotPoint(loaded_bs.center())
        pat.setUpdateCallback(ModelTransformCallback(loaded_bs))
        pat.addChild(loadedModel)

        bs = pat.getBound()
        root.addChild(pat)

        # Leave head room around the model for the bobbing animation.
        bs = osg.BoundingSphere(bs.center(), bs.radius() * 1.5)

    # create a bounding box, which we'll use to size the room.
    bb = osg.BoundingBox()
    bb.expandBy(bs)

    # create statesets.
    rootStateSet = osg.StateSet()
    root.setStateSet(rootStateSet)

    wall = osg.StateSet()
    wall.setMode(GL_CULL_FACE, osg.StateAttribute.ON)
    floor = osg.StateSet()
    floor.setMode(GL_CULL_FACE, osg.StateAttribute.ON)
    roof = osg.StateSet()
    roof.setMode(GL_CULL_FACE, osg.StateAttribute.ON)

    geode = osg.Geode()
    # One tessellated wall per side of the box, plus floor and roof.
    geode.addDrawable(createWall(bb.corner(0), bb.corner(4), bb.corner(1), wall))   # front
    geode.addDrawable(createWall(bb.corner(1), bb.corner(5), bb.corner(3), wall))   # right
    geode.addDrawable(createWall(bb.corner(2), bb.corner(6), bb.corner(0), wall))   # left
    geode.addDrawable(createWall(bb.corner(3), bb.corner(7), bb.corner(2), wall))   # back
    geode.addDrawable(createWall(bb.corner(0), bb.corner(1), bb.corner(2), floor))  # floor
    geode.addDrawable(createWall(bb.corner(6), bb.corner(7), bb.corner(4), roof))   # roof

    root.addChild(geode)
    root.addChild(createLights(bb, rootStateSet))
    return root
def main(argv):
    """Load a model (default glider.osgt), wrap it in the lit room, and run
    the viewer's frame loop.

    :param argv: command-line arguments (model file names)
    :return: viewer exit status
    """
    # use an ArgumentParser object to manage the program arguments.
    arguments = osg.ArgumentParser(argv)

    # construct the viewer.
    viewer = osgViewer.Viewer()

    # load the nodes from the commandline arguments.
    loadedModel = osgDB.readNodeFiles(arguments)

    # if not loaded assume no arguments passed in, try use default model instead.
    if not loadedModel:
        loadedModel = osgDB.readNodeFile("glider.osgt")

    # create a room made of four walls, a floor, a roof, and swinging light fitting.
    rootnode = createRoom(loadedModel)

    # run optimization over the scene graph
    optimzer = osgUtil.Optimizer()
    optimzer.optimize(rootnode)

    # add a viewport to the viewer and attach the scene graph.
    viewer.setSceneData(rootnode)

    # create the windows and run the threads.
    viewer.realize()

    # BUG fix: the translation dropped the '&' from the C++ expression
    # "getCullingMode() & ~CullStack::SMALL_FEATURE_CULLING".
    viewer.getCamera().setCullingMode(
        viewer.getCamera().getCullingMode() & ~osg.CullStack.SMALL_FEATURE_CULLING)

    return viewer.run()


if __name__ == "__main__":
    main(sys.argv)
| JaneliaSciComp/osgpyplusplus | examples/rough_translated1/osglight.py | Python | bsd-3-clause | 10,059 | 0.015409 |
# GeneaCrystal Copyright (C) 2012-2013
# Christian Jaeckel, <christian.doe@gmail.com>
# Frederic Kerber, <fkerber@gmail.com>
# Pascal Lessel, <maverickthe6@gmail.com>
# Michael Mauderer, <mail@michaelmauderer.de>
#
# GeneaCrystal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GeneaCrystal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GeneaCrystal. If not, see <http://www.gnu.org/licenses/>.
import libavg as avg
import pymunk
from geneacrystal import util, physic
from geneacrystal.alphaKeyboard import AlphaKeyboard
from geneacrystal.highscore import Highscore
class ItemImageNode(avg.DivNode):
    """Div wrapper holding a single image centered on the node's origin.

    The div itself stays insensitive; effects and event handlers are
    forwarded to the inner image node.
    """

    def __init__(self, href, size, *args, **kwargs):
        avg.DivNode.__init__(self, *args, **kwargs)
        self.pivot = 0, 0
        self.opacity = 1
        self.sensitive = False
        img = avg.ImageNode(parent=self, opacity=1, href=href, size=size)
        # Shift by half the size so the image is centered on the origin.
        img.pos = util.vectorMult(size, -0.5)
        self.image = img
        if __debug__:
            self.elementoutlinecolor = "FFFFFF"

    @property
    def size(self):
        return self.image.size

    @size.setter
    def size(self, value):
        self.image.size = value
        util.centerNodeOnPosition(self.image, (0, 0))

    def setEffect(self, node):
        self.image.setEffect(node)

    def setEventHandler(self, *args, **kwargs):
        return self.image.setEventHandler(*args, **kwargs)
class TouchPointNode(avg.CircleNode):
def delete(self):
self.unlink(True)
def __init__(self, space, theme=None, owner=None, *args, **kwargs):
avg.CircleNode.__init__(self, *args, **kwargs)
if theme is None:
from geneacrystal import themes
self._theme = themes.DefaultTheme
self.owner = owner
self._body = physic.TouchPointBody(self)
self._body.position = tuple(self.pos)
self.filltexhref = self._theme.getStaticImage("TouchPointNode")
#self.fillcolor = "00FF00"
self.strokewidth = 0
self.shape = pymunk.Circle(self._body, self.r, (0, 0))
self.shape.elasticity = 1
self.shape.collision_type = physic.TouchPointCollisionType
space.add(self._body, self.shape)
if __debug__:
print "Created ", self
def __str__(self, *args, **kwargs):
formatString = "TouchPointNode(pos={tp.pos}, owner={tp.owner})"
return formatString.format(tp=self)
class ShieldNode(avg.LineNode):
    """Line node backed by a physics "shield" body (created sleeping)."""

    def __init__(self, space, owner=None, *args, **kwargs):
        avg.LineNode.__init__(self, *args, **kwargs)
        self._body = physic.ShieldBody(self)
        self.owner = owner
        # Physics body anchored at the line's first endpoint.
        self._body.position = tuple(self.pos1)
        from geneacrystal import themes
        self.texhref = themes.DefaultTheme.getStaticImage("Wall")
        self.fillopacity = 0
        self.opacity = 1
        space.add(self._body, self._body.shape)
        # Start inactive; presumably woken elsewhere when the shield is used.
        self._body.sleep()

    def update(self, pos1, pos2):
        """Move both endpoints and re-align the physics segment with them."""
        self.pos1 = pos1
        self.pos2 = pos2
        self._body.position = tuple(self.pos1)
        # Segment end is relative to the body, in physics coordinates.
        self._body.shape.b = util.transformVector((pos2.x - pos1.x, pos2.y - pos1.y))

    def delete(self):
        # NOTE(review): unlike TouchPointNode.delete, this does not unlink
        # or remove the body from the space — confirm this is intentional.
        pass
class HighscoreEntryNode(avg.DivNode):
    """On-screen keyboard prompting for a name when *score* makes the
    top-10 highscore list of *mode*; otherwise immediately calls back with
    an empty name and removes itself.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; the nesting of the onEnter body should be confirmed against
    the original repository.
    """

    def __init__(self, mode, score, allScores, callback=None, theme=None, *args, **kwargs):
        avg.DivNode.__init__(self, *args, **kwargs)
        if theme is None:
            from geneacrystal import themes
            theme = themes.DefaultTheme

        # Key-cap images for the on-screen keyboard.
        bgPath = theme.getStaticImage("keySymbol")
        backPath = theme.getStaticImage("backspaceSymbol")
        enterPath = theme.getStaticImage("enterSymbol")
        shiftPath = theme.getStaticImage("shiftSymbol")
        emptyPath = theme.getStaticImage("spaceSymbol")

        highscore = Highscore(mode)

        # Merge the session scores with the persisted ones to decide rank.
        myScores = []
        myScores.extend(allScores)
        myScores.extend(highscore.scores)
        myScores.sort(reverse=True, key=lambda val: int(val))

        if len(myScores) < util.MAX_HIGHSCORE_LENGTH or score > int(myScores[9]) or score == int(myScores[9]) and not score in highscore.scores:
            self.__value = ""

            def onKeyDown(keyCode):
                # Cap entered names at 20 characters.
                if len(self.__value) < 20:
                    self.__value += keyCode
                    self.__edit.text += keyCode

            def onBack():
                self.__value = self.__value[0:-1]
                self.__edit.text = self.__value

            def onEnter():
                # Only a non-empty name is accepted; then tear down the UI.
                if not self.__value == "":
                    highscore.addEntry(self.__value, score)
                    if callback is not None:
                        callback(self.__value)
                    self._keyboard.cleanup()
                    self._keyboard.unlink(True)
                    self._keyboard = None
                    self.__edit.unlink(True)
                    self.__edit = None
                    self.unlink(True)

            # Text field showing the name as it is typed.
            self.__edit = avg.WordsNode(size=(self.size.x, self.size.y // 8),
                                        parent=self, fontsize=self.size.y // 8,
                                        alignment="center")
            self.__edit.pos = (self.size.x // 2, 0)

            self._keyboard = AlphaKeyboard(bgPath, backPath, enterPath, shiftPath,
                                           emptyPath, onKeyDown=onKeyDown,
                                           onBack=onBack, onEnter=onEnter,
                                           size=(self.size.x, self.size.y // 10 * 8),
                                           pos=(0, self.size.y // 5),
                                           parent=self)
        else:
            # Score did not qualify: report an empty name and vanish.
            if callback is not None:
                callback("")
            self.unlink(True)
class ItemImageLayeredNode(avg.DivNode):
    """Div stacking several images centered on the origin; events are
    handled by the top-most layer, effects apply to every layer."""

    def __init__(self, layers, size, *args, **kwargs):
        avg.DivNode.__init__(self, *args, **kwargs)
        self.pivot = 0, 0
        self.opacity = 1
        self.sensitive = False

        # Center every layer on the node's origin.
        childPos = util.vectorMult(size, -0.5)
        self._layer = []
        self._topImage = None
        for image in layers:
            node = avg.ImageNode(parent=self,
                                 opacity=1,
                                 href=image,
                                 size=size,
                                 pos=childPos,
                                 sensitive=False)
            self._layer.append(node)
            node.sensitive = True
        # BUG fix: guard against an empty *layers* sequence (previously an
        # unconditional self._layer[-1] raised IndexError).
        if self._layer:
            self._topImage = self._layer[-1]

    def removeLayer(self, index):
        """Unlink and forget the layer at *index*, updating the top image."""
        node = self._layer[index]
        node.unlink(True)
        self._layer.remove(node)
        if node == self._topImage:
            # BUG fix: removing the last remaining layer used to raise
            # IndexError on self._layer[-1].
            self._topImage = self._layer[-1] if self._layer else None

    @property
    def size(self):
        return self._layer[0].size

    def setEventHandler(self, *args, **kwargs):
        return self._topImage.setEventHandler(*args, **kwargs)

    def setEffect(self, *args, **kwargs):
        for node in self._layer:
            node.setEffect(*args, **kwargs)
class OverlayNode(avg.DivNode):
    """Div pre-filled with the theme's "ExitButton" background node."""

    def __init__(self, theme=None, *args, **kwargs):
        if theme is None:
            from geneacrystal import themes
            theme = themes.StandardTheme()
        super(OverlayNode, self).__init__(*args, **kwargs)
        background_factory = theme.getNode("ExitButton")
        self._background = background_factory(size=self.size,
                                              parent=self,
                                              opacity=1)
class StaticOverlayNode(OverlayNode):
    """Overlay with a press-and-hold circle: holding the cursor for the
    full 2s animation grows the circle and fires *finishCB*; releasing or
    leaving early aborts and resets it.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source; the reset lines in __abort/__finish are assumed to run
    unconditionally — confirm against the original repository.
    """

    def __init__(self, finishCB, *args, **kwargs):
        super(StaticOverlayNode, self).__init__(*args, **kwargs)
        self.__anim = None
        # Idle radius: 8% of the background width.
        self.__initalRadius = self._background.size.x*0.08
        self.__circle = avg.CircleNode(pos=(self._background.size.x//2, self._background.size.y//2), r=self.__initalRadius, fillcolor="000000", fillopacity=1.0, parent=self)
        self.__finishCB = finishCB
        self.setEventHandler(avg.CURSORDOWN, avg.TOUCH | avg.MOUSE, lambda x: self.__start())
        self.setEventHandler(avg.CURSOROUT, avg.TOUCH | avg.MOUSE, lambda x: self.__abort())
        self.setEventHandler(avg.CURSORUP, avg.TOUCH | avg.MOUSE, lambda x: self.__abort())

    def __start(self):
        # Grow the circle over 2s; reaching full size triggers __finish.
        self.__circle.sensitive = False
        self.__aborted = True
        if self.__anim is not None:
            self.__anim.abort()
        self.__anim = avg.LinearAnim(self.__circle, "r", 2000, self.__circle.r, self._background.size.y//2, False, None, self.__finish)
        self.__aborted = False
        self.__anim.start()

    def __abort(self):
        # Cancel a running hold; __aborted makes the anim's stop callback
        # (__finish) a no-op.
        if self.__anim is not None:
            self.__aborted = True
            self.__anim.abort()
            self.__anim = None
        self.__circle.r = self.__initalRadius
        self.__circle.sensitive = True

    def __finish(self):
        # Anim stop callback: only fire when the hold completed normally.
        if not self.__aborted:
            self.__anim = None
            self.__finishCB()
        self.__circle.r = self.__initalRadius
        self.__circle.sensitive = True
| MichaelMauderer/GeneaCrystal | geneacrystal/nodes.py | Python | gpl-3.0 | 10,098 | 0.008418 |
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from alembic import op
from neutron.db import migration
from sqlalchemy.engine import reflection
"""update_ha_group_primary_key
Revision ID: 73c84db9f299
Revises: 972479e0e629
Create Date: 2017-10-05 05:31:54.243849
"""
# revision identifiers, used by Alembic.
revision = '73c84db9f299'
down_revision = '972479e0e629'
def upgrade():
    """Rebuild cisco_router_ha_groups' primary key as (ha_port_id, subnet_id).

    The table's foreign keys must be dropped before the old PK can be
    replaced (databases refuse to drop a referenced key), then they are
    recreated afterwards.
    """
    if not migration.schema_has_table('cisco_router_ha_groups'):
        # Nothing to do on deployments without the HA extension tables.
        return

    inspector = reflection.Inspector.from_engine(op.get_bind())
    foreign_keys = inspector.get_foreign_keys('cisco_router_ha_groups')
    migration.remove_foreign_keys('cisco_router_ha_groups', foreign_keys)

    primary_key = inspector.get_pk_constraint('cisco_router_ha_groups')
    op.drop_constraint(constraint_name=primary_key['name'],
                       table_name='cisco_router_ha_groups',
                       type_='primary')
    op.create_primary_key(
        constraint_name='pk_cisco_router_ha_groups',
        table_name='cisco_router_ha_groups',
        columns=['ha_port_id', 'subnet_id'])

    # Recreate the foreign keys dropped above.
    # (Fixed: the original terminated these calls with stray trailing
    # commas, wrapping each statement in a pointless 1-tuple.)
    op.create_foreign_key('cisco_router_ha_groups_ibfk_1',
                          source_table='cisco_router_ha_groups',
                          referent_table='ports',
                          local_cols=['ha_port_id'],
                          remote_cols=['id'],
                          ondelete='CASCADE')
    op.create_foreign_key('cisco_router_ha_groups_ibfk_2',
                          source_table='cisco_router_ha_groups',
                          referent_table='ports',
                          local_cols=['extra_port_id'],
                          remote_cols=['id'],
                          ondelete='SET NULL')
    op.create_foreign_key('cisco_router_ha_groups_ibfk_3',
                          source_table='cisco_router_ha_groups',
                          referent_table='subnets',
                          local_cols=['subnet_id'],
                          remote_cols=['id'])
    op.create_foreign_key('cisco_router_ha_groups_ibfk_4',
                          source_table='cisco_router_ha_groups',
                          referent_table='routers',
                          local_cols=['user_router_id'],
                          remote_cols=['id'])
| noironetworks/networking-cisco | networking_cisco/db/migration/alembic_migrations/versions/mitaka/expand/73c84db9f299_update_ha_group_primary_key.py | Python | apache-2.0 | 2,956 | 0 |
from fancypages.templatetags.fp_container_tags import *
| tangentlabs/django-oscar-fancypages | oscar_fancypages/fancypages/templatetags/fp_container_tags.py | Python | bsd-3-clause | 56 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-23 15:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9): add verbose names to several
    Certificate fields of the django_ca app; field types are unchanged."""

    dependencies = [
        ('django_ca', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='certificate',
            name='cn',
            field=models.CharField(max_length=64, verbose_name='CommonName'),
        ),
        migrations.AlterField(
            model_name='certificate',
            name='csr',
            field=models.TextField(verbose_name='CSR'),
        ),
        migrations.AlterField(
            model_name='certificate',
            name='pub',
            field=models.TextField(verbose_name='Public key'),
        ),
        migrations.AlterField(
            model_name='certificate',
            name='revoked_date',
            field=models.DateTimeField(blank=True, null=True, verbose_name='Revoked on'),
        ),
        migrations.AlterField(
            model_name='certificate',
            name='revoked_reason',
            field=models.CharField(blank=True, max_length=32, null=True, verbose_name='Reason for revokation'),
        ),
    ]
| fsinf/certificate-authority | ca/django_ca/migrations/0002_auto_20151223_1508.py | Python | gpl-3.0 | 1,224 | 0.001634 |
# Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT, included in this distribution as LICENSE
""" """
from rowgenerators.appurl.file.file import FileUrl
from rowgenerators.exceptions import AppUrlError
class ZipUrlError(AppUrlError):
    """Raised for errors specific to zip-archive URL handling."""
    pass
class ZipUrl(FileUrl):
    """Zip URLS represent a zip file, as a local resource. """

    # Lower than FileUrl so more specific handlers win the match.
    match_priority = FileUrl.match_priority - 10

    def __init__(self, url=None, downloader=None, **kwargs):
        # Force the resource format so downstream code treats this as a zip.
        kwargs['resource_format'] = 'zip'
        super().__init__(url, downloader=downloader, **kwargs)

    @property
    def target_file(self):
        """
        Returns the target file, which is usually stored in the first slot in the ``fragment``,
        but may have been overridden with a ``fragment_query``.

        :return:
        """
        if self._target_file:
            return self._target_file

        if self.fragment[0]:
            return self.fragment[0]

        # Naming convention: "foo.csv.zip" implies the inner target "foo.csv".
        for ext in ('csv', 'xls', 'xlsx'):
            if self.resource_file.endswith('.' + ext + '.zip'):
                return self.resource_file.replace('.zip', '')

        # Want to return none, so get_files_from-zip can assume to use the first file in the archive.
        return None

    def join_target(self, tf):
        """
        Joins the target ``tf`` by setting the value of the first slot of the fragment.

        :param tf:
        :return: a clone of this url with a new fragment.
        """
        u = self.clone()

        try:
            tf = str(tf.path)
        except:
            # tf may already be a plain string rather than a path-like object.
            pass

        u.fragment = [tf, u.fragment[1]]  # In case its a tuple, don't edit in place

        return u

    def get_resource(self):
        # The zip file itself is the resource; nothing further to fetch.
        return self

    @property
    def zip_dir(self):
        """Directory that files will be extracted to"""
        from os.path import abspath

        cache_dir = self.downloader.cache.getsyspath('/')

        target_path = abspath(self.fspath)

        if target_path.startswith(cache_dir):  # Case when file is already in cache
            return str(self.fspath) + '_d'
        else:  # file is not in cache; it may exist elsewhere.
            return self.downloader.cache.getsyspath(target_path.lstrip('/')) + '_d'

    def get_target(self):
        """
        Extract the target file from the archive, store it in the cache, and return a file Url to the
        cached file.
        """
        from rowgenerators.appurl.url import parse_app_url
        from zipfile import ZipFile
        import io
        from os.path import join, dirname
        from rowgenerators.appurl.util import copy_file_or_flo, ensure_dir

        assert self.zip_dir

        zf = ZipFile(str(self.fspath))

        # Resolve (and remember) the archive member before extraction.
        self._target_file = ZipUrl.get_file_from_zip(self)

        target_path = join(self.zip_dir, self.target_file)
        ensure_dir(dirname(target_path))

        with io.open(target_path, 'wb') as f, zf.open(self.target_file) as flo:
            copy_file_or_flo(flo, f)

        # Drop keys that would re-mark the extracted file as a zip resource.
        fq = self.fragment_query

        if 'resource_format' in fq:
            del fq['resource_format']

        if 'resource_file' in fq:
            del fq['resource_file']

        tu = parse_app_url(target_path,
                           fragment_query=fq,
                           fragment=[self.target_segment, None],
                           scheme_extension=self.scheme_extension,
                           # Clear out the resource info so we don't get a ZipUrl
                           downloader=self.downloader
                           )

        if self.target_format != tu.target_format:
            try:
                tu.target_format = self.target_format
            except AttributeError:
                pass  # Some URLS don't allow resetting target type.

        return tu

    def list(self):
        """List the files in the referenced Zip file"""
        from zipfile import ZipFile

        if self.target_file:
            # A target is set: delegate to the extracted target's listing.
            return list(self.set_target_segment(tl.target_segment) for tl in self.get_target().list())
        else:
            real_files = ZipUrl.real_files_in_zf(ZipFile(str(self.fspath)))
            return list(self.set_target_file(rf) for rf in real_files)

    @staticmethod
    def get_file_from_zip(url):
        """Given a file name that may be a regular expression, return the full name for the file
        from a zip archive"""

        from zipfile import ZipFile
        import re

        names = []

        zf = ZipFile(str(url.fspath))

        nl = list(ZipUrl.real_files_in_zf(zf))  # Old way, but maybe gets links? : list(zf.namelist())

        tf = url.target_file
        ts = url.target_segment

        if not nl:
            # sometimes real_files_in_zf doesn't work at all. I don't know why it does work,
            # so I certainly don't know why it does not.
            nl = list(zf.namelist())

        # the target_file may be a string, or a regular expression
        if tf:
            names = list([e for e in nl if re.search(tf, e)
                          and not (e.startswith('__') or e.startswith('.'))
                          ])
            if len(names) > 0:
                return names[0]

        # The segment, if it exists, can only be an integer, and should probably be
        # '0' to indicate the first file. This clause is probably a bad idea, since
        # andy other integer is probably meaningless.
        if ts:
            try:
                return nl[int(ts)]
            except (IndexError, ValueError):
                pass

        # Just return the first file in the archive.
        if not tf and not ts:
            return nl[0]
        else:
            raise ZipUrlError("Could not find file in Zip {} for target='{}' nor segment='{}'"
                              .format(url.fspath, url.target_file, url.target_segment))

    @staticmethod
    def real_files_in_zf(zf):
        """Return a list of internal paths of real files in a zip file, based on the 'external_attr' values"""
        from os.path import basename

        for e in zf.infolist():

            # Get rid of __MACOS and .DS_whatever
            if basename(e.filename).startswith('__') or basename(e.filename).startswith('.'):
                continue

            # I really don't understand external_attr, but no one else seems to either,
            # so we're just hacking here.
            # e.external_attr>>31&1 works when the archive has external attrs set, and a dir heirarchy
            # e.external_attr==0 works in cases where there are no external attrs set
            # e.external_attr==32 is true for some single-file archives.
            if bool(e.external_attr >> 31 & 1 or e.external_attr == 0 or e.external_attr == 32):
                yield e.filename

    @classmethod
    def _match(cls, url, **kwargs):
        # Match anything explicitly formatted as zip, or a forced archive.
        return url.resource_format == 'zip' or kwargs.get('force_archive')
| CivicKnowledge/rowgenerators | rowgenerators/appurl/archive/zip.py | Python | mit | 6,923 | 0.004911 |
"""
This module is used to select features or proposals
"""
def select_preceding(features, k):
    """Select the preceding *k* features or proposals for each image.

    :param features: per-image sequences (feature arrays or proposal arrays)
    :param k: number of leading entries to keep per image
    :return: list with each image's first k entries; images with fewer
        than k entries keep everything they have.
    """
    selected = []
    for per_image in features:
        selected.append(per_image[:k])
    return selected
| zhitiancheng/cliff | cliff/selection.py | Python | gpl-2.0 | 593 | 0 |
__version__ = "1.4.3"

import sys
import os

# verify that pygame is on the machine
try:
    import pygame
except Exception:
    # NOTE(review): the failure is only reported, not re-raised; the later
    # "pygame.USEREVENT" reference will raise NameError if this triggered.
    print("Pygame doesn't seem to be installed on this machine.")

# add thorpy folder to Windows and Python search paths
THORPY_PATH = os.path.abspath(os.path.dirname(__file__))
try:
    os.environ['PATH'] = ';'.join((THORPY_PATH, os.environ['PATH']))
    sys.path.append(THORPY_PATH)
except Exception:
    print("Couldn't add Thor to sys.path...\nThorPy path : " + THORPY_PATH)

USEREVENT = pygame.USEREVENT + 1  # ThorPy reserves one slot of pygame's userevents
#import subpackages
import thorpy.elements
import thorpy.menus
import thorpy._utils
import thorpy.miscgui
import thorpy.painting as painting
import thorpy.miscgui.application as application
import thorpy.miscgui.storage as storage
import testmodule
# not all elements are imported ; only those that can be safely used by lambda
# user.
from thorpy.elements.launchers.boxlauncher import BoxLauncher
from thorpy.elements.launchers.browserlauncher import BrowserLauncher
from thorpy.elements.launchers.dropdownlistlauncher import DropDownListLauncher
from thorpy.elements.launchers._launcher import _Launcher
from thorpy.elements.background import Background
from thorpy.elements.image import Image
from thorpy.elements.box import Box, BarBox
from thorpy.elements.browserlight import BrowserLight
from thorpy.elements.browser import Browser
from thorpy.elements.checker import Checker
from thorpy.elements.clickable import Clickable
from thorpy.elements._wrappers import make_button, make_text
from thorpy.elements.colorsetter import ColorSetter
from thorpy.elements.ddlf import DropDownListFast as DropDownList
from thorpy.elements.draggable import Draggable, ClickDraggable
from thorpy.elements.element import Element
from thorpy.elements.ghost import Ghost
from thorpy.elements.hoverable import Hoverable
from thorpy.elements.hoverzone import HoverZone
from thorpy.elements.inserter import Inserter
from thorpy.elements.keypressable import KeyPressable
from thorpy.elements.keytogglable import KeyTogglable
from thorpy.elements.launchers.paramsetter import ParamSetter
from thorpy.elements.pressable import Pressable
##from thorpy.elements.text import MultilineText
from thorpy.elements.text import OneLineText, MultilineText
from thorpy.elements.slidersetter import SliderXSetter as SliderX
from thorpy.elements.togglable import Togglable
from thorpy.elements.line import Line
from thorpy.elements._makeuputils._halo import Halo
from thorpy.elements._makeuputils._shadow import StaticShadow
from thorpy.elements._makeuputils._shadow import DynamicShadow
# menus:
from thorpy.menus.tickedmenu import TickedMenu as Menu
from thorpy.menus.basicmenu import BasicMenu
# miscellaneous stuff, constants, parameters
from thorpy.miscgui.application import Application
from thorpy.miscgui.reaction import Reaction, ConstantReaction
from thorpy.miscgui import constants, functions
from thorpy.miscgui import style
from thorpy.miscgui import painterstyle
from thorpy.miscgui import parameters
from thorpy.miscgui.initializer import Initializer
from thorpy.miscgui.state import State
from thorpy.miscgui.storage import Storer, store
from thorpy.miscgui.title import Title
from thorpy.miscgui.varset import VarSet
from thorpy.miscgui import theme
from thorpy.miscgui.theme import set_theme as set_theme
from thorpy.painting.writer import Writer
from thorpy.painting import painters
from thorpy.painting import makeup
from thorpy.gamestools.basegrid import BaseGrid
from thorpy.gamestools.grid import Grid
del thorpy, pygame, os, sys
| YannThorimbert/ThorPy-1.4.3 | thorpy/__init__.py | Python | mit | 3,614 | 0.017432 |
# Copyright (C) 2008 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gettext import gettext as _
import logging
import os
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GConf
from gi.repository import Gio
from gi.repository import GLib
from sugar3.graphics import style
from sugar3.graphics.palette import Palette
from sugar3.graphics.menuitem import MenuItem
from sugar3.graphics.icon import Icon
from sugar3.graphics.xocolor import XoColor
from sugar3.graphics.alert import Alert
from sugar3 import mime
from jarabe.model import friends
from jarabe.model import filetransfer
from jarabe.model import mimeregistry
from jarabe.journal import misc
from jarabe.journal import model
from jarabe.journal import journalwindow
class ObjectPalette(Palette):
__gtype_name__ = 'ObjectPalette'
__gsignals__ = {
'detail-clicked': (GObject.SignalFlags.RUN_FIRST, None,
([str])),
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
def __init__(self, metadata, detail=False):
self._metadata = metadata
activity_icon = Icon(icon_size=Gtk.IconSize.LARGE_TOOLBAR)
activity_icon.props.file = misc.get_icon_name(metadata)
color = misc.get_icon_color(metadata)
activity_icon.props.xo_color = color
if 'title' in metadata:
title = GObject.markup_escape_text(metadata['title'])
else:
title = GLib.markup_escape_text(_('Untitled'))
Palette.__init__(self, primary_text=title,
icon=activity_icon)
if misc.get_activities(metadata) or misc.is_bundle(metadata):
if metadata.get('activity_id', ''):
resume_label = _('Resume')
resume_with_label = _('Resume with')
else:
resume_label = _('Start')
resume_with_label = _('Start with')
menu_item = MenuItem(resume_label, 'activity-start')
menu_item.connect('activate', self.__start_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(resume_with_label, 'activity-start')
self.menu.append(menu_item)
menu_item.show()
start_with_menu = StartWithMenu(self._metadata)
menu_item.set_submenu(start_with_menu)
else:
menu_item = MenuItem(_('No activity to start entry'))
menu_item.set_sensitive(False)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Copy to'))
icon = Icon(icon_name='edit-copy', xo_color=color,
icon_size=Gtk.IconSize.MENU)
menu_item.set_image(icon)
self.menu.append(menu_item)
menu_item.show()
copy_menu = CopyMenu(metadata)
copy_menu.connect('volume-error', self.__volume_error_cb)
menu_item.set_submenu(copy_menu)
if self._metadata['mountpoint'] == '/':
menu_item = MenuItem(_('Duplicate'))
icon = Icon(icon_name='edit-duplicate', xo_color=color,
icon_size=Gtk.IconSize.MENU)
menu_item.set_image(icon)
menu_item.connect('activate', self.__duplicate_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Send to'), 'document-send')
self.menu.append(menu_item)
menu_item.show()
friends_menu = FriendsMenu()
friends_menu.connect('friend-selected', self.__friend_selected_cb)
menu_item.set_submenu(friends_menu)
if detail == True:
menu_item = MenuItem(_('View Details'), 'go-right')
menu_item.connect('activate', self.__detail_activate_cb)
self.menu.append(menu_item)
menu_item.show()
menu_item = MenuItem(_('Erase'), 'list-remove')
menu_item.connect('activate', self.__erase_activate_cb)
self.menu.append(menu_item)
menu_item.show()
def __start_activate_cb(self, menu_item):
misc.resume(self._metadata)
def __duplicate_activate_cb(self, menu_item):
file_path = model.get_file(self._metadata['uid'])
try:
model.copy(self._metadata, '/')
except IOError, e:
logging.exception('Error while copying the entry. %s', e.strerror)
self.emit('volume-error',
_('Error while copying the entry. %s') % e.strerror,
_('Error'))
def __erase_activate_cb(self, menu_item):
alert = Alert()
erase_string = _('Erase')
alert.props.title = erase_string
alert.props.msg = _('Do you want to permanently erase \"%s\"?') \
% self._metadata['title']
icon = Icon(icon_name='dialog-cancel')
alert.add_button(Gtk.ResponseType.CANCEL, _('Cancel'), icon)
icon.show()
ok_icon = Icon(icon_name='dialog-ok')
alert.add_button(Gtk.ResponseType.OK, erase_string, ok_icon)
ok_icon.show()
alert.connect('response', self.__erase_alert_response_cb)
journalwindow.get_journal_window().add_alert(alert)
alert.show()
def __erase_alert_response_cb(self, alert, response_id):
journalwindow.get_journal_window().remove_alert(alert)
if response_id is Gtk.ResponseType.OK:
model.delete(self._metadata['uid'])
def __detail_activate_cb(self, menu_item):
self.emit('detail-clicked', self._metadata['uid'])
def __volume_error_cb(self, menu_item, message, severity):
self.emit('volume-error', message, severity)
def __friend_selected_cb(self, menu_item, buddy):
logging.debug('__friend_selected_cb')
file_name = model.get_file(self._metadata['uid'])
if not file_name or not os.path.exists(file_name):
logging.warn('Entries without a file cannot be sent.')
self.emit('volume-error',
_('Entries without a file cannot be sent.'),
_('Warning'))
return
title = str(self._metadata['title'])
description = str(self._metadata.get('description', ''))
mime_type = str(self._metadata['mime_type'])
if not mime_type:
mime_type = mime.get_for_file(file_name)
filetransfer.start_transfer(buddy, file_name, title, description,
mime_type)
class CopyMenu(Gtk.Menu):
    """Submenu listing every destination an entry can be copied to.

    Destinations, in order: the clipboard, the Journal (when the entry
    lives on an external volume), the Documents folder (when available
    and the entry is not already there), and every mounted volume other
    than the entry's own.  Each child forwards its 'volume-error'
    signal through this menu's own 'volume-error' signal.
    """
    __gtype_name__ = 'JournalCopyMenu'
    __gsignals__ = {
        'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
                         ([str, str])),
    }
    def __init__(self, metadata):
        Gtk.Menu.__init__(self)
        self._metadata = metadata
        # Clipboard is always offered first.
        clipboard_menu = ClipboardMenu(self._metadata)
        clipboard_menu.set_image(Icon(icon_name='toolbar-edit',
                                      icon_size=Gtk.IconSize.MENU))
        clipboard_menu.connect('volume-error', self.__volume_error_cb)
        self.append(clipboard_menu)
        clipboard_menu.show()
        # Offer "copy back to Journal" only for entries on other volumes.
        if self._metadata['mountpoint'] != '/':
            client = GConf.Client.get_default()
            color = XoColor(client.get_string('/desktop/sugar/user/color'))
            journal_menu = VolumeMenu(self._metadata, _('Journal'), '/')
            journal_menu.set_image(Icon(icon_name='activity-journal',
                                        xo_color=color,
                                        icon_size=Gtk.IconSize.MENU))
            journal_menu.connect('volume-error', self.__volume_error_cb)
            self.append(journal_menu)
            journal_menu.show()
        # Documents folder, unless the entry already lives there.
        documents_path = model.get_documents_path()
        if documents_path is not None and not \
                self._metadata['uid'].startswith(documents_path):
            documents_menu = VolumeMenu(self._metadata, _('Documents'),
                                        documents_path)
            documents_menu.set_image(Icon(icon_name='user-documents',
                                          icon_size=Gtk.IconSize.MENU))
            documents_menu.connect('volume-error', self.__volume_error_cb)
            self.append(documents_menu)
            documents_menu.show()
        # One item per mounted volume, skipping the entry's own volume.
        volume_monitor = Gio.VolumeMonitor.get()
        icon_theme = Gtk.IconTheme.get_default()
        for mount in volume_monitor.get_mounts():
            if self._metadata['mountpoint'] == mount.get_root().get_path():
                continue
            volume_menu = VolumeMenu(self._metadata, mount.get_name(),
                                     mount.get_root().get_path())
            # Use the first themed icon name the mount advertises.
            for name in mount.get_icon().props.names:
                if icon_theme.has_icon(name):
                    volume_menu.set_image(Icon(icon_name=name,
                                               icon_size=Gtk.IconSize.MENU))
                    break
            volume_menu.connect('volume-error', self.__volume_error_cb)
            self.append(volume_menu)
            volume_menu.show()
    def __volume_error_cb(self, menu_item, message, severity):
        # Re-emit child errors so the palette can surface them.
        self.emit('volume-error', message, severity)
class VolumeMenu(MenuItem):
__gtype_name__ = 'JournalVolumeMenu'
__gsignals__ = {
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
def __init__(self, metadata, label, mount_point):
MenuItem.__init__(self, label)
self._metadata = metadata
self.connect('activate', self.__copy_to_volume_cb, mount_point)
def __copy_to_volume_cb(self, menu_item, mount_point):
file_path = model.get_file(self._metadata['uid'])
if not file_path or not os.path.exists(file_path):
logging.warn('Entries without a file cannot be copied.')
self.emit('volume-error',
_('Entries without a file cannot be copied.'),
_('Warning'))
return
try:
model.copy(self._metadata, mount_point)
except IOError, e:
logging.exception('Error while copying the entry. %s', e.strerror)
self.emit('volume-error',
_('Error while copying the entry. %s') % e.strerror,
_('Error'))
class ClipboardMenu(MenuItem):
    """Menu item that places a journal entry's file URI on the clipboard."""
    __gtype_name__ = 'JournalClipboardMenu'
    __gsignals__ = {
        'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
                         ([str, str])),
    }
    def __init__(self, metadata):
        MenuItem.__init__(self, _('Clipboard'))
        # Holds the temp file path while the clipboard owns the data,
        # so the file is not garbage collected/deleted prematurely.
        self._temp_file_path = None
        self._metadata = metadata
        self.connect('activate', self.__copy_to_clipboard_cb)
    def __copy_to_clipboard_cb(self, menu_item):
        file_path = model.get_file(self._metadata['uid'])
        if not file_path or not os.path.exists(file_path):
            logging.warn('Entries without a file cannot be copied.')
            self.emit('volume-error',
                      _('Entries without a file cannot be copied.'),
                      _('Warning'))
            return
        # Provide the data lazily: the get/clear callbacks run when a
        # paste actually happens or the clipboard is replaced.
        clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
        clipboard.set_with_data([Gtk.TargetEntry.new('text/uri-list', 0, 0)],
                                self.__clipboard_get_func_cb,
                                self.__clipboard_clear_func_cb, None)
    def __clipboard_get_func_cb(self, clipboard, selection_data, info, data):
        # Get hold of a reference so the temp file doesn't get deleted
        self._temp_file_path = model.get_file(self._metadata['uid'])
        logging.debug('__clipboard_get_func_cb %r', self._temp_file_path)
        selection_data.set_uris(['file://' + self._temp_file_path])
    def __clipboard_clear_func_cb(self, clipboard, data):
        # Release and delete the temp file
        self._temp_file_path = None
class FriendsMenu(Gtk.Menu):
    """Submenu of currently present friends to send an entry to.

    Emits 'friend-selected' with the chosen buddy.  When file transfer
    is unavailable, or nobody is present, a single insensitive
    placeholder item is shown instead.
    """

    __gtype_name__ = 'JournalFriendsMenu'

    __gsignals__ = {
        'friend-selected': (GObject.SignalFlags.RUN_FIRST, None,
                            ([object])),
    }

    def __init__(self):
        Gtk.Menu.__init__(self)
        if not filetransfer.file_transfer_available():
            self._append_placeholder(_('No valid connection found'))
            return
        for buddy in friends.get_model():
            if not buddy.is_present():
                continue
            item = MenuItem(text_label=buddy.get_nick(),
                            icon_name='computer-xo',
                            xo_color=buddy.get_color())
            item.connect('activate', self.__item_activate_cb, buddy)
            self.append(item)
            item.show()
        if not self.get_children():
            self._append_placeholder(_('No friends present'))

    def _append_placeholder(self, label):
        # Insensitive item shown when there is nothing to select.
        item = MenuItem(label)
        item.set_sensitive(False)
        self.append(item)
        item.show()

    def __item_activate_cb(self, menu_item, friend):
        self.emit('friend-selected', friend)
class StartWithMenu(Gtk.Menu):
    """Submenu listing every activity able to open a journal entry.

    Selecting an activity records it as the default handler for the
    entry's MIME type and resumes the entry with it.
    """

    __gtype_name__ = 'JournalStartWithMenu'

    def __init__(self, metadata):
        Gtk.Menu.__init__(self)
        self._metadata = metadata
        for info in misc.get_activities(metadata):
            item = MenuItem(info.get_name())
            item.set_image(Icon(file=info.get_icon(),
                                icon_size=Gtk.IconSize.MENU))
            item.connect('activate', self.__item_activate_cb,
                         info.get_bundle_id())
            self.append(item)
            item.show()
        if not self.get_children():
            # No matching activity: show an insensitive explanation,
            # phrased for resume vs. start depending on history.
            if metadata.get('activity_id', ''):
                label = _('No activity to resume entry')
            else:
                label = _('No activity to start entry')
            placeholder = MenuItem(label)
            placeholder.set_sensitive(False)
            self.append(placeholder)
            placeholder.show()

    def __item_activate_cb(self, menu_item, service_name):
        mime_type = self._metadata.get('mime_type', '')
        if mime_type:
            # Remember this choice as the default for the MIME type.
            registry = mimeregistry.get_registry()
            registry.set_default_activity(mime_type, service_name)
        misc.resume(self._metadata, service_name)
class BuddyPalette(Palette):
    """Palette shown for a buddy associated with a journal entry."""

    def __init__(self, buddy):
        self._buddy = buddy
        nick, colors = buddy
        buddy_icon = Icon(icon_name='computer-xo',
                          icon_size=style.STANDARD_ICON_SIZE,
                          xo_color=XoColor(colors))
        Palette.__init__(self,
                         primary_text=GLib.markup_escape_text(nick),
                         icon=buddy_icon)
        # TODO: Support actions on buddies, like make friend, invite, etc.
| ajaygarg84/sugar | src/jarabe/journal/palettes.py | Python | gpl-2.0 | 15,839 | 0.000126 |
#coding=utf-8
import argparse
import json
import os
from smartqq import start_qq, list_messages, create_db
def load_pluginconfig(configjson):
    """Read the plugin configuration from the JSON file *configjson*.

    Returns the parsed JSON object, or None when *configjson* is None
    or does not name an existing file (the caller then falls back to
    its built-in default settings).
    """
    if configjson is None:
        return None
    if not os.path.isfile(configjson):
        print("unable to load the configuration file for plugins, default settings will be used.")
        return None
    with open(configjson, "r") as handle:
        return json.load(handle)
def main():
    """Command line entry point for the SmartQQ bot.

    Parses the command line, loads the plugin configuration (falling
    back to built-in defaults when the file is absent or unreadable),
    then either lists recorded messages, creates the message database,
    or starts the QQ client.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--no-gui",
        action="store_true",
        default=False,
        help="Whether display QRCode with tk and PIL."
    )
    parser.add_argument(
        "--new-user",
        action="store_true",
        default=False,
        help="Logout old user first(by clean the cookie file.)"
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        default=False,
        help="Switch to DEBUG mode for better view of requests and responses."
    )
    parser.add_argument(
        "--plugin",
        default="config.json",
        help="Specify the json file for the setting of the plugins."
    )
    parser.add_argument(
        "--cookie",
        default="cookie.data",
        help="Specify the storage path for cookie."
    )
    parser.add_argument(
        "--vpath",
        default="./v.jpg",
        help="Specify the storage path for login bar code."
    )
    parser.add_argument(
        "--list",
        action="store_true",
        default=False,
        help="List the recorded qq messages."
    )
    parser.add_argument(
        "--create",
        action="store_true",
        default=False,
        help="Create the database for recording qq messages."
    )
    options = parser.parse_args()

    # Load the plugin settings exactly once, inside the try block, so a
    # broken configuration file falls back to the defaults instead of
    # crashing on startup (previously it was also loaded, unprotected,
    # before the try).  A missing file yields None, which also falls
    # back to the defaults so configjson["dbhandler"] below is safe.
    try:
        configjson = load_pluginconfig(options.plugin)
        print("got json: %s" % configjson)
    except Exception:
        configjson = None
    if configjson is None:
        print("using default setting")
        configjson = {
            "dbhandler": "sqlite:///message-record.db",
            "plugin_root": "./plugins",
            "plugins": [
                "pluginmanage",
                "plugindemo"
            ]
        }

    if options.list:
        list_messages()
    elif options.create:
        create_db(configjson["dbhandler"])
    else:
        try:
            start_qq(
                plugin_setting=configjson,
                no_gui=options.no_gui,
                new_user=options.new_user,
                debug=options.debug,
                dbhandler=configjson["dbhandler"],
                cookie_file=options.cookie,
                vpath=options.vpath
            )
        except KeyboardInterrupt:
            # Clean shutdown on Ctrl-C.
            exit(0)


if __name__ == "__main__":
    main()
| zhongjingjogy/SmartQQBot | smartqq/main.py | Python | gpl-2.0 | 2,757 | 0.002176 |
import uuid
import os
import time
import tempfile
import pandas as pd
import pytest
from synapseclient.core.exceptions import SynapseHTTPError
from synapseclient import Entity, File, Folder, Link, Project, Schema
import synapseclient.core.utils as utils
import synapseutils
from tests.integration import QUERY_TIMEOUT_SEC
@pytest.fixture(scope='module', autouse=True)
def test_state(syn, schedule_for_cleanup):
    """Module-scoped fixture: a Synapse project + folder, two local
    bogus data files, and several pre-built manifest rows used by the
    sync tests below.
    """
    class TestState:
        def __init__(self):
            self.syn = syn
            self.project = syn.store(Project(name=str(uuid.uuid4())))
            self.folder = syn.store(Folder(name=str(uuid.uuid4()), parent=self.project))
            self.schedule_for_cleanup = schedule_for_cleanup
            # Create testfiles for upload
            self.f1 = utils.make_bogus_data_file(n=10)
            self.f2 = utils.make_bogus_data_file(n=10)
            self.f3 = 'https://www.synapse.org'
            # Manifest header plus rows exercising provenance, URLs,
            # a circular reference (row4) and a bad parent (row5).
            self.header = 'path parent used executed activityName synapseStore foo\n'
            self.row1 = '%s %s %s "%s;https://www.example.com" provName bar\n' % (
                self.f1, self.project.id, self.f2, self.f3
            )
            self.row2 = '%s %s "syn12" "syn123;https://www.example.com" provName2 bar\n' % (
                self.f2, self.folder.id
            )
            self.row3 = '%s %s "syn12" prov2 False baz\n' % (self.f3, self.folder.id)
            self.row4 = '%s %s %s act 2\n' % (self.f3, self.project.id, self.f1)  # Circular reference
            self.row5 = '%s syn12 \n' % (self.f3)  # Wrong parent
    test_state = TestState()
    # Remove the project (and its children) and the local files afterwards.
    schedule_for_cleanup(test_state.project)
    schedule_for_cleanup(test_state.f1)
    schedule_for_cleanup(test_state.f2)
    return test_state
def _makeManifest(content, schedule_for_cleanup):
    """Write *content* to a temporary .dat file and return its
    normalized path, registering the file for cleanup."""
    with tempfile.NamedTemporaryFile(mode='w', suffix=".dat", delete=False) as manifest_file:
        manifest_file.write(content)
    manifest_path = utils.normalize_path(manifest_file.name)
    schedule_for_cleanup(manifest_path)
    return manifest_path
def test_readManifest(test_state):
    """Creates multiple manifests and verifies that they validate correctly"""
    # Test manifest with missing columns
    manifest = _makeManifest(
        '"path"\t"foo"\n#"result_data.txt"\t"syn123"',
        test_state.schedule_for_cleanup
    )
    pytest.raises(ValueError, synapseutils.sync.readManifestFile, test_state.syn, manifest)
    # Test that there are no circular references in file and that Provenance is correct
    # (row4 makes f3 depend on f1 which depends on f3 via row1).
    manifest = _makeManifest(
        test_state.header + test_state.row1 + test_state.row2 + test_state.row4,
        test_state.schedule_for_cleanup
    )
    pytest.raises(RuntimeError, synapseutils.sync.readManifestFile, test_state.syn, manifest)
    # Test non existent parent (row5 references bogus parent 'syn12')
    manifest = _makeManifest(
        test_state.header + test_state.row1 + test_state.row5,
        test_state.schedule_for_cleanup
    )
    pytest.raises(SynapseHTTPError, synapseutils.sync.readManifestFile, test_state.syn, manifest)
    # Test that all files exist in manifest
    manifest = _makeManifest(
        test_state.header + test_state.row1 + test_state.row2 + '/bara/basdfasdf/8hiuu.txt syn123\n',
        test_state.schedule_for_cleanup
    )
    pytest.raises(IOError, synapseutils.sync.readManifestFile, test_state.syn, manifest)
def test_syncToSynapse(test_state):
    """Round-trip a manifest through syncToSynapse/syncFromSynapse and
    verify locations, annotations and provenance survive intact."""
    # Test upload of accurate manifest
    manifest = _makeManifest(
        test_state.header + test_state.row1 + test_state.row2 + test_state.row3,
        test_state.schedule_for_cleanup
    )
    synapseutils.syncToSynapse(test_state.syn, manifest, sendMessages=False, retries=2)
    # syn.getChildren() used by syncFromSynapse() may intermittently have timing issues
    time.sleep(3)
    # Download using syncFromSynapse
    tmpdir = tempfile.mkdtemp()
    test_state.schedule_for_cleanup(tmpdir)
    synapseutils.syncFromSynapse(test_state.syn, test_state.project, path=tmpdir)
    # Index both manifests by file basename so rows can be compared.
    orig_df = pd.read_csv(manifest, sep='\t')
    orig_df.index = [os.path.basename(p) for p in orig_df.path]
    new_df = pd.read_csv(os.path.join(tmpdir, synapseutils.sync.MANIFEST_FILENAME), sep='\t')
    new_df.index = [os.path.basename(p) for p in new_df.path]
    assert len(orig_df) == len(new_df)
    new_df = new_df.loc[orig_df.index]
    # Validate what was uploaded is in right location
    assert new_df.parent.equals(orig_df.parent), 'Downloaded files not stored in same location'
    # Validate that annotations were set
    cols = synapseutils.sync.REQUIRED_FIELDS + synapseutils.sync.FILE_CONSTRUCTOR_FIELDS\
        + synapseutils.sync.STORE_FUNCTION_FIELDS + synapseutils.sync.PROVENANCE_FIELDS
    orig_anots = orig_df.drop(cols, axis=1, errors='ignore')
    new_anots = new_df.drop(cols, axis=1, errors='ignore')
    assert orig_anots.shape[1] == new_anots.shape[1]  # Verify that we have the same number of cols
    assert new_anots.equals(orig_anots.loc[:, new_anots.columns]), 'Annotations different'
    # Validate that provenance is correct
    for provenanceType in ['executed', 'used']:
        # Go through each row
        for orig, new in zip(orig_df[provenanceType], new_df[provenanceType]):
            if not pd.isnull(orig) and not pd.isnull(new):
                # Convert local file paths into synId.versionNumber strings
                orig_list = ['%s.%s' % (i.id, i.versionNumber) if isinstance(i, Entity) else i
                             for i in test_state.syn._convertProvenanceList(orig.split(';'))]
                new_list = ['%s.%s' % (i.id, i.versionNumber) if isinstance(i, Entity) else i
                            for i in test_state.syn._convertProvenanceList(new.split(';'))]
                assert set(orig_list) == set(new_list)
def test_syncFromSynapse(test_state):
    """This function tests recursive download as defined in syncFromSynapse.

    Most of the functionality of this function is already tested in
    tests/integration/test_command_line_client::test_command_get_recursive_and_query,
    so the only thing tested here is the path=None case.
    """
    # Create a Project
    project_entity = test_state.syn.store(Project(name=str(uuid.uuid4())))
    test_state.schedule_for_cleanup(project_entity.id)
    # Create a Folder in Project
    folder_entity = test_state.syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    # Create and upload two files in Folder
    uploaded_paths = []
    for i in range(2):
        f = utils.make_bogus_data_file()
        uploaded_paths.append(f)
        test_state.schedule_for_cleanup(f)
        test_state.syn.store(File(f, parent=folder_entity))
    # Add a file in the project level as well
    f = utils.make_bogus_data_file()
    uploaded_paths.append(f)
    test_state.schedule_for_cleanup(f)
    test_state.syn.store(File(f, parent=project_entity))
    # syncFromSynapse() uses chunkedQuery() which will return results that are eventually consistent
    # but not always right after the entity is created.
    start_time = time.time()
    while len(list(test_state.syn.getChildren(project_entity))) != 2:
        assert time.time() - start_time < QUERY_TIMEOUT_SEC
        time.sleep(2)
    # Test recursive get: every uploaded file must come back.
    output = synapseutils.syncFromSynapse(test_state.syn, project_entity)
    assert len(output) == len(uploaded_paths)
    for f in output:
        assert utils.normalize_path(f.path) in uploaded_paths
def test_syncFromSynapse__children_contain_non_file(test_state):
    """Non-file children (here a table Schema) must be skipped by
    syncFromSynapse: only the file entity is returned."""
    proj = test_state.syn.store(Project(name="test_syncFromSynapse_children_non_file" + str(uuid.uuid4())))
    test_state.schedule_for_cleanup(proj)
    temp_file = utils.make_bogus_data_file()
    test_state.schedule_for_cleanup(temp_file)
    file_entity = test_state.syn.store(
        File(
            temp_file,
            name="temp_file_test_syncFromSynapse_children_non_file" + str(uuid.uuid4()),
            parent=proj
        )
    )
    # A table schema sibling that the sync must ignore.
    test_state.syn.store(Schema(name="table_test_syncFromSynapse", parent=proj))
    temp_folder = tempfile.mkdtemp()
    test_state.schedule_for_cleanup(temp_folder)
    files_list = synapseutils.syncFromSynapse(test_state.syn, proj, temp_folder)
    assert 1 == len(files_list)
    assert file_entity == files_list[0]
def test_syncFromSynapse_Links(test_state):
    """This function tests recursive download of links as defined in syncFromSynapse.

    Most of the functionality of this function is already tested in
    tests/integration/test_command_line_client::test_command_get_recursive_and_query,
    so the only thing tested here is the path=None case with
    followLink=True.
    """
    # Create a Project
    project_entity = test_state.syn.store(Project(name=str(uuid.uuid4())))
    test_state.schedule_for_cleanup(project_entity.id)
    # Create a Folder in Project
    folder_entity = test_state.syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    # Create a Folder hierarchy in folder_entity
    inner_folder_entity = test_state.syn.store(Folder(name=str(uuid.uuid4()), parent=folder_entity))
    second_folder_entity = test_state.syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
    # Create and upload two files in Folder
    uploaded_paths = []
    for i in range(2):
        f = utils.make_bogus_data_file()
        uploaded_paths.append(f)
        test_state.schedule_for_cleanup(f)
        file_entity = test_state.syn.store(File(f, parent=project_entity))
        # Create links to inner folder
        test_state.syn.store(Link(file_entity.id, parent=folder_entity))
    # Add a file in the project level as well
    f = utils.make_bogus_data_file()
    uploaded_paths.append(f)
    test_state.schedule_for_cleanup(f)
    file_entity = test_state.syn.store(File(f, parent=second_folder_entity))
    # Create link to inner folder
    test_state.syn.store(Link(file_entity.id, parent=inner_folder_entity))
    # Test recursive get: following the links must yield all 3 files.
    output = synapseutils.syncFromSynapse(test_state.syn, folder_entity, followLink=True)
    assert len(output) == len(uploaded_paths)
    for f in output:
        assert utils.normalize_path(f.path) in uploaded_paths
def test_write_manifest_data__unicode_characters_in_rows(test_state):
    # SYNPY-693: manifests must round-trip non-ASCII annotation values.
    named_temp_file = tempfile.NamedTemporaryFile('w')
    named_temp_file.close()
    test_state.schedule_for_cleanup(named_temp_file.name)

    keys = ["col_A", "col_B"]
    data = [
        {'col_A': 'asdf', 'col_B': 'qwerty'},
        {'col_A': u'凵𠘨工匚口刀乇', 'col_B': u'丅乇丂丅'}
    ]
    synapseutils.sync._write_manifest_data(named_temp_file.name, keys, data)

    manifest_df = pd.read_csv(named_temp_file.name, sep='\t', encoding='utf8')
    for row_tuple, expected in zip(manifest_df.itertuples(), data):
        assert expected['col_A'] == row_tuple.col_A
        assert expected['col_B'] == row_tuple.col_B
def test_syncFromSynapse__given_file_id(test_state):
    # Syncing a single file id should yield exactly that file.
    local_path = utils.make_bogus_data_file()
    test_state.schedule_for_cleanup(local_path)
    stored = test_state.syn.store(
        File(local_path, name=str(uuid.uuid4()),
             parent=test_state.project, synapseStore=False))
    synced = synapseutils.syncFromSynapse(test_state.syn, stored.id)
    assert len(synced) == 1
    assert stored == synced[0]
| thomasyu888/synapsePythonClient | tests/integration/synapseutils/test_synapseutils_sync.py | Python | apache-2.0 | 11,189 | 0.003582 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
from future.utils import with_metaclass
import numpy as np
import scipy as sp
from abc import ABCMeta, abstractmethod
from scipy import integrate
import scipy.interpolate as interpolate
from . import core
from . import refstate
__all__ = ['GammaEos','GammaCalc']
#====================================================================
# Base Class
#====================================================================
def set_calculator(eos_mod, kind, kind_opts):
    """Attach a gamma calculator of the given *kind* to *eos_mod*.

    Parameters
    ----------
    eos_mod : core.Eos
        Model the calculator is attached to via ``_add_calculator``.
    kind : str
        Calculator implementation name; must be one of *kind_opts*.
    kind_opts : list of str
        The set of kinds valid for the calling model.
    """
    assert kind in kind_opts, (
        kind + ' is not a valid thermal calculator. '+
        'You must select one of: ' + str(kind_opts))

    eos_mod._kind = kind

    # Dispatch table instead of a growing if/elif chain; new calculators
    # only need one entry here.
    calc_cls = {
        'GammaPowLaw': _GammaPowLaw,
        'GammaShiftPowLaw': _GammaShiftPowLaw,
        'GammaFiniteStrain': _GammaFiniteStrain,
    }.get(kind)
    if calc_cls is None:
        raise NotImplementedError(kind+' is not a valid '+
                                  'GammaEos Calculator.')

    calc = calc_cls(eos_mod)
    eos_mod._add_calculator(calc, calc_type='gamma')
#====================================================================
class GammaEos(with_metaclass(ABCMeta, core.Eos)):
    """
    EOS model for compression dependence of the Grüneisen parameter.

    Thermodynamic properties along this path depend only on volume
    (the path constant is the entropy S).
    """
    _kind_opts = ['GammaPowLaw','GammaShiftPowLaw','GammaFiniteStrain']
    # NOTE(review): model_state={} is a shared mutable default — safe only
    # if _post_init never mutates it; confirm against core.Eos.
    def __init__(self, kind='GammaPowLaw', natom=1, model_state={}):
        self._pre_init(natom=natom)
        # Select and attach the concrete gamma calculator.
        set_calculator(self, kind, self._kind_opts)
        # Reference state: ambient pressure/temperature, energy zero at E0.
        ref_compress_state='P0'
        ref_thermal_state='T0'
        ref_energy_type = 'E0'
        refstate.set_calculator(self, ref_compress_state=ref_compress_state,
                                ref_thermal_state=ref_thermal_state,
                                ref_energy_type=ref_energy_type)
        # self._set_ref_state()
        self._post_init(model_state=model_state)
        pass
    def __repr__(self):
        calc = self.calculators['gamma']
        return ("GammaEos(kind={kind}, natom={natom}, "
                "model_state={model_state}, "
                ")"
                .format(kind=repr(calc.name),
                        natom=repr(self.natom),
                        model_state=self.model_state
                        )
                )
    def _set_ref_state(self):
        # Adiabatic path: no extra reference parameters are needed.
        calc = self.calculators['gamma']
        path_const = calc.path_const
        if path_const=='S':
            param_ref_names = []
            param_ref_units = []
            param_ref_defaults = []
            param_ref_scales = []
        else:
            raise NotImplementedError(
                'path_const '+path_const+' is not valid for ThermalEos.')
        self._path_const = calc.path_const
        self._param_ref_names = param_ref_names
        self._param_ref_units = param_ref_units
        self._param_ref_defaults = param_ref_defaults
        self._param_ref_scales = param_ref_scales
        pass
    def gamma(self, V_a):
        """Grüneisen parameter at volume(s) V_a."""
        gamma_a = self.calculators['gamma']._calc_gamma(V_a)
        return gamma_a
    def gamma_deriv(self, V_a):
        """Volume derivative d(gamma)/dV at volume(s) V_a."""
        gamma_deriv_a = self.calculators['gamma']._calc_gamma_deriv(V_a)
        return gamma_deriv_a
    def temp(self, V_a, T0=None):
        """Adiabatic temperature at V_a, starting from T0 (or the ref temp)."""
        temp_a = self.calculators['gamma']._calc_temp(V_a, T0=T0)
        return temp_a
#====================================================================
class GammaCalc(with_metaclass(ABCMeta, core.Calculator)):
    """
    Abstract base class for Grüneisen-parameter calculators.

    The path is adiabatic (S=const); for this restricted path all
    thermodynamic properties depend only on volume.
    """
    def __init__(self, eos_mod):
        self._eos_mod = eos_mod
        self._init_params()
        self._path_const = 'S'
        pass
    @property
    def path_const( self ):
        # Thermodynamic path constant; always 'S' for gamma calculators.
        return self._path_const
    ####################
    # Required Methods #
    ####################
    @abstractmethod
    def _init_params( self ):
        """Initialize list of calculator parameter names."""
        pass
    @abstractmethod
    def _calc_gamma(self, V_a):
        """Return the Grüneisen parameter at volume(s) V_a."""
        pass
    @abstractmethod
    def _calc_gamma_deriv(self, V_a):
        """Return d(gamma)/dV at volume(s) V_a."""
        pass
    @abstractmethod
    def _calc_temp(self, V_a, T0=None):
        """Return the adiabatic temperature path T(V_a) starting at T0."""
        pass
    def _calc_theta(self, V_a):
        # The Debye temperature follows the same adiabatic scaling as T,
        # anchored at theta0 instead of T0.
        theta0 = self.eos_mod.get_param_values(param_names=['theta0'])
        theta = self._calc_temp(V_a, T0=theta0)
        return theta
    ####################
    # Optional Methods #
    ####################
    # EOS property functions
    def _calc_param_deriv(self, fname, paramname, V_a, dxfrac=1e-6):
        """Finite-difference derivative of self.<fname>(V_a) w.r.t. one param.

        NOTE(review): temporarily mutates the global parameter store via
        core.set_params and restores it afterwards; not thread-safe.
        """
        scale_a, paramkey_a = self.get_param_scale(apply_expand_adj=True )
        scale = scale_a[paramkey_a==paramname][0]
        # print 'scale: ' + np.str(scale)
        #if (paramname is 'E0') and (fname is 'energy'):
        #    return np.ones(V_a.shape)
        try:
            fun = getattr(self, fname)
            # Note that self is implicitly included
            val0_a = fun(V_a)
        except:
            assert False, 'That is not a valid function name ' + \
                '(e.g. it should be press or energy)'
        try:
            param = core.get_params([paramname])[0]
            dparam = scale*dxfrac
            # print 'param: ' + np.str(param)
            # print 'dparam: ' + np.str(dparam)
        except:
            assert False, 'This is not a valid parameter name'
        # set param value in eos_d dict
        core.set_params([paramname,], [param+dparam,])
        # Note that self is implicitly included
        dval_a = fun(V_a) - val0_a
        # reset param to original value
        core.set_params([paramname], [param])
        deriv_a = dval_a/dxfrac
        return deriv_a
    def _calc_energy_perturb(self, V_a):
        """Returns Energy perturbation basis functions resulting from fractional changes to EOS params."""
        fname = 'energy'
        scale_a, paramkey_a = self.get_param_scale(
            apply_expand_adj=self.expand_adj)
        Eperturb_a = []
        for paramname in paramkey_a:
            iEperturb_a = self._calc_param_deriv(fname, paramname, V_a)
            Eperturb_a.append(iEperturb_a)
        Eperturb_a = np.array(Eperturb_a)
        return Eperturb_a, scale_a, paramkey_a
#====================================================================
# Implementations
#====================================================================
class _GammaPowLaw(GammaCalc):
    """Power-law volume dependence of the Grüneisen parameter.

    gamma(V) = gamma0*(V/V0)**q, which integrates along the adiabat to
    T(V) = T0*exp(-(gamma(V) - gamma0)/q).
    """

    _path_opts = ['S']

    def __init__(self, eos_mod):
        super(_GammaPowLaw, self).__init__(eos_mod)

    def _init_params(self):
        """Initialize list of calculator parameter names."""
        V0, gamma0, q = 100, 1.0, 1.0
        self._param_names = ['V0', 'gamma0', 'q']
        self._param_units = ['ang^3', '1', '1']
        self._param_defaults = [V0, gamma0, q]
        self._param_scales = [V0, gamma0, q]

    def _calc_gamma(self, V_a):
        V0, gamma0, q = self.eos_mod.get_param_values(
            param_names=['V0','gamma0','q'])
        return gamma0*(V_a/V0)**q

    def _calc_gamma_deriv(self, V_a):
        q, = self.eos_mod.get_param_values(param_names=['q'])
        # d(gamma)/dV = q*gamma/V for a pure power law.
        return q*self._calc_gamma(V_a)/V_a

    def _calc_temp(self, V_a, T0=None):
        if T0 is None:
            T0 = self.eos_mod.refstate.ref_temp()
        gamma0, q = self.eos_mod.get_param_values(
            param_names=['gamma0','q'])
        gamma_a = self._calc_gamma(V_a)
        return T0*np.exp(-(gamma_a - gamma0)/q)
#====================================================================
class _GammaShiftPowLaw(GammaCalc):
    """
    Shifted Power Law description of Grüneisen Parameter (Al’tshuler, 1987)

    gamma(V) = gamma_inf + (gamma0 - gamma_inf)*(V/V0)**beta, so gamma tends
    to the finite limit gamma_inf under strong compression.
    """

    _path_opts=['S']

    def __init__(self, eos_mod):
        super(_GammaShiftPowLaw, self).__init__(eos_mod)
        pass

    def _init_params(self):
        """Initialize list of calculator parameter names."""
        V0 = 100
        gamma0 = 1.5
        gamma_inf = 2/3
        beta = 1.4
        T0 = 300
        self._param_names = ['V0', 'gamma0', 'gamma_inf', 'beta', 'T0']
        self._param_units = ['ang^3', '1', '1', '1', 'K']
        self._param_defaults = [V0, gamma0, gamma_inf, beta, T0]
        self._param_scales = [V0, gamma0, gamma_inf, beta, T0]
        pass

    def _calc_gamma(self, V_a):
        """Grüneisen parameter at volume(s) V_a."""
        V0, gamma0, gamma_inf, beta = self.eos_mod.get_param_values(
            param_names=['V0','gamma0','gamma_inf','beta'])
        gamma_a = gamma_inf + (gamma0-gamma_inf)*(V_a/V0)**beta
        return gamma_a

    def _calc_gamma_deriv(self, V_a):
        """Volume derivative d(gamma)/dV of the shifted power law."""
        gamma_inf, beta = self.eos_mod.get_param_values(
            param_names=['gamma_inf','beta'])
        gamma_a = self._calc_gamma(V_a)
        gamma_deriv_a = beta/V_a*(gamma_a-gamma_inf)
        return gamma_deriv_a

    def _calc_temp(self, V_a, T0=None):
        """Adiabatic temperature profile; T0 defaults to the stored parameter.

        Closed-form integral of dlnT/dlnV = -gamma(V) for the shifted
        power-law form. (Fix: a previously computed-but-unused gamma value
        was removed; it had no effect on the result.)
        """
        T0, = self.eos_mod.get_param_values(param_names=['T0'], overrides=[T0])
        V0, gamma0, gamma_inf, beta = self.eos_mod.get_param_values(
            param_names=['V0','gamma0','gamma_inf','beta'])
        x = V_a/V0
        T_a = T0*x**(-gamma_inf)*np.exp((gamma0-gamma_inf)/beta*(1-x**beta))
        return T_a
#====================================================================
class _GammaFiniteStrain(GammaCalc):
    # Finite-strain expansion of the Grüneisen parameter: gamma is expressed
    # through the Eulerian strain f = (1/2)*((V/V0)**(-2/3) - 1) with series
    # coefficients a1, a2 derived from gamma0 and its strain derivative gammap0.
    _path_opts=['S']

    def __init__(self, eos_mod):
        super(_GammaFiniteStrain, self).__init__(eos_mod)
        pass

    def _init_params(self):
        """Initialize list of calculator parameter names."""
        V0 = 100
        gamma0 = 0.5
        gammap0 = -2
        self._param_names = ['V0', 'gamma0', 'gammap0']
        self._param_units = ['ang^3', '1', '1']
        self._param_defaults = [V0, gamma0, gammap0]
        self._param_scales = [V0, gamma0, gammap0]
        pass

    def _calc_strain_coefs(self):
        # Series coefficients of the finite-strain expansion, fixed by the
        # reference-state values gamma0 and gammap0.
        V0, gamma0, gammap0 = self.eos_mod.get_param_values(
            param_names=['V0','gamma0','gammap0'])
        a1 = 6*gamma0
        a2 = -12*gamma0 +36*gamma0**2 -18*gammap0
        return a1, a2

    def _calc_fstrain(self, V_a, deriv=False):
        # Eulerian finite strain f(V); with deriv=True returns df/dV instead.
        V0, = self.eos_mod.get_param_values(param_names=['V0'])
        x = V_a/V0
        if deriv:
            return -1/(3*V0)*x**(-5/3)
        else:
            return 1/2*(x**(-2/3)-1)
        pass

    def _calc_gamma(self, V_a):
        # gamma(f) = (2f+1)(a1 + a2 f) / (6 (1 + a1 f + a2 f^2 / 2))
        a1, a2 = self._calc_strain_coefs()
        fstr_a = self._calc_fstrain(V_a)
        gamma_a = (2*fstr_a+1)*(a1+a2*fstr_a)/(6*(1+a1*fstr_a+0.5*a2*fstr_a**2))
        return gamma_a

    def _calc_gamma_deriv(self, V_a):
        # Chain rule: d(gamma)/dV = (d gamma/df) * (df/dV), expressed here as
        # gamma times the logarithmic derivative of its three f-dependent factors.
        a1, a2 = self._calc_strain_coefs()
        fstr_a = self._calc_fstrain(V_a)
        fstr_deriv = self._calc_fstrain(V_a, deriv=True)
        gamma_a = self._calc_gamma(V_a)
        gamma_deriv_a = gamma_a*fstr_deriv*(
            2/(2*fstr_a+1)+a2/(a1+a2*fstr_a)
            -(a1+a2*fstr_a)/(1+a1*fstr_a+.5*a2*fstr_a**2))
        return gamma_deriv_a

    def _calc_temp(self, V_a, T0=None):
        # Adiabatic temperature: T/T0 = sqrt(1 + a1 f + a2 f^2 / 2).
        if T0 is None:
            T0 = self.eos_mod.refstate.ref_temp()
        a1, a2 = self._calc_strain_coefs()
        fstr_a = self._calc_fstrain(V_a)
        T_a = T0*np.sqrt(1 + a1*fstr_a + 0.5*a2*fstr_a**2)
        return T_a
#====================================================================
| aswolf/xmeos | xmeos/models/gamma.py | Python | mit | 11,700 | 0.005215 |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 15:22:48 2011

@author: moritz
"""

# Restrict ``from cmodels import *`` to the ``bslip`` submodule.
__all__ = ["bslip"]
| MMaus/mutils | cmodels/__init__.py | Python | gpl-2.0 | 106 | 0 |
"""
Utilities for testing trajectories.
"""
# Copyright (C) 2009-2011 University of Edinburgh
#
# This file is part of IMUSim.
#
# IMUSim is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMUSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMUSim. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from imusim.testing.quaternions import assertQuaternionAlmostEqual
from imusim.maths.quaternions import QuaternionArray
from imusim.testing.vectors import assert_vectors_correlated
from imusim.utilities.time_series import TimeSeries
import numpy as np
def checkTrajectory(T, truePositions, trueRotations):
    """
    Check the outputs of a trajectory model agree with truth values.

    Compares positions/rotations directly, and derives velocities and
    accelerations from the truth samples by finite differences (trimming
    a few samples at each end where the differences are unreliable).

    @param T: Trajectory to check.
    @param truePositions: L{TimeSeries} of true position values.
    @param trueRotations: L{TimeSeries} of true rotation values.
    """
    # Get time indices at which position comparisons valid
    t = truePositions.timestamps
    validity = (t >= T.startTime) & (t <= T.endTime)
    t = t[validity]
    dt = np.gradient(t)
    p = truePositions.values[:,validity]

    # Check position
    assert_vectors_correlated(T.position(t), p)

    # Check velocity. list() is required so this also works on Python 3,
    # where map() returns an iterator that np.array cannot consume directly.
    v = np.array(list(map(np.gradient, p))) / dt
    assert_vectors_correlated(T.velocity(t[2:-2]), v[:,2:-2])

    # Check acceleration
    a = np.array(list(map(np.gradient, v))) / dt
    assert_vectors_correlated(T.acceleration(t[4:-4]), a[:,4:-4])

    # Get time indices at which rotation comparisons valid
    t = trueRotations.timestamps
    validity = (t >= T.startTime) & (t <= T.endTime)
    t = t[validity]
    r = trueRotations.values[validity]

    # Check rotation
    assertQuaternionAlmostEqual(T.rotation(t), r, tol=0.05)

    # Check angular velocity (finite difference of consecutive quaternions,
    # evaluated at mid-sample times)
    r, lastR = r[1:], r[:-1]
    t, dt = t[1:], np.diff(t)
    diffOmega = (2 * (r - lastR) * lastR.conjugate).array.T[1:] / dt
    trajOmega = T.rotationalVelocity(t - dt/2)
    assert_vectors_correlated(trajOmega[:,2:-2], diffOmega[:,2:-2])

    # Check angular acceleration
    diffAlpha = np.array(list(map(np.gradient, diffOmega))) / dt
    trajAlpha = T.rotationalAcceleration(t - dt/2)
    assert_vectors_correlated(trajAlpha[:,4:-4], diffAlpha[:,4:-4])
| alfkjartan/nvgimu | nvg/testing/trajectories.py | Python | gpl-3.0 | 2,671 | 0.002995 |
from tests.base import ApiDBTestCase
from zou.app.models.entity import Entity
class BreakdownTestCase(ApiDBTestCase):
    """API tests for shot/asset casting (breakdown) endpoints."""

    def setUp(self):
        """Create the project, asset-type, episode, sequence, shot and
        asset fixtures shared by every test."""
        super(BreakdownTestCase, self).setUp()
        self.generate_fixture_project_status()
        self.generate_fixture_project()
        self.generate_fixture_asset_type()
        self.generate_fixture_asset_types()
        self.generate_fixture_episode()
        self.generate_fixture_sequence()
        self.generate_fixture_shot()
        self.generate_fixture_asset()
        self.generate_fixture_asset_character()

    def test_update_casting(self):
        """Shot casting can be set via PUT, read back, and the reverse
        cast-in relation reports the shot/sequence/episode names."""
        self.project_id = str(self.project.id)
        self.shot_id = str(self.shot.id)
        self.asset_id = str(self.asset.id)
        self.asset_character_id = str(self.asset_character.id)
        self.asset_type_character_id = str(self.asset_type_character.id)
        self.shot_name = self.shot.name
        self.sequence_name = self.sequence.name
        self.episode_name = self.episode.name

        # Casting starts out empty.
        casting = self.get(
            "/data/projects/%s/entities/%s/casting"
            % (self.project_id, self.shot_id)
        )
        self.assertListEqual(casting, [])

        newCasting = [
            {"asset_id": self.asset_id, "nb_occurences": 1},
            {"asset_id": self.asset_character_id, "nb_occurences": 3},
        ]
        # (A dead assignment to a legacy /data/shots/... path was removed here.)
        path = "/data/projects/%s/entities/%s/casting" % (
            self.project_id,
            self.shot_id,
        )
        self.put(path, newCasting, 200)

        casting = self.get(
            "/data/projects/%s/entities/%s/casting"
            % (self.project_id, self.shot_id)
        )
        # Order of returned entries is not guaranteed; sort for comparison.
        casting = sorted(casting, key=lambda x: x["nb_occurences"])
        self.assertEqual(casting[0]["asset_id"], newCasting[0]["asset_id"])
        self.assertEqual(
            casting[0]["nb_occurences"], newCasting[0]["nb_occurences"]
        )
        self.assertEqual(casting[1]["asset_id"], newCasting[1]["asset_id"])
        self.assertEqual(
            casting[1]["nb_occurences"], newCasting[1]["nb_occurences"]
        )
        self.assertEqual(casting[1]["asset_name"], self.asset_character.name)
        self.assertEqual(
            casting[1]["asset_type_name"], self.asset_type_character.name
        )

        cast_in = self.get("/data/assets/%s/cast-in" % self.asset_id)
        self.assertEqual(cast_in[0]["shot_name"], self.shot.name)
        self.assertEqual(cast_in[0]["sequence_name"], self.sequence.name)
        self.assertEqual(cast_in[0]["episode_name"], self.episode.name)

    def test_get_assets_for_shots(self):
        """Assets linked to a shot are returned by /data/shots/<id>/assets."""
        self.entities = self.generate_data(
            Entity,
            3,
            entities_out=[],
            entities_in=[],
            instance_casting=[],
            project_id=self.project.id,
            entity_type_id=self.asset_type.id,
        )
        self.shot.entities_out = self.entities
        self.shot.save()
        assets = self.get("data/shots/%s/assets" % self.shot.id)
        self.assertEqual(len(assets), 3)
        self.assertTrue(
            assets[0]["id"] in [str(entity.id) for entity in self.entities]
        )

    def test_update_asset_casting(self):
        """Asset-to-asset casting can be set, read back, and queried
        through the reverse cast-in relation."""
        self.asset_id = str(self.asset.id)
        self.asset_character_id = str(self.asset_character.id)
        self.asset_type_character_id = str(self.asset_type_character.id)
        casting = self.get("/data/assets/%s/casting" % self.asset_id)
        self.assertListEqual(casting, [])
        newCasting = [
            {"asset_id": self.asset_character_id, "nb_occurences": 3}
        ]
        path = "/data/assets/%s/casting" % str(self.asset_id)
        self.put(path, newCasting, 200)
        casting = self.get("/data/assets/%s/casting" % self.asset_id)
        casting = sorted(casting, key=lambda x: x["nb_occurences"])
        self.assertEqual(casting[0]["asset_id"], newCasting[0]["asset_id"])
        self.assertEqual(
            casting[0]["nb_occurences"], newCasting[0]["nb_occurences"]
        )
        self.assertEqual(casting[0]["asset_name"], self.asset_character.name)
        cast_in = self.get("/data/assets/%s/cast-in" % self.asset_character_id)
        self.assertEqual(len(cast_in), 1)
        self.assertEqual(cast_in[0]["asset_name"], self.asset.name)

    def test_get_casting_for_assets(self):
        """Assets linked to an asset are returned by /data/assets/<id>/assets."""
        self.entities = self.generate_data(
            Entity,
            3,
            entities_out=[],
            entities_in=[],
            instance_casting=[],
            project_id=self.project.id,
            entity_type_id=self.asset_type.id,
        )
        self.asset.entities_out = self.entities
        self.asset.save()
        assets = self.get("data/assets/%s/assets" % self.asset.id)
        self.assertEqual(len(assets), 3)
        self.assertTrue(
            assets[0]["id"] in [str(entity.id) for entity in self.entities]
        )
| cgwire/zou | tests/shots/test_breakdown.py | Python | agpl-3.0 | 4,949 | 0 |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import socket
import sys
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW, _LI
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(object):
    """Base ZAPI client for NetApp Data ONTAP storage backends.

    Wraps a NaServer connection and implements the LUN/igroup/volume
    operations shared by the concrete clients; several lookup methods
    (iSCSI/FC target details, LUN listing, igroup queries) are declared
    here but implemented per ONTAP flavor in subclasses.
    """

    def __init__(self, **kwargs):
        # Required kwargs: hostname, transport_type, port, username, password.
        self.connection = netapp_api.NaServer(
            host=kwargs['hostname'],
            transport_type=kwargs['transport_type'],
            port=kwargs['port'],
            username=kwargs['username'],
            password=kwargs['password'])

    def _init_features(self):
        """Set up the repository of available Data ONTAP features."""
        self.features = na_utils.Features()

    def get_ontapi_version(self, cached=True):
        """Gets the supported ontapi version.

        With cached=True (default) the version stored on the connection is
        returned; otherwise the filer is queried. Returns (major, minor).
        """
        if cached:
            return self.connection.get_api_version()

        ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
        res = self.connection.invoke_successfully(ontapi_version, False)
        major = res.get_child_content('major-version')
        minor = res.get_child_content('minor-version')
        return major, minor

    def get_connection(self):
        # Expose the underlying NaServer connection object.
        return self.connection

    def check_is_naelement(self, elem):
        """Checks if object is instance of NaElement."""
        if not isinstance(elem, netapp_api.NaElement):
            raise ValueError('Expects NaElement')

    def send_request(self, api_name, api_args=None, enable_tunneling=True):
        """Sends request to Ontapi.

        api_args, when given, is translated into child elements of the
        request via NaElement.translate_struct.
        """
        request = netapp_api.NaElement(api_name)
        if api_args:
            request.translate_struct(api_args)
        return self.connection.invoke_successfully(request, enable_tunneling)

    def create_lun(self, volume_name, lun_name, size, metadata,
                   qos_policy_group_name=None):
        """Issues API request for creating LUN on volume.

        metadata must provide 'OsType' and 'SpaceReserved'; errors are
        logged and re-raised unchanged.
        """
        path = '/vol/%s/%s' % (volume_name, lun_name)
        lun_create = netapp_api.NaElement.create_node_with_children(
            'lun-create-by-size',
            **{'path': path, 'size': six.text_type(size),
               'ostype': metadata['OsType'],
               'space-reservation-enabled': metadata['SpaceReserved']})
        # QoS policy group is optional and only attached when provided.
        if qos_policy_group_name:
            lun_create.add_new_child('qos-policy-group', qos_policy_group_name)

        try:
            self.connection.invoke_successfully(lun_create, True)
        except netapp_api.NaApiError as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error provisioning volume %(lun_name)s on "
                              "%(volume_name)s. Details: %(ex)s"),
                          {'lun_name': lun_name,
                           'volume_name': volume_name,
                           'ex': ex})

    def destroy_lun(self, path, force=True):
        """Destroys the LUN at the path.

        force=True (default) adds the 'force' flag to the ZAPI call.
        """
        lun_destroy = netapp_api.NaElement.create_node_with_children(
            'lun-destroy',
            **{'path': path})
        if force:
            lun_destroy.add_new_child('force', 'true')
        self.connection.invoke_successfully(lun_destroy, True)
        seg = path.split("/")
        LOG.debug("Destroyed LUN %s", seg[-1])

    def map_lun(self, path, igroup_name, lun_id=None):
        """Maps LUN to the initiator and returns LUN id assigned.

        When lun_id is omitted the filer assigns one; failures are logged
        and re-raised.
        """
        lun_map = netapp_api.NaElement.create_node_with_children(
            'lun-map', **{'path': path,
                          'initiator-group': igroup_name})
        if lun_id:
            lun_map.add_new_child('lun-id', lun_id)
        try:
            result = self.connection.invoke_successfully(lun_map, True)
            return result.get_child_content('lun-id-assigned')
        except netapp_api.NaApiError as e:
            code = e.code
            message = e.message
            LOG.warning(_LW('Error mapping LUN. Code :%(code)s, Message: '
                            '%(message)s'), {'code': code, 'message': message})
            raise

    def unmap_lun(self, path, igroup_name):
        """Unmaps a LUN from given initiator.

        Errors indicating the LUN is already unmapped are swallowed; any
        other API error is re-raised with its original traceback.
        """
        lun_unmap = netapp_api.NaElement.create_node_with_children(
            'lun-unmap',
            **{'path': path, 'initiator-group': igroup_name})
        try:
            self.connection.invoke_successfully(lun_unmap, True)
        except netapp_api.NaApiError as e:
            exc_info = sys.exc_info()
            LOG.warning(_LW("Error unmapping LUN. Code :%(code)s, Message: "
                            "%(message)s"), {'code': e.code,
                                             'message': e.message})
            # if the LUN is already unmapped
            if e.code == '13115' or e.code == '9016':
                pass
            else:
                six.reraise(*exc_info)

    def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
        """Creates igroup with specified args."""
        igroup_create = netapp_api.NaElement.create_node_with_children(
            'igroup-create',
            **{'initiator-group-name': igroup,
               'initiator-group-type': igroup_type,
               'os-type': os_type})
        self.connection.invoke_successfully(igroup_create, True)

    def add_igroup_initiator(self, igroup, initiator):
        """Adds initiators to the specified igroup."""
        igroup_add = netapp_api.NaElement.create_node_with_children(
            'igroup-add',
            **{'initiator-group-name': igroup,
               'initiator': initiator})
        self.connection.invoke_successfully(igroup_add, True)

    def do_direct_resize(self, path, new_size_bytes, force=True):
        """Resize the LUN.

        new_size_bytes is passed through as a string; force=True adds the
        'force' flag so the resize proceeds even when it needs it.
        """
        seg = path.split("/")
        LOG.info(_LI("Resizing LUN %s directly to new size."), seg[-1])
        lun_resize = netapp_api.NaElement.create_node_with_children(
            'lun-resize',
            **{'path': path,
               'size': new_size_bytes})
        if force:
            lun_resize.add_new_child('force', 'true')
        self.connection.invoke_successfully(lun_resize, True)

    def get_lun_geometry(self, path):
        """Gets the LUN geometry.

        Returns a dict of geometry fields; on API failure the error is
        logged and a possibly-empty dict is returned (best-effort).
        """
        geometry = {}
        lun_geo = netapp_api.NaElement("lun-get-geometry")
        lun_geo.add_new_child('path', path)
        try:
            result = self.connection.invoke_successfully(lun_geo, True)
            geometry['size'] = result.get_child_content("size")
            geometry['bytes_per_sector'] =\
                result.get_child_content("bytes-per-sector")
            geometry['sectors_per_track'] =\
                result.get_child_content("sectors-per-track")
            geometry['tracks_per_cylinder'] =\
                result.get_child_content("tracks-per-cylinder")
            geometry['cylinders'] =\
                result.get_child_content("cylinders")
            geometry['max_resize'] =\
                result.get_child_content("max-resize-size")
        except Exception as e:
            # NOTE(review): e.message is Python 2 style — confirm before a py3 port.
            LOG.error(_LE("LUN %(path)s geometry failed. Message - %(msg)s"),
                      {'path': path, 'msg': e.message})
        return geometry

    def get_volume_options(self, volume_name):
        """Get the value for the volume option.

        Returns the list of option child elements (possibly empty).
        """
        opts = []
        vol_option_list = netapp_api.NaElement("volume-options-list-info")
        vol_option_list.add_new_child('volume', volume_name)
        result = self.connection.invoke_successfully(vol_option_list, True)
        options = result.get_child_by_name("options")
        if options:
            opts = options.get_children()
        return opts

    def move_lun(self, path, new_path):
        """Moves the LUN at path to new path."""
        seg = path.split("/")
        new_seg = new_path.split("/")
        LOG.debug("Moving LUN %(name)s to %(new_name)s.",
                  {'name': seg[-1], 'new_name': new_seg[-1]})
        lun_move = netapp_api.NaElement("lun-move")
        lun_move.add_new_child("path", path)
        lun_move.add_new_child("new-path", new_path)
        self.connection.invoke_successfully(lun_move, True)

    def get_iscsi_target_details(self):
        """Gets the iSCSI target portal details."""
        # Implemented by the 7-mode / C-mode subclasses.
        raise NotImplementedError()

    def get_fc_target_wwpns(self):
        """Gets the FC target details."""
        raise NotImplementedError()

    def get_iscsi_service_details(self):
        """Returns iscsi iqn."""
        raise NotImplementedError()

    def get_lun_list(self):
        """Gets the list of LUNs on filer."""
        raise NotImplementedError()

    def get_igroup_by_initiators(self, initiator_list):
        """Get igroups exactly matching a set of initiators."""
        raise NotImplementedError()

    def _has_luns_mapped_to_initiator(self, initiator):
        """Checks whether any LUNs are mapped to the given initiator."""
        lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')
        lun_list_api.add_new_child('initiator', initiator)
        result = self.connection.invoke_successfully(lun_list_api, True)
        # Missing 'lun-maps' element is treated as "no mappings".
        lun_maps_container = result.get_child_by_name(
            'lun-maps') or netapp_api.NaElement('none')
        return len(lun_maps_container.get_children()) > 0

    def has_luns_mapped_to_initiators(self, initiator_list):
        """Checks whether any LUNs are mapped to the given initiator(s)."""
        for initiator in initiator_list:
            if self._has_luns_mapped_to_initiator(initiator):
                return True
        return False

    def get_lun_by_args(self, **args):
        """Retrieves LUNs with specified args."""
        raise NotImplementedError()

    def get_performance_counter_info(self, object_name, counter_name):
        """Gets info about one or more Data ONTAP performance counters.

        Returns a dict with 'name', 'labels' and 'base-counter'; raises
        NotFound when the counter is not listed for the object.
        """
        api_args = {'objectname': object_name}
        result = self.send_request('perf-object-counter-list-info',
                                   api_args,
                                   enable_tunneling=False)

        counters = result.get_child_by_name(
            'counters') or netapp_api.NaElement('None')

        for counter in counters.get_children():

            if counter.get_child_content('name') == counter_name:
                labels = []
                label_list = counter.get_child_by_name(
                    'labels') or netapp_api.NaElement('None')
                for label in label_list.get_children():
                    labels.extend(label.get_content().split(','))
                base_counter = counter.get_child_content('base-counter')

                return {
                    'name': counter_name,
                    'labels': labels,
                    'base-counter': base_counter,
                }
        else:
            # for/else: only reached when no counter matched above.
            raise exception.NotFound(_('Counter %s not found') % counter_name)

    def provide_ems(self, requester, netapp_backend, app_version,
                    server_type="cluster"):
        """Provide ems with volume stats for the requester.

        :param server_type: cluster or 7mode.
        """
        def _create_ems(netapp_backend, app_version, server_type):
            """Create ems API request."""
            ems_log = netapp_api.NaElement('ems-autosupport-log')
            host = socket.getfqdn() or 'Cinder_node'
            if server_type == "cluster":
                dest = "cluster node"
            else:
                dest = "7 mode controller"
            ems_log.add_new_child('computer-name', host)
            ems_log.add_new_child('event-id', '0')
            ems_log.add_new_child('event-source',
                                  'Cinder driver %s' % netapp_backend)
            ems_log.add_new_child('app-version', app_version)
            ems_log.add_new_child('category', 'provisioning')
            ems_log.add_new_child('event-description',
                                  'OpenStack Cinder connected to %s' % dest)
            ems_log.add_new_child('log-level', '6')
            ems_log.add_new_child('auto-support', 'false')
            return ems_log

        def _create_vs_get():
            """Create vs_get API request."""
            vs_get = netapp_api.NaElement('vserver-get-iter')
            vs_get.add_new_child('max-records', '1')
            query = netapp_api.NaElement('query')
            query.add_node_with_children('vserver-info',
                                         **{'vserver-type': 'node'})
            vs_get.add_child_elem(query)
            desired = netapp_api.NaElement('desired-attributes')
            desired.add_node_with_children(
                'vserver-info', **{'vserver-name': '', 'vserver-type': ''})
            vs_get.add_child_elem(desired)
            return vs_get

        def _get_cluster_node(na_server):
            """Get the cluster node for ems."""
            na_server.set_vserver(None)
            vs_get = _create_vs_get()
            res = na_server.invoke_successfully(vs_get)
            if (res.get_child_content('num-records') and
                    int(res.get_child_content('num-records')) > 0):
                attr_list = res.get_child_by_name('attributes-list')
                vs_info = attr_list.get_child_by_name('vserver-info')
                vs_name = vs_info.get_child_content('vserver-name')
                return vs_name
            return None

        # Throttle: send at most one ems message per sec_limit seconds
        # (3559 s, just under an hour) per requester.
        do_ems = True
        if hasattr(requester, 'last_ems'):
            sec_limit = 3559
            if not (timeutils.is_older_than(requester.last_ems, sec_limit)):
                do_ems = False

        if do_ems:
            # Copy the connection so the vserver/vfiler/timeout tweaks below
            # do not affect the client's shared connection object.
            na_server = copy.copy(self.connection)
            na_server.set_timeout(25)
            ems = _create_ems(netapp_backend, app_version, server_type)
            try:
                if server_type == "cluster":
                    api_version = na_server.get_api_version()
                    if api_version:
                        major, minor = api_version
                    else:
                        raise netapp_api.NaApiError(
                            code='Not found',
                            message='No API version found')
                    # ONTAPI newer than 1.15 can use the requester's vserver
                    # directly; older versions must look up a cluster node.
                    if major == 1 and minor > 15:
                        node = getattr(requester, 'vserver', None)
                    else:
                        node = _get_cluster_node(na_server)
                    if node is None:
                        raise netapp_api.NaApiError(
                            code='Not found',
                            message='No vserver found')
                    na_server.set_vserver(node)
                else:
                    na_server.set_vfiler(None)
                na_server.invoke_successfully(ems, True)
                LOG.debug("ems executed successfully.")
            except netapp_api.NaApiError as e:
                LOG.warning(_LW("Failed to invoke ems. Message : %s"), e)
            finally:
                # Record the attempt time even on failure, so failures are
                # also throttled.
                requester.last_ems = timeutils.utcnow()
| dims/cinder | cinder/volume/drivers/netapp/dataontap/client/client_base.py | Python | apache-2.0 | 15,903 | 0 |
# -*- coding: utf-8 -*-
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class NosvideoCom(SimpleCrypter):
    # pyLoad plugin metadata consumed by the plugin loader.
    __name__ = "NosvideoCom"
    __type__ = "crypter"
    __version__ = "0.07"
    __status__ = "testing"

    # URLs this crypter handles: nosvideo.com watch pages.
    __pattern__ = r'http://(?:www\.)?nosvideo\.com/\?v=\w+'
    __config__ = [("activated", "bool", "Activated", True),
                  ("use_premium", "bool", "Use premium account if available", True),
                  ("folder_per_package", "Default;Yes;No",
                   "Create folder for each package", "Default"),
                  ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]

    __description__ = """Nosvideo.com decrypter plugin"""
    __license__ = "GPLv3"
    __authors__ = [("igel", "igelkun@myopera.com")]

    # Used by SimpleCrypter: extracts nosupload.com download links from the page.
    LINK_PATTERN = r'href="(http://(?:w{3}\.)?nosupload\.com/\?d=\w+)"'
    # Used by SimpleCrypter: package name taken from the page <title> ("Watch ...").
    NAME_PATTERN = r'<[tT]itle>Watch (?P<N>.+?)<'
| rlindner81/pyload | module/plugins/crypter/NosvideoCom.py | Python | gpl-3.0 | 917 | 0.002181 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RModelmetrics(RPackage):
    """Collection of metrics for evaluating models written in C++ using
    'Rcpp'."""

    homepage = "https://cran.r-project.org/package=ModelMetrics"
    url = "https://cran.r-project.org/src/contrib/ModelMetrics_1.1.0.tar.gz"

    version('1.1.0', 'd43175001f0531b8810d2802d76b7b44')

    # Requires R >= 3.2.2; Rcpp is needed both at build and at run time.
    depends_on('r@3.2.2:')
    depends_on('r-rcpp', type=('build', 'run'))
| TheTimmy/spack | var/spack/repos/builtin/packages/r-modelmetrics/package.py | Python | lgpl-2.1 | 1,657 | 0.001207 |
# NOTE: Python 2 script (print statements, raw_input, execfile).
import sys

# --- ROBOT DYNAMIC SIMULATION -------------------------------------------------
from dynamic_graph.sot.hrp2_14.robot import Robot
robot = Robot( 'robot' )

# --- LINK ROBOT VIEWER -------------------------------------------------------
from dynamic_graph.sot.core.utils.viewer_helper import addRobotViewer
addRobotViewer(robot.device,small=True,verbose=False)

robot.timeStep=5e-3
usingRobotViewer = True

# Stack scalar signals into the 3-component accelerometer/gyrometer vectors
# expected by the device (gravity 9.8 on the accelerometer's third component).
from dynamic_graph.sot.core import Stack_of_vector
acc = Stack_of_vector('acc')
gyr = Stack_of_vector('gyr')
acc.selec1(0,2)
acc.selec2(0,1)
gyr.selec1(0,2)
gyr.selec2(0,1)
acc.sin1.value=(0.0,0.0)
acc.sin2.value=(9.8,)
gyr.sin1.value=(0.0,0.0)
gyr.sin2.value=(0.0,)
robot.device.accelerometer = acc.sout
robot.device.gyrometer = gyr.sout
robot.device.forceLLEG.value = (0,0,284,0,0,0)
robot.device.forceRLEG.value = (0,0,284,0,0,0)

# --- MAIN LOOP ----------------------------------------------------------------
from dynamic_graph.sot.core.utils.thread_interruptible_loop import loopInThread,optionalparentheses,loopShortcuts
refreshList = list()
@loopInThread
def loop():
    # One simulation step; then run any registered refresh callbacks.
    robot.device.increment(robot.timeStep)
    for cmd in refreshList: cmd()
runner=loop()
[go,stop,next,n] = loopShortcuts(runner)

@optionalparentheses
def iter(): print 'iter = ',robot.device.state.time
@optionalparentheses
def status(): print runner.isPlay

# ----------------------------------------------------------------------
# Execute each script given on the command line. Prefixes on the argument:
#   '+' : skip the interactive "ready?" prompt
#   '*' : re-run the script in a loop until interrupted at the prompt
#   '=' : treat the remainder of the argument as inline Python code
for scripts in sys.argv[1:]:
    if scripts[0]!='+':
        raw_input('Enter when you are ready to execute **'+scripts+'** :')
    else: scripts = scripts[1:]
    loop = scripts[0]=='*'
    if loop: scripts = scripts[1:]
    while True:
        if scripts[0]=='=':
            print "["+scripts[1:]+"]"
            exec(scripts[1:])
        else:
            execfile(scripts)
        if loop: raw_input('Again <'+scripts+'> ?')
        else: break
| stack-of-tasks/sot-stabilizer | python/scripts/robotViewerLauncher.py | Python | lgpl-3.0 | 1,912 | 0.028766 |
'''
IAM2 Vid Attribute Checker.
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.header.checker as checker_header
import zstackwoodpecker.operations.iam2_operations as iam2_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.affinitygroup_operations as ag_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.vxlan_operations as vxlan_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import zstackwoodpecker.operations.zwatch_operations as zwt_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_lib as test_lib
import time
import os
class zstack_vid_attr_checker(checker_header.TestChecker):
    """Checker exercising IAM2 virtual-ID permissions: login plus VM, image
    and snapshot operations performed under a given session."""
    def __init__(self):
        super(zstack_vid_attr_checker, self).__init__()
def check_login_by_vid(self, username, password):
session_uuid = iam2_ops.login_iam2_virtual_id(username, password)
def check_login_by_account(self, username, password):
session_uuid = acc_ops.login_by_account(username, password)
    def check_vm_operation(self, session_uuid=None):
        """Run the full VM lifecycle under the given session to verify the
        session's permissions; returns self.judge(True) on success (any
        unauthorized operation raises before reaching the end)."""
        vm_creation_option = test_util.VmOption()
        # Pick a private (non-system) L3 network visible to this session.
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions, session_uuid=session_uuid)[0].uuid
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        # Pick a non-system Linux image.
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions, session_uuid=session_uuid)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions, session_uuid=session_uuid)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_name('vm_policy_checker')
        vm_creation_option.set_session_uuid(session_uuid)
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        # VM related ops: Create, Delete, Expunge, Start, Stop, Suspend, Resume, Migrate
        vm_ops.stop_vm(vm_uuid, session_uuid=session_uuid)
        vm_ops.start_vm(vm_uuid, session_uuid=session_uuid)
        candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm_uuid)
        if candidate_hosts != None and test_lib.lib_check_vm_live_migration_cap(vm):
            # candidate_hosts may be a reply object (.inventories) or a plain
            # list depending on the API version; try both access styles.
            try:
                vm_ops.migrate_vm(vm_uuid, candidate_hosts.inventories[0].uuid, session_uuid=session_uuid)
            except:
                vm_ops.migrate_vm(vm_uuid, candidate_hosts[0].uuid, session_uuid=session_uuid)
        vm_ops.stop_vm(vm_uuid, force='cold', session_uuid=session_uuid)
        vm_ops.start_vm(vm_uuid, session_uuid=session_uuid)
        vm_ops.suspend_vm(vm_uuid, session_uuid=session_uuid)
        vm_ops.resume_vm(vm_uuid, session_uuid=session_uuid)
        vm_ops.destroy_vm(vm_uuid, session_uuid=session_uuid)
        vm_ops.expunge_vm(vm_uuid, session_uuid=session_uuid)
        return self.judge(True)
    def check_image_operation(self, session_uuid=None):
        """Run the image lifecycle (add/sync/enable-disable/export/qga/
        delete/expunge) under the given session to verify its permissions;
        returns self.judge(True) on success."""
        bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, session_uuid=session_uuid)[0]
        image_option = test_util.ImageOption()
        image_option.set_name('image_policy_checker')
        image_option.set_description('image for policy check')
        image_option.set_format('raw')
        image_option.set_mediaType('RootVolumeTemplate')
        image_option.set_backup_storage_uuid_list([bs.uuid])
        # Placeholder download URL; the image content is never fetched here.
        image_option.url = "http://fake_iamge/image.raw"
        image_option.set_session_uuid(session_uuid)
        image_uuid = img_ops.add_image(image_option).uuid
        img_ops.sync_image_size(image_uuid, session_uuid=session_uuid)
        img_ops.change_image_state(image_uuid, 'disable', session_uuid=session_uuid)
        img_ops.change_image_state(image_uuid, 'enable', session_uuid=session_uuid)
        # Export/unexport is only supported on image-store backup storage.
        if bs.type == 'ImageStoreBackupStorage':
            img_ops.export_image_from_backup_storage(image_uuid, bs.uuid, session_uuid=session_uuid)
            img_ops.delete_exported_image_from_backup_storage(image_uuid, bs.uuid, session_uuid=session_uuid)
        img_ops.set_image_qga_enable(image_uuid, session_uuid=session_uuid)
        img_ops.set_image_qga_disable(image_uuid, session_uuid=session_uuid)
        cond = res_ops.gen_query_conditions('name', '=', "image_policy_checker")
        image = res_ops.query_resource(res_ops.IMAGE, cond, session_uuid=session_uuid)
        if image == None:
            # test_fail raises; the judge(False) below is a defensive fallback.
            test_util.test_fail('fail to query image just added')
            return self.judge(False)
        img_ops.delete_image(image_uuid, session_uuid=session_uuid)
        img_ops.expunge_image(image_uuid, session_uuid=session_uuid)
        return self.judge(True)
    def check_snapshot(self, session_uuid=None):
        """Verify the session may exercise the volume-snapshot life cycle.

        Creates a data volume under the checked session and a helper VM,
        attaches the volume, then creates / reverts-to / deletes a snapshot
        before cleaning everything up.

        :param session_uuid: session to perform the checked operations with
        :returns: self.judge(True) when every call succeeds
        """
        # bs is only used by the commented-out backup-snapshot calls below.
        bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, session_uuid=session_uuid)[0]
        disk_offering_uuid = res_ops.query_resource(res_ops.DISK_OFFERING, session_uuid=session_uuid)[0].uuid
        volume_option = test_util.VolumeOption()
        volume_option.set_disk_offering_uuid(disk_offering_uuid)
        volume_option.set_session_uuid(session_uuid)
        volume_option.set_name('data_volume_for_snapshot_policy_checker')
        data_volume = vol_ops.create_volume_from_offering(volume_option)
        # Helper VM: any non-system private L3, Linux non-system image,
        # UserVm instance offering.
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_name('vm_without_create_policy_checker')
        # NOTE(review): no session uuid is set on the VM option, so the VM is
        # created by the current (admin) session, not the checked one.
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        vol_ops.attach_volume(data_volume.uuid, vm_uuid)
        snapshot_option = test_util.SnapshotOption()
        snapshot_option.set_volume_uuid(data_volume.uuid)
        snapshot_option.set_name('snapshot_policy_checker')
        snapshot_option.set_description('snapshot for policy check')
        snapshot_option.set_session_uuid(session_uuid)
        snapshot_uuid = vol_ops.create_snapshot(snapshot_option).uuid
        # Stop the VM (cold) before reverting the volume to the snapshot.
        vm_ops.stop_vm(vm_uuid, force='cold')
        vol_ops.use_snapshot(snapshot_uuid, session_uuid)
        #vol_ops.backup_snapshot(snapshot_uuid, bs.uuid, project_login_session_uuid)
        #new_volume = vol_ops.create_volume_from_snapshot(snapshot_uuid)
        #vol_ops.delete_snapshot_from_backupstorage(snapshot_uuid, [bs.uuid], session_uuid=project_login_session_uuid)
        vol_ops.delete_snapshot(snapshot_uuid, session_uuid)
        # Cleanup: volume and VM are removed with the admin session.
        vol_ops.delete_volume(data_volume.uuid)
        vol_ops.expunge_volume(data_volume.uuid)
        vm_ops.destroy_vm(vm_uuid)
        vm_ops.expunge_vm(vm_uuid)
        return self.judge(True)
    def check_volume_operation(self, session_uuid=None):
        """Verify the session may run the data-volume life cycle.

        Covers create, stop/start, attach, detach, delete and expunge of a
        data volume; the helper VM is created and removed with the admin
        session.

        :param session_uuid: session to perform the checked operations with
        :returns: self.judge(True) when every call succeeds
        """
        # Volume related ops: Create, Delete, Expunge, Attach, Dettach, Enable, Disable
        disk_offering_uuid = res_ops.query_resource(res_ops.DISK_OFFERING)[0].uuid
        volume_option = test_util.VolumeOption()
        volume_option.set_disk_offering_uuid(disk_offering_uuid)
        volume_option.set_name('data_volume_policy_checker')
        volume_option.set_session_uuid(session_uuid)
        data_volume = vol_ops.create_volume_from_offering(volume_option)
        # stop/start presumably map to the Disable/Enable ops listed above.
        vol_ops.stop_volume(data_volume.uuid, session_uuid=session_uuid)
        vol_ops.start_volume(data_volume.uuid, session_uuid=session_uuid)
        # Helper VM: any non-system private L3, Linux image, UserVm offering.
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_name('vm_for_vol_policy_checker')
        #vm_creation_option.set_session_uuid(project_login_session_uuid)
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        vol_ops.attach_volume(data_volume.uuid, vm_uuid, session_uuid=session_uuid)
        vol_ops.detach_volume(data_volume.uuid, vm_uuid, session_uuid=session_uuid)
        vol_ops.delete_volume(data_volume.uuid, session_uuid=session_uuid)
        vol_ops.expunge_volume(data_volume.uuid, session_uuid=session_uuid)
        # NOTE(review): VM cleanup uses the admin session (no session_uuid)
        # -- presumably intentional since the VM was created by admin.
        vm_ops.destroy_vm(vm_uuid)
        vm_ops.expunge_vm(vm_uuid)
        return self.judge(True)
def check_affinity_group(self, session_uuid=None):
vm_creation_option = test_util.VmOption()
conditions = res_ops.gen_query_conditions('system', '=', 'false')
conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
vm_creation_option.set_l3_uuids([l3_net_uuid])
conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
vm_creation_option.set_image_uuid(image_uuid)
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_name('vm_for_affinity_group_policy_checker')
vm = vm_ops.create_vm(vm_creation_option)
vm_uuid = vm.uuid
ag_uuid = ag_ops.create_affinity_group('affinity_group_policy_checker', 'antiHard', session_uuid=session_uuid).uuid
ag_ops.add_vm_to_affinity_group(ag_uuid, vm_uuid, session_uuid=session_uuid)
ag_ops.remove_vm_from_affinity_group(ag_uuid, vm_uuid, session_uuid=session_uuid)
ag_ops.delete_affinity_group(ag_uuid, session_uuid=session_uuid)
vm_ops.destroy_vm(vm_uuid)
vm_ops.expunge_vm(vm_uuid)
return self.judge(True)
    def check_networks(self, session_uuid=None):
        """Verify the session may create and delete vxlan L2/L3 networks.

        Ensures a vxlan pool with a vni range exists (creating temporary
        ones if necessary), then creates a vxlan L2 network plus an L3
        network with the vrouter service attached, deletes them and removes
        any temporarily created vni range.

        :param session_uuid: session to perform the checked operations with
        :returns: self.judge(True) when every call succeeds
        """
        zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
        vxlan_pool = res_ops.get_resource(res_ops.L2_VXLAN_NETWORK_POOL)
        # clear_vxlan_pool records that this checker created the vni range
        # itself and must delete it again during cleanup.
        clear_vxlan_pool = False
        if vxlan_pool == None or len(vxlan_pool) == 0:
            # No pool at all: create pool + vni range.
            # NOTE(review): this temporary pool itself is never deleted
            # below (only the vni range is) -- confirm whether intended.
            vxlan_pool_uuid = vxlan_ops.create_l2_vxlan_network_pool('vxlan_poll_for networks_polocy_checker', zone_uuid).uuid
            vni_uuid = vxlan_ops.create_vni_range('vni_range_for_networks_policy_checker', '10000', '20000', vxlan_pool_uuid).uuid
            clear_vxlan_pool = True
        elif len(vxlan_pool[0].attachedVniRanges) == 0:
            # Pool exists but has no vni range yet: add a temporary one.
            vni_uuid = vxlan_ops.create_vni_range('vni_range_for_networks_policy_checker', '10000', '20000', vxlan_pool[0].uuid).uuid
            clear_vxlan_pool = True
        else:
            vxlan_pool_uuid = vxlan_pool[0].uuid
        # Re-read the pool uuid under the checked session; this also covers
        # the elif branch above, where vxlan_pool_uuid was never assigned.
        vxlan_pool_uuid = res_ops.get_resource(res_ops.L2_VXLAN_NETWORK_POOL, session_uuid=session_uuid)[0].uuid
        vxlan_l2_uuid = vxlan_ops.create_l2_vxlan_network('vxlan_for_policy_checker', vxlan_pool_uuid, zone_uuid, session_uuid=session_uuid).uuid
        conditions = res_ops.gen_query_conditions('name', '=', 'vrouter')
        service_providor_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, conditions, session_uuid=session_uuid)[0].uuid
        l3_uuid = net_ops.create_l3('l3_network_for_policy_checker', vxlan_l2_uuid, session_uuid=session_uuid).uuid
        net_ops.attach_network_service_to_l3network(l3_uuid, service_providor_uuid, session_uuid=session_uuid)
        #net_ops.detach_network_service_from_l3network(l3_uuid, service_providor_uuid, session_uuid=project_login_session_uuid)
        net_ops.delete_l3(l3_uuid, session_uuid=session_uuid)
        if clear_vxlan_pool:
            vxlan_ops.delete_vni_range(vni_uuid, session_uuid=session_uuid)
        net_ops.delete_l2(vxlan_l2_uuid, session_uuid=session_uuid)
        return self.judge(True)
    def check_eip(self, session_uuid=None):
        """Verify the session may manage elastic IPs.

        Creates a helper VM (admin session) and a VIP on the public L3,
        then creates an EIP bound to the VM nic and exercises detach /
        attach / delete under the checked session before cleaning up.

        :param session_uuid: session to perform the checked operations with
        :returns: self.judge(True) when every call succeeds
        """
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('category', '=', 'Public')
        l3_pub_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_name('vm_for_eip_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        # VIP on the public L3, created under the checked session.
        vip_option = test_util.VipOption()
        vip_option.set_name("vip_for_eip_policy_checker")
        vip_option.set_session_uuid(session_uuid)
        vip_option.set_l3_uuid(l3_pub_uuid)
        vip = net_ops.create_vip(vip_option)
        conditions = res_ops.gen_query_conditions('vmInstance.uuid', '=', vm_uuid)
        vm_nic_uuid = res_ops.query_resource(res_ops.VM_NIC, conditions)[0].uuid
        test_util.test_logger('vip creation finished, vm nic uuid is %s' %vm_nic_uuid)
        eip_option = test_util.EipOption()
        eip_option.set_name('eip_policy_checker')
        eip_option.set_session_uuid(session_uuid)
        eip_option.set_vip_uuid(vip.uuid)
        eip_option.set_vm_nic_uuid(vm_nic_uuid)
        eip = net_ops.create_eip(eip_option)
        net_ops.detach_eip(eip.uuid, session_uuid=session_uuid)
        net_ops.attach_eip(eip.uuid, vm_nic_uuid, session_uuid=session_uuid)
        # Cleanup runs with the admin session (no session_uuid passed).
        net_ops.delete_eip(eip.uuid)
        net_ops.delete_vip(vip.uuid)
        vm_ops.destroy_vm(vm_uuid)
        vm_ops.expunge_vm(vm_uuid)
        test_util.test_logger("revoke_resources should not be runned")
        return self.judge(True)
def check_security_group(self, session_uuid=None):
vm_creation_option = test_util.VmOption()
conditions = res_ops.gen_query_conditions('system', '=', 'false')
conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
conditions = res_ops.gen_query_conditions('name', '=', 'SecurityGroup')
sg_service_providor_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, conditions)[0].uuid
conditions = res_ops.gen_query_conditions('l3Network.uuid', '=', l3_net_uuid)
network_service_list = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER_L3_REF, conditions)
sg_service_need_attach = True
sg_service_need_detach = False
for service in network_service_list:
if service.networkServiceType == 'SecurityGroup':
sg_service_need_attach = False
if sg_service_need_attach:
net_ops.attach_sg_service_to_l3network(l3_net_uuid, sg_service_providor_uuid, session_uuid=session_uuid)
sg_service_need_detach = True
vm_creation_option = test_util.VmOption()
conditions = res_ops.gen_query_conditions('system', '=', 'false')
conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
vm_creation_option.set_l3_uuids([l3_net_uuid])
conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
vm_creation_option.set_image_uuid(image_uuid)
conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
vm_creation_option.set_name('vm_for_security_group_policy_checker')
vm = vm_ops.create_vm(vm_creation_option)
vm_uuid = vm.uuid
sg_creation_option = test_util.SecurityGroupOption()
sg_creation_option.set_name('security_group_policy_checker')
sg_creation_option.set_session_uuid(session_uuid=session_uuid)
sg_uuid = net_ops.create_security_group(sg_creation_option).uuid
net_ops.attach_security_group_to_l3(sg_uuid, l3_net_uuid, session_uuid=session_uuid)
conditions = res_ops.gen_query_conditions('vmInstance.uuid', '=', vm_uuid)
vm_nic_uuid = res_ops.query_resource(res_ops.VM_NIC, conditions)[0].uuid
net_ops.add_nic_to_security_group(sg_uuid, [vm_nic_uuid], session_uuid=session_uuid)
net_ops.remove_nic_from_security_group(sg_uuid, [vm_nic_uuid], session_uuid=session_uuid)
net_ops.detach_security_group_from_l3(sg_uuid, l3_net_uuid, session_uuid=session_uuid)
net_ops.delete_security_group(sg_uuid, session_uuid=session_uuid)
vm_ops.destroy_vm(vm_uuid)
vm_ops.expunge_vm(vm_uuid)
if sg_service_need_detach:
net_ops.detach_sg_service_from_l3network(l3_net_uuid, sg_service_providor_uuid, session_uuid=session_uuid)
return self.judge(True)
    def check_load_balancer(self, session_uuid=None):
        """Verify the session may manage load balancers end to end.

        Ensures the private L3 provides the LoadBalancer service (attaching
        it temporarily if needed), creates a helper VM, a public VIP, a
        load balancer with one http listener, adds/removes the VM nic,
        refreshes and finally deletes everything, detaching the temporary
        service again.

        :param session_uuid: session to perform the checked operations with
        :returns: self.judge(True) when every call succeeds
        """
        conditions = res_ops.gen_query_conditions('category', '=', 'Public')
        l3_pub_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        conditions = res_ops.gen_query_conditions('name', '=', 'vrouter')
        service_providor_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, conditions)[0].uuid
        conditions = res_ops.gen_query_conditions('l3Network.uuid', '=', l3_net_uuid)
        network_service_list = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER_L3_REF, conditions)
        # Attach the LoadBalancer service only if the L3 lacks it, and
        # remember to detach it again on the way out.
        lb_service_need_attach = True
        lb_service_need_detach = False
        for service in network_service_list:
            if service.networkServiceType == 'LoadBalancer':
                lb_service_need_attach = False
        if lb_service_need_attach:
            net_ops.attach_lb_service_to_l3network(l3_net_uuid, service_providor_uuid, session_uuid=session_uuid)
            lb_service_need_detach = True
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_name('vm_for_load_balancer_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        vip_option = test_util.VipOption()
        vip_option.set_name("vip_for_load_balancer_policy_checker")
        vip_option.set_session_uuid(session_uuid)
        vip_option.set_l3_uuid(l3_pub_uuid)
        vip = net_ops.create_vip(vip_option)
        lb_uuid = net_ops.create_load_balancer(vip.uuid, 'load_balancer_policy_checker', session_uuid=session_uuid).uuid
        # http listener: public port 2222 forwarded to instance port 80.
        lb_listener_option = test_util.LoadBalancerListenerOption()
        lb_listener_option.set_name('load_balancer_listener_policy_checker')
        lb_listener_option.set_load_balancer_uuid(lb_uuid)
        lb_listener_option.set_load_balancer_port('2222')
        lb_listener_option.set_instance_port('80')
        lb_listener_option.set_protocol('http')
        lb_listener_option.set_session_uuid(session_uuid=session_uuid)
        lbl_uuid = net_ops.create_load_balancer_listener(lb_listener_option).uuid
        conditions = res_ops.gen_query_conditions('vmInstance.uuid', '=', vm_uuid)
        vm_nic_uuid = res_ops.query_resource(res_ops.VM_NIC, conditions)[0].uuid
        net_ops.add_nic_to_load_balancer(lbl_uuid, [vm_nic_uuid], session_uuid=session_uuid)
        net_ops.remove_nic_from_load_balancer(lbl_uuid, [vm_nic_uuid], session_uuid=session_uuid)
        net_ops.refresh_load_balancer(lb_uuid, session_uuid=session_uuid)
        net_ops.delete_load_balancer_listener(lbl_uuid, session_uuid=session_uuid)
        net_ops.delete_load_balancer(lb_uuid, session_uuid=session_uuid)
        net_ops.delete_vip(vip.uuid)
        vm_ops.destroy_vm(vm_uuid)
        vm_ops.expunge_vm(vm_uuid)
        if lb_service_need_detach:
            net_ops.detach_lb_service_from_l3network(l3_net_uuid, service_providor_uuid, session_uuid=session_uuid)
        return self.judge(True)
    def check_port_forwarding(self, session_uuid=None):
        """Verify the session may manage port-forwarding rules.

        Ensures the private L3 provides the PortForwarding service
        (attaching it temporarily if needed), creates a helper VM and a
        public VIP, then creates a TCP rule (8080-8088 -> 8080-8088),
        attaches/detaches it to the VM nic, deletes it and cleans up.

        :param session_uuid: session to perform the checked operations with
        :returns: self.judge(True) when every call succeeds
        """
        conditions = res_ops.gen_query_conditions('category', '=', 'Public')
        l3_pub_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        conditions = res_ops.gen_query_conditions('name', '=', 'vrouter')
        pf_service_providor_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, conditions)[0].uuid
        conditions = res_ops.gen_query_conditions('l3Network.uuid', '=', l3_net_uuid)
        network_service_list = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER_L3_REF, conditions)
        # Attach the PortForwarding service only if the L3 lacks it, and
        # remember to detach it again on the way out.
        pf_service_need_attach = True
        pf_service_need_detach = False
        for service in network_service_list:
            if service.networkServiceType == 'PortForwarding':
                pf_service_need_attach = False
        if pf_service_need_attach:
            net_ops.attach_pf_service_to_l3network(l3_net_uuid, pf_service_providor_uuid, session_uuid=session_uuid)
            pf_service_need_detach = True
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_name('vm_for_port_forwarding_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        conditions = res_ops.gen_query_conditions('vmInstance.uuid', '=', vm_uuid)
        vm_nic_uuid = res_ops.query_resource(res_ops.VM_NIC, conditions)[0].uuid
        vip_option = test_util.VipOption()
        vip_option.set_name("vip_for_port_forwarding_policy_checker")
        vip_option.set_session_uuid(session_uuid)
        vip_option.set_l3_uuid(l3_pub_uuid)
        vip = net_ops.create_vip(vip_option)
        pf_rule_creation_option = test_util.PortForwardingRuleOption()
        pf_rule_creation_option.set_vip_uuid(vip.uuid)
        pf_rule_creation_option.set_protocol('TCP')
        pf_rule_creation_option.set_vip_ports('8080', '8088')
        pf_rule_creation_option.set_private_ports('8080', '8088')
        pf_rule_creation_option.set_name('port_forwarding_rule_policy_checker')
        pf_rule_creation_option.set_session_uuid(session_uuid=session_uuid)
        pf_rule_uuid = net_ops.create_port_forwarding(pf_rule_creation_option).uuid
        net_ops.attach_port_forwarding(pf_rule_uuid, vm_nic_uuid, session_uuid=session_uuid)
        net_ops.detach_port_forwarding(pf_rule_uuid, session_uuid=session_uuid)
        net_ops.delete_port_forwarding(pf_rule_uuid, session_uuid=session_uuid)
        # VIP/VM cleanup runs with the admin session.
        net_ops.delete_vip(vip.uuid)
        vm_ops.destroy_vm(vm_uuid)
        vm_ops.expunge_vm(vm_uuid)
        if pf_service_need_detach:
            net_ops.detach_pf_service_from_l3network(l3_net_uuid, pf_service_providor_uuid, session_uuid=session_uuid)
        return self.judge(True)
    def check_scheduler(self, session_uuid=None):
        """Verify the session may manage scheduler jobs and triggers.

        Creates a start-VM scheduler job plus a simple-interval trigger,
        links/unlinks them, toggles the job state and deletes both, then
        removes the helper VM.

        :param session_uuid: session to perform the checked operations with
        :returns: self.judge(True) when every call succeeds
        """
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        vm_creation_option.set_name('vm_for_scheduler_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        start_date = int(time.time())
        schd_job = schd_ops.create_scheduler_job('start_vm_scheduler_policy_checker', 'start vm scheduler policy checker', vm_uuid, 'startVm', None, session_uuid=session_uuid)
        # Simple trigger: first fire 5 units after now, repeat interval 15
        # (units per scheduler API -- presumably seconds; confirm).
        schd_trigger = schd_ops.create_scheduler_trigger('start_vm_scheduler_policy_checker', start_date+5, None, 15, 'simple', session_uuid=session_uuid)
        schd_ops.add_scheduler_job_to_trigger(schd_trigger.uuid, schd_job.uuid, session_uuid=session_uuid)
        schd_ops.change_scheduler_state(schd_job.uuid, 'disable', session_uuid=session_uuid)
        schd_ops.change_scheduler_state(schd_job.uuid, 'enable', session_uuid=session_uuid)
        schd_ops.remove_scheduler_job_from_trigger(schd_trigger.uuid, schd_job.uuid, session_uuid=session_uuid)
        schd_ops.del_scheduler_job(schd_job.uuid, session_uuid=session_uuid)
        schd_ops.del_scheduler_trigger(schd_trigger.uuid, session_uuid=session_uuid)
        schd_ops.get_current_time()
        vm_ops.destroy_vm(vm_uuid)
        vm_ops.expunge_vm(vm_uuid)
        return self.judge(True)
def check_pci(self, session_uuid=None):
#Haven't simulator pci device, skip to check
pass
    def check_zwatch(self, session_uuid=None):
        """Verify the session may manage zwatch alarms and event subscriptions.

        Creates an SNS http endpoint plus two topics, subscribes one topic,
        creates a CPU-usage alarm and a VM-state-change event subscription,
        then exercises update / add-remove action / state-change calls
        before deleting everything again.

        :param session_uuid: session to perform the checked operations with
        :returns: self.judge(True) when every call succeeds
        """
        http_endpoint_name='http_endpoint_for zwatch_policy_checker'
        url = 'http://localhost:8080/webhook-url'
        http_endpoint=zwt_ops.create_sns_http_endpoint(url, http_endpoint_name, session_uuid=session_uuid)
        http_endpoint_uuid=http_endpoint.uuid
        sns_topic_uuid = zwt_ops.create_sns_topic('sns_topic_for zwatch_policy_checker', session_uuid=session_uuid).uuid
        sns_topic_uuid1 = zwt_ops.create_sns_topic('sns_topic_for zwatch_policy_checker_01', session_uuid=session_uuid).uuid
        zwt_ops.subscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=session_uuid)
        # Alarm: CPUUsedUtilization >= 10 over a 60-unit period on the
        # ZStack/VM namespace (period presumably seconds -- confirm).
        namespace = 'ZStack/VM'
        actions = [{"actionUuid": sns_topic_uuid, "actionType": "sns"}]
        period = 60
        comparison_operator = 'GreaterThanOrEqualTo'
        threshold = 10
        metric_name = 'CPUUsedUtilization'
        # Event subscription filter: VM state changed to Disconnected.
        labels = [{"key": "NewState", "op": "Equal", "value": "Disconnected"}]
        event_name = 'VMStateChangedOnHost'
        alarm_uuid = zwt_ops.create_alarm(comparison_operator, period, threshold, namespace, metric_name, session_uuid=session_uuid).uuid
        event_sub_uuid = zwt_ops.subscribe_event(namespace, event_name, actions, labels, session_uuid=session_uuid).uuid
        zwt_ops.update_alarm(alarm_uuid, comparison_operator='GreaterThan', session_uuid=session_uuid)
        zwt_ops.update_sns_application_endpoint(http_endpoint_uuid, 'new_endpoint_name', 'new description', session_uuid=session_uuid)
        zwt_ops.add_action_to_alarm(alarm_uuid, sns_topic_uuid1, 'sns', session_uuid=session_uuid)
        zwt_ops.remove_action_from_alarm(alarm_uuid, sns_topic_uuid, session_uuid=session_uuid)
        zwt_ops.change_alarm_state(alarm_uuid, 'disable', session_uuid=session_uuid)
        zwt_ops.change_sns_topic_state(sns_topic_uuid, 'disable', session_uuid=session_uuid)
        zwt_ops.change_sns_application_endpoint_state(http_endpoint_uuid, 'disable', session_uuid=session_uuid)
        # Cleanup in reverse order of creation.
        zwt_ops.delete_alarm(alarm_uuid, session_uuid=session_uuid)
        zwt_ops.unsubscribe_event(event_sub_uuid, session_uuid=session_uuid)
        zwt_ops.unsubscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=session_uuid)
        zwt_ops.delete_sns_topic(sns_topic_uuid, session_uuid=session_uuid)
        zwt_ops.delete_sns_topic(sns_topic_uuid1, session_uuid=session_uuid)
        zwt_ops.delete_sns_application_endpoint(http_endpoint_uuid, session_uuid=session_uuid)
        return self.judge(True)
    def check_sns(self, session_uuid=None):
        """Verify the session may manage SNS endpoints and topics.

        Near-duplicate of check_zwatch, focused on the SNS side.
        NOTE(review): unlike check_zwatch, several calls here (endpoint and
        alarm creation, action add/remove, alarm/event deletion) are made
        WITHOUT session_uuid, i.e. with the admin session -- confirm that
        this mix is intentional.

        :param session_uuid: session to perform the checked operations with
        :returns: self.judge(True) when every call succeeds
        """
        http_endpoint_name='http_endpoint_for zwatch_policy_checker'
        url = 'http://localhost:8080/webhook-url'
        # Admin-session call (no session_uuid).
        http_endpoint=zwt_ops.create_sns_http_endpoint(url, http_endpoint_name)
        http_endpoint_uuid=http_endpoint.uuid
        sns_topic_uuid = zwt_ops.create_sns_topic('sns_topic_for zwatch_policy_checker', session_uuid=session_uuid).uuid
        sns_topic_uuid1 = zwt_ops.create_sns_topic('sns_topic_for zwatch_policy_checker_01', session_uuid=session_uuid).uuid
        zwt_ops.subscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=session_uuid)
        # Alarm: CPUUsedUtilization >= 10 over a 60-unit period on ZStack/VM.
        namespace = 'ZStack/VM'
        actions = [{"actionUuid": sns_topic_uuid, "actionType": "sns"}]
        period = 60
        comparison_operator = 'GreaterThanOrEqualTo'
        threshold = 10
        metric_name = 'CPUUsedUtilization'
        alarm_uuid = zwt_ops.create_alarm(comparison_operator, period, threshold, namespace, metric_name).uuid
        # Event subscription filter: VM state changed to Disconnected.
        labels = [{"key": "NewState", "op": "Equal", "value": "Disconnected"}]
        event_name = 'VMStateChangedOnHost'
        event_sub_uuid = zwt_ops.subscribe_event(namespace, event_name, actions, labels).uuid
        zwt_ops.update_sns_application_endpoint(http_endpoint_uuid, 'new_endpoint_name', 'new description', session_uuid=session_uuid)
        zwt_ops.add_action_to_alarm(alarm_uuid, sns_topic_uuid1, 'sns')
        zwt_ops.remove_action_from_alarm(alarm_uuid, sns_topic_uuid)
        zwt_ops.change_sns_topic_state(sns_topic_uuid, 'disable', session_uuid=session_uuid)
        zwt_ops.change_sns_application_endpoint_state(http_endpoint_uuid, 'disable', session_uuid=session_uuid)
        # Cleanup.
        zwt_ops.delete_alarm(alarm_uuid)
        zwt_ops.unsubscribe_event(event_sub_uuid)
        zwt_ops.unsubscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=session_uuid)
        zwt_ops.delete_sns_topic(sns_topic_uuid, session_uuid=session_uuid)
        zwt_ops.delete_sns_topic(sns_topic_uuid1, session_uuid=session_uuid)
        zwt_ops.delete_sns_application_endpoint(http_endpoint_uuid, session_uuid=session_uuid)
        return self.judge(True)
def check_platform_admin_permission(self, username, password):
session_uuid = iam2_ops.login_iam2_virtual_id(username, password)
#Check if have permission to create project
try:
project_uuid = iam2_ops.create_iam2_project(name='platform_admin_create_project_permission_check', session_uuid=session_uuid).uuid
iam2_ops.delete_iam2_project(project_uuid, session_uuid=session_uuid)
except:
test_util.test_logger('Check Result: [Virtual ID:] %s is Platform Admin, but create project failed' % username)
return self.judge(False)
    def check_project_admin_permission(self, username, password):
        """Verify a project-admin virtual id can appoint a project operator.

        Logs in as the virtual id, creates a throw-away virtual id, adds it
        to the project carried by the __ProjectAdmin__ attribute, promotes
        it to project operator from within a project session, then deletes
        it again.

        :param username: virtual id name to log in with
        :param password: virtual id password (pre-hashed digest)
        :returns: self.judge(True) on success, self.judge(False) on failure
        """
        session_uuid = iam2_ops.login_iam2_virtual_id(username, password)
        #Check if have permission to create project
        #try:
        #    project_uuid = iam2_ops.create_iam2_project(name='porject_admin_create_project_permission_check', session_uuid=session_uuid).uuid
        #    iam2_ops.delete_iam2_project(project_uuid, session_uuid=session_uuid)
        #    test_util.test_logger('Check Result: [Virtual ID:] %s is Porject Admin, but is able to create project' % username)
        #    return self.judge(False)
        #except KeyError as e:
        #    print e
        #Check if have permission to setup project operator
        try:
            # Password is presumably a pre-hashed digest string as the IAM2
            # API expects -- confirm against iam2_ops.
            project_operator_uuid = iam2_ops.create_iam2_virtual_id(name='project_admin_change_project_operator_permission_check', password='b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86').uuid
            project_uuid = ''
            # Find the project this virtual id administers from its
            # __ProjectAdmin__ attribute.
            for lst in self.test_obj.get_vid_attributes():
                if lst['name'] == '__ProjectAdmin__':
                    project_uuid = lst['value']
            if project_uuid != '':
                iam2_ops.add_iam2_virtual_ids_to_project([project_operator_uuid], project_uuid)
                attributes = [{"name": "__ProjectOperator__", "value": project_uuid}]
                conditions = res_ops.gen_query_conditions('uuid', '=', project_uuid)
                project_name = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0].name
                # The promotion happens from within a project-scoped session.
                session_uuid = iam2_ops.login_iam2_project(project_name, session_uuid=session_uuid).uuid
                iam2_ops.add_attributes_to_iam2_virtual_id(project_operator_uuid, attributes, session_uuid=session_uuid)
            iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
        except KeyError as e:
            # NOTE(review): only KeyError is caught; any other API failure
            # propagates instead of yielding judge(False) -- confirm intent.
            test_util.test_logger('Check Result: [Virtual ID:] %s is Project Admin, but setup project operator failed' % username)
            return self.judge(False)
        return self.judge(True)
    def check_project_operator_permission(self, username, password):
        """Verify a project-operator virtual id can add members to its project.

        Logs in as the virtual id, creates a throw-away virtual id and adds
        it to the project referenced by the __ProjectOperator__ attribute
        from within a project session, then deletes it.

        :param username: virtual id name to log in with
        :param password: virtual id password (pre-hashed digest)
        :returns: self.judge(True) on success, self.judge(False) on failure
        """
        session_uuid = iam2_ops.login_iam2_virtual_id(username, password)
        #Check if have permission to setup project operator
        #try:
        #    project_operator_uuid = iam2_ops.create_iam2_virtual_id(name='project_admin_create_project_operator_permission_check', password='b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86', attributes=[{"name":"__ProjectOperator__"}], session_uuid=session_uuid).uuid
        #    test_util.test_logger('Check Result: [Virtual ID:] %s is Porject Operator, but is able to create other project operator' % username)
        #    iam2_ops.delete_iam2_virtual_id(project_operator_uuid, session_uuid=session_uuid)
        #    return self.judge(False)
        #except:
        #    pass
        #Check if have permission to add virtual id to project
        # Password is presumably a pre-hashed digest string -- confirm.
        normal_user_uuid = iam2_ops.create_iam2_virtual_id(name='project_operator_add_virtual_add_to_project_permission_check', password='b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86').uuid
        try:
            project_uuid = ''
            # Locate the project this id operates from its attributes.
            for lst in self.test_obj.get_vid_attributes():
                if lst['name'] == '__ProjectOperator__':
                    project_uuid = lst['value']
            if project_uuid != '':
                conditions = res_ops.gen_query_conditions('uuid', '=', project_uuid)
                project_name = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0].name
                # Member management happens inside a project-scoped session.
                session_uuid = iam2_ops.login_iam2_project(project_name, session_uuid=session_uuid).uuid
                iam2_ops.add_iam2_virtual_ids_to_project([normal_user_uuid], project_uuid)
            iam2_ops.delete_iam2_virtual_id(normal_user_uuid)
        except KeyError as e:
            # NOTE(review): only KeyError is caught; other API failures
            # propagate instead of yielding judge(False) -- confirm intent.
            test_util.test_logger('Check Result: [Virtual ID:] %s is Project Operator, but add user to project failed' % username)
            return self.judge(False)
        return self.judge(True)
def check_system_admin_permission(self, username, password):
test_util.test_logger("check_system_admin_permission")
session_uuid = acc_ops.login_by_account(username, password)
self.check_vm_operation(session_uuid=session_uuid)
self.check_image_operation(session_uuid=session_uuid)
self.check_snapshot(session_uuid=session_uuid)
self.check_volume_operation(session_uuid=session_uuid)
self.check_affinity_group(session_uuid=session_uuid)
self.check_networks(session_uuid=session_uuid)
self.check_eip(session_uuid=session_uuid)
self.check_security_group(session_uuid=session_uuid)
self.check_load_balancer(session_uuid=session_uuid)
self.check_port_forwarding(session_uuid=session_uuid)
self.check_scheduler(session_uuid=session_uuid)
self.check_pci(session_uuid=session_uuid)
self.check_zwatch(session_uuid=session_uuid)
self.check_sns(session_uuid=session_uuid)
    def check_security_admin_permission(self, username, password):
        """Verify a security-admin account may manage roles and policies.

        Exercises role/policy creation, attach/detach, role state changes,
        statement add/remove and several read APIs, then deletes the role.
        NOTE(review): this also renames the checked virtual id and changes
        its password via update_iam2_virtual_id, so later logins with the
        old credentials would fail -- confirm that is intended.

        :param username: account name to log in with
        :param password: account password
        """
        session_uuid = acc_ops.login_by_account(username, password)
        vid_uuid = self.test_obj.get_vid().uuid
        role_uuid = iam2_ops.create_role('security_created_role', session_uuid=session_uuid).uuid
        # Policy granting every image API action.
        statements = [{"effect":"Allow","actions":["org.zstack.header.image.**"]}]
        policy_uuid = iam2_ops.create_policy('policy', statements, session_uuid=session_uuid).uuid
        iam2_ops.attach_policy_to_role(policy_uuid, role_uuid, session_uuid=session_uuid)
        iam2_ops.add_roles_to_iam2_virtual_id([role_uuid], vid_uuid, session_uuid=session_uuid)
        iam2_ops.detach_policy_from_role(policy_uuid, role_uuid, session_uuid=session_uuid)
        iam2_ops.update_role(role_uuid, [{"effect":"Allow","actions":[]}], session_uuid=session_uuid)
        iam2_ops.add_policy_statements_to_role(role_uuid, statements, session_uuid=session_uuid)
        iam2_ops.remove_roles_from_iam2_virtual_id([role_uuid], vid_uuid, session_uuid=session_uuid)
        disable = 'disable'
        enable = 'enable'
        Disabled = 'Disabled'
        # Disable the role and verify the state actually changed.
        iam2_ops.change_role_state(role_uuid, disable, session_uuid=session_uuid)
        res_inv = res_ops.get_resource(res_ops.ROLE, uuid=role_uuid, session_uuid=session_uuid)[0]
        if res_inv.state != Disabled:
            test_util.test_fail("test change iam2 role state fail")
        iam2_ops.change_role_state(role_uuid, enable, session_uuid=session_uuid)
        # Read-only permission probes.
        res_ops.get_resource(res_ops.POLICY, session_uuid=session_uuid)
        res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID, session_uuid=session_uuid)
        res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID_GROUP, session_uuid=session_uuid)
        res_ops.get_resource(res_ops.QUOTA, session_uuid=session_uuid)
        virtual_id_new_name = 'virtual_id_new_name'
        virtual_id_new_des = 'virtual_id_new_des'
        virtual_id_new_password = 'virtual_id_new_password'
        iam2_ops.update_iam2_virtual_id(vid_uuid, virtual_id_new_name, virtual_id_new_des, virtual_id_new_password, session_uuid=session_uuid)
        action = "org.zstack.header.image.**"
        statement_uuid = iam2_ops.get_policy_statement_uuid_of_role(role_uuid, action)
        iam2_ops.remove_policy_statements_from_role(role_uuid, [statement_uuid], session_uuid=session_uuid)
        iam2_ops.delete_role(role_uuid, session_uuid=session_uuid)
        #TODO:
        # org.zstack.iam2.api.APIRemoveRolesFromIAM2VirtualIDGroupMsg
        # org.zstack.header.identity.APIAttachPolicyToUserGroupMsg
        # org.zstack.header.identity.APIDetachPolicyFromUserGroupMsg
def check_audit_admin_permission(self, username, password):
audit_session_uuid = acc_ops.login_by_account(username, password)
res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SFTP_BACKUP_STORAGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.CEPH_BACKUP_STORAGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.ZONE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.L2_NETWORK, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.L2_VLAN_NETWORK, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.L2_VXLAN_NETWORK, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.L2_VXLAN_NETWORK_POOL, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VNI_RANGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.CLUSTER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.L3_NETWORK, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.INSTANCE_OFFERING, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IMAGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VOLUME, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SHARE_VOLUME, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VM_INSTANCE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IP_RANGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.HOST, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.NETWORK_SERVICE_PROVIDER, session_uuid=audit_session_uuid)
#res_ops.get_resource(res_ops.NETWORK_SERVICE_PROVIDER_L3_REF, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.APPLIANCE_VM, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VIRTUALROUTER_VM, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.DISK_OFFERING, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.ACCOUNT, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.CEPH_PRIMARY_STORAGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.CEPH_PRIMARY_STORAGE_POOL, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SECURITY_GROUP, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SECURITY_GROUP_RULE, session_uuid=audit_session_uuid)
#res_ops.get_resource(res_ops.VM_SECURITY_GROUP, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VM_NIC, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.PORT_FORWARDING, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.MANAGEMENT_NODE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.EIP, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VIP, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VR_OFFERING, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SYSTEM_TAG, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.USER_TAG, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VOLUME_SNAPSHOT_TREE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VOLUME_SNAPSHOT, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.USER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.LOAD_BALANCER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.LOAD_BALANCER_LISTENER, session_uuid=audit_session_uuid)
#res_ops.get_resource(res_ops.LOCAL_STORAGE_RESOURCE_REF, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IMAGE_STORE_BACKUP_STORAGE, session_uuid=audit_session_uuid)
#res_ops.get_resource(res_ops.SCHEDULER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SCHEDULERJOB, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SCHEDULERTRIGGER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VCENTER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VCENTER_CLUSTER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VCENTER_BACKUP_STORAGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VCENTER_PRIMARY_STORAGE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.MONITOR_TRIGGER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.MONITOR_TRIGGER_ACTION, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.PXE_SERVER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.CHASSIS, session_uuid=audit_session_uuid)
#res_ops.get_resource(res_ops.HWINFO, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.BAREMETAL_INS, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.LONGJOB, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.ALARM, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.EVENT_SUBSCRIPTION, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SNS_APPLICATION_ENDPOINT, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SNS_APPLICATION_PLATFORM, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SNS_TOPIC, session_uuid=audit_session_uuid)
#res_ops.get_resource(res_ops.SNS_TOPIC_SUBSCRIBER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SNS_DING_TALK_ENDPOINT, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SNS_EMAIL_ENDPOINT, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SNS_EMAIL_PLATFORM, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SNS_HTTP_ENDPOINT, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SNS_TEXT_TEMPLATE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.AFFINITY_GROUP, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IAM2_ORGANIZATION, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IAM2_PROJECT, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID_GROUP, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IAM2_PROJECT_TEMPLATE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID_GROUP_ATTRIBUTE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IAM2_VIRTUAL_ID_ATTRIBUTE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IAM2_PROJECT_ATTRIBUTE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IAM2_ORGANIZATION_ATTRIBUTE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.ROLE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.POLICY, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.DATACENTER, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.ALIYUNNAS_ACCESSGROUP, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.NAS_FILESYSTEM, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.NAS_MOUNTTARGET, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.STACK_TEMPLATE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.RESOURCE_STACK, session_uuid=audit_session_uuid)
#res_ops.get_resource(res_ops.EVENT_FROM_STACK, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.TICKET, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.TICKET_HISTORY, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.QUOTA, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.CERTIFICATE, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.VOLUME_BACKUP, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.IPSEC_CONNECTION, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.SCSI_LUN, session_uuid=audit_session_uuid)
res_ops.get_resource(res_ops.ISCSI_SERVER, session_uuid=audit_session_uuid)
def check(self):
super(zstack_vid_attr_checker, self).check()
password = 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86'
virtual_id = self.test_obj.get_vid()
for lst in self.test_obj.get_vid_attributes():
if lst['name'] == '__PlatformAdmin__' and self.test_obj.get_customized != "noDeleteAdminPermission":
self.check_login_by_vid(virtual_id.name, password)
self.check_platform_admin_permission(virtual_id.name, password)
elif lst['name'] == '__ProjectAdmin__':
self.check_login_by_vid(virtual_id.name, password)
self.check_project_admin_permission(virtual_id.name, password)
elif lst['name'] == '__ProjectOperator__':
self.check_login_by_vid(virtual_id.name, password)
self.check_project_operator_permission(virtual_id.name, password)
elif lst['name'] == '__AuditAdmin__':
self.check_login_by_account(virtual_id.name, password)
self.check_audit_admin_permission(virtual_id.name, password)
elif lst['name'] == '__SecurityAdmin__':
self.check_login_by_account(virtual_id.name, password)
self.check_security_admin_permission(virtual_id.name, password)
elif lst['name'] == '__SystemAdmin__':
self.check_login_by_account(virtual_id.name, password)
self.check_system_admin_permission(virtual_id.name, password)
else:
test_util.test_fail("not found matched attribute %s" %(str(lst['name'])))
return self.judge(True)
class zstack_vid_policy_checker(checker_header.TestChecker):
def __init__(self):
self.password = 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86'
super(zstack_vid_policy_checker, self).__init__()
def check_login(self, username, password):
customized = self.test_obj.get_customized()
if customized == None:
virtual_id = self.test_obj.get_vid()
conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=', virtual_id.uuid)
project_name = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0].name
plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(username, password)
if customized == None:
iam2_ops.login_iam2_project(project_name, plain_user_session_uuid)
    def check_vm_operation(self):
        """Check VM lifecycle permissions for the virtual id.

        For a project vid (get_customized() is None) the check logs into the
        project, creates a VM through the project session and runs the full
        lifecycle (stop/start/migrate/suspend/resume/destroy/expunge),
        expecting every call to succeed.  For a customized vid the same
        lifecycle is run but destroy/expunge are expected to be DENIED;
        returns 1 when a forbidden call unexpectedly succeeds, None otherwise.
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Project vid: resolve the project, log into it, and remember the
            # project's linked account so admin can share resources with it.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            # Customized vid: operate with the plain virtual-id session.
            project_login_session_uuid = plain_user_session_uuid
        # Gather VM creation ingredients (L3, image, offering); for project
        # vids each one is shared to the project's linked account first.
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_net_uuid])
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
        vm_creation_option.set_name('vm_policy_checker')
        vm_creation_option.set_session_uuid(project_login_session_uuid)
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        # VM related ops: Create, Delete, Expunge, Start, Stop, Suspend, Resume, Migrate
        vm_ops.stop_vm(vm_uuid, session_uuid=project_login_session_uuid)
        vm_ops.start_vm(vm_uuid, session_uuid=project_login_session_uuid)
        candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm_uuid)
        if candidate_hosts != None and test_lib.lib_check_vm_live_migration_cap(vm):
            # NOTE(review): the two calls differ in how candidate_hosts is
            # indexed (.inventories[0] vs [0]); the bare except falls back to
            # the second form when the first shape does not apply.
            try:
                vm_ops.migrate_vm(vm_uuid, candidate_hosts.inventories[0].uuid, session_uuid=project_login_session_uuid)
            except:
                vm_ops.migrate_vm(vm_uuid, candidate_hosts[0].uuid, session_uuid=project_login_session_uuid)
        vm_ops.stop_vm(vm_uuid, force='cold', session_uuid=project_login_session_uuid)
        vm_ops.start_vm(vm_uuid, session_uuid=project_login_session_uuid)
        vm_ops.suspend_vm(vm_uuid, session_uuid=project_login_session_uuid)
        vm_ops.resume_vm(vm_uuid, session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Project vid: destroy/expunge must be permitted.
            vm_ops.destroy_vm(vm_uuid, session_uuid=project_login_session_uuid)
            vm_ops.expunge_vm(vm_uuid, session_uuid=project_login_session_uuid)
            return self.judge(True)
        else:
            # Customized vid: destroy/expunge must be denied; a successful
            # call is a policy violation, reported by returning 1.
            try:
                vm_ops.destroy_vm(vm_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("destroy_vm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.expunge_vm(vm_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("expunge_vm should not be runned")
                return 1
            except Exception as e:
                pass
            return
    def check_vm_operation_without_create_permission(self):
        """Check VM lifecycle permissions for a vid lacking create rights.

        The VM is created by the admin session (no project session on the
        creation call) and then handed to the project's linked account, after
        which the full lifecycle is driven through the project session.  Any
        exception fails the check (judge(False)); success returns judge(True).
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
        project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
        project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
        project_linked_account_uuid = project_ins.linkedAccountUuid
        try:
            # Build the VM from admin-owned resources shared to the project.
            vm_creation_option = test_util.VmOption()
            conditions = res_ops.gen_query_conditions('system', '=', 'false')
            conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
            l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
            acc_ops.share_resources([project_linked_account_uuid], [l3_net_uuid])
            vm_creation_option.set_l3_uuids([l3_net_uuid])
            conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
            conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
            image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
            vm_creation_option.set_image_uuid(image_uuid)
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
            conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
            instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
            vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
            vm_creation_option.set_name('vm_without_create_policy_checker')
            # Created without a session uuid, i.e. by admin — the vid under
            # test is not allowed to create VMs itself.
            vm = vm_ops.create_vm(vm_creation_option)
            vm_uuid = vm.uuid
            res_ops.change_recource_owner(project_linked_account_uuid, vm_uuid)
            # VM related ops: Create, Delete, Expunge, Start, Stop, Suspend, Resume, Migrate
            vm_ops.stop_vm(vm_uuid, session_uuid=project_login_session_uuid)
            vm_ops.start_vm(vm_uuid, session_uuid=project_login_session_uuid)
            candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm_uuid)
            if candidate_hosts != None and test_lib.lib_check_vm_live_migration_cap(vm):
                # NOTE(review): fallback handles the alternate shape of the
                # candidate-hosts result (.inventories[0] vs [0]).
                try:
                    vm_ops.migrate_vm(vm_uuid, candidate_hosts.inventories[0].uuid, session_uuid=project_login_session_uuid)
                except:
                    vm_ops.migrate_vm(vm_uuid, candidate_hosts[0].uuid, session_uuid=project_login_session_uuid)
            vm_ops.stop_vm(vm_uuid, force='cold', session_uuid=project_login_session_uuid)
            vm_ops.start_vm(vm_uuid, session_uuid=project_login_session_uuid)
            vm_ops.suspend_vm(vm_uuid, session_uuid=project_login_session_uuid)
            vm_ops.resume_vm(vm_uuid, session_uuid=project_login_session_uuid)
            vm_ops.destroy_vm(vm_uuid, session_uuid=project_login_session_uuid)
            vm_ops.expunge_vm(vm_uuid, session_uuid=project_login_session_uuid)
        except Exception as e:
            test_util.test_logger('Check Result: [Virtual ID:] %s has permission for vm except creation but vm check failed' % virtual_id.name)
            test_util.test_logger('Excepiton info: %s' %e)
            return self.judge(False)
        return self.judge(True)
def check_image_operation(self):
virtual_id = self.test_obj.get_vid()
plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
if self.test_obj.get_customized() == None:
conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
project_linked_account_uuid = project_ins.linkedAccountUuid
else:
project_login_session_uuid = plain_user_session_uuid
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
image_option = test_util.ImageOption()
image_option.set_name('image_policy_checker')
image_option.set_description('image for policy check')
image_option.set_format('raw')
image_option.set_mediaType('RootVolumeTemplate')
image_option.set_backup_storage_uuid_list([bs.uuid])
image_option.url = "http://fake_iamge/image.raw"
image_option.set_session_uuid(project_login_session_uuid)
image_uuid = img_ops.add_image(image_option).uuid
img_ops.sync_image_size(image_uuid, session_uuid=project_login_session_uuid)
img_ops.change_image_state(image_uuid, 'disable', session_uuid=project_login_session_uuid)
img_ops.change_image_state(image_uuid, 'enable', session_uuid=project_login_session_uuid)
if bs.type == 'ImageStoreBackupStorage':
img_ops.export_image_from_backup_storage(image_uuid, bs.uuid, session_uuid=project_login_session_uuid)
#img_ops.delete_exported_image_from_backup_storage(image_uuid, bs.uuid, session_uuid=project_login_session_uuid)
img_ops.delete_exported_image_from_backup_storage(image_uuid, bs.uuid)
img_ops.set_image_qga_enable(image_uuid, session_uuid=project_login_session_uuid)
img_ops.set_image_qga_disable(image_uuid, session_uuid=project_login_session_uuid)
cond = res_ops.gen_query_conditions('name', '=', "image_policy_checker")
image = res_ops.query_resource(res_ops.IMAGE, cond, session_uuid=project_login_session_uuid)
if image == None:
test_util.test_fail('fail to query image just added')
return self.judge(False)
if self.test_obj.get_customized() == None:
img_ops.delete_image(image_uuid, session_uuid=project_login_session_uuid)
img_ops.expunge_image(image_uuid, session_uuid=project_login_session_uuid)
return self.judge(True)
else:
try:
img_ops.delete_image(image_uuid, session_uuid=project_login_session_uuid)
test_util.test_logger("delete_image should not be runned")
return 1
except Exception as e:
pass
try:
img_ops.expunge_image(image_uuid, session_uuid=project_login_session_uuid)
test_util.test_logger("expunge_image should not be runned")
return 1
except Exception as e:
pass
    def check_snapshot(self):
        """Check volume-snapshot permissions for the virtual id.

        Builds a data volume plus a VM (admin-side), attaches the volume,
        then creates and reverts a snapshot through the checked session.
        A project vid (get_customized() is None) must also be able to delete
        the snapshot/volume/VM; a customized vid must be denied each of
        those calls — returns 1 when a forbidden call unexpectedly
        succeeds, None otherwise.
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Project vid: resolve project, log into it, remember its account.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            project_login_session_uuid = plain_user_session_uuid
        bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
        # Data volume created by admin, then (for project vids) handed to the
        # project's linked account.
        disk_offering_uuid = res_ops.query_resource(res_ops.DISK_OFFERING)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [disk_offering_uuid])
        volume_option = test_util.VolumeOption()
        volume_option.set_disk_offering_uuid(disk_offering_uuid)
        volume_option.set_name('data_volume_for_snapshot_policy_checker')
        data_volume = vol_ops.create_volume_from_offering(volume_option)
        if self.test_obj.get_customized() == None:
            res_ops.change_recource_owner(project_linked_account_uuid, data_volume.uuid)
        # VM to attach the data volume to, built from shared ingredients.
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_net_uuid])
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
        vm_creation_option.set_name('vm_without_create_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        vol_ops.attach_volume(data_volume.uuid, vm_uuid)
        # Snapshot create + revert through the checked session.
        snapshot_option = test_util.SnapshotOption()
        snapshot_option.set_volume_uuid(data_volume.uuid)
        snapshot_option.set_name('snapshot_policy_checker')
        snapshot_option.set_description('snapshot for policy check')
        snapshot_option.set_session_uuid(project_login_session_uuid)
        snapshot_uuid = vol_ops.create_snapshot(snapshot_option).uuid
        vm_ops.stop_vm(vm_uuid, force='cold')
        vol_ops.use_snapshot(snapshot_uuid, project_login_session_uuid)
        #vol_ops.backup_snapshot(snapshot_uuid, bs.uuid, project_login_session_uuid)
        #new_volume = vol_ops.create_volume_from_snapshot(snapshot_uuid)
        #vol_ops.delete_snapshot_from_backupstorage(snapshot_uuid, [bs.uuid], session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Project vid: cleanup through project/admin sessions must succeed.
            vol_ops.delete_snapshot(snapshot_uuid, project_login_session_uuid)
            vol_ops.delete_volume(data_volume.uuid)
            vol_ops.expunge_volume(data_volume.uuid)
            vm_ops.destroy_vm(vm_uuid)
            vm_ops.expunge_vm(vm_uuid)
            return self.judge(True)
        else:
            # Customized vid: every delete/expunge must be denied; a
            # successful call is a policy violation, reported by returning 1.
            try:
                vol_ops.delete_snapshot(snapshot_uuid, project_login_session_uuid)
                test_util.test_logger("delete_snapshot should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vol_ops.delete_volume(data_volume.uuid, project_login_session_uuid)
                test_util.test_logger("delete_volume should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vol_ops.expunge_volume(data_volume.uuid, project_login_session_uuid)
                test_util.test_logger("expunge_volume should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.destroy_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("destroy_vm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.expunge_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("expunge_vm should not be runned")
                return 1
            except Exception as e:
                pass
    def check_volume_operation(self):
        """Check data-volume permissions for the virtual id.

        Creates a data volume through the checked session, toggles its
        state, attaches it to a helper VM.  A project vid (get_customized()
        is None) must also detach/delete/expunge the volume and clean up the
        VM; a customized vid must be denied the delete/expunge/destroy calls
        — returns 1 when a forbidden call unexpectedly succeeds, None
        otherwise.
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Project vid: resolve project, log into it, remember its account.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            project_login_session_uuid = plain_user_session_uuid
        # Volume related ops: Create, Delete, Expunge, Attach, Dettach, Enable, Disable
        disk_offering_uuid = res_ops.query_resource(res_ops.DISK_OFFERING)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [disk_offering_uuid])
        volume_option = test_util.VolumeOption()
        volume_option.set_disk_offering_uuid(disk_offering_uuid)
        volume_option.set_name('data_volume_policy_checker')
        volume_option.set_session_uuid(project_login_session_uuid)
        data_volume = vol_ops.create_volume_from_offering(volume_option)
        vol_ops.stop_volume(data_volume.uuid, session_uuid=project_login_session_uuid)
        vol_ops.start_volume(data_volume.uuid, session_uuid=project_login_session_uuid)
        # Helper VM to attach the volume to, built from shared ingredients.
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_net_uuid])
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
        vm_creation_option.set_name('vm_for_vol_policy_checker')
        # VM is created by admin (no session uuid), then handed to the
        # project's linked account for project vids.
        #vm_creation_option.set_session_uuid(project_login_session_uuid)
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        if self.test_obj.get_customized() == None:
            res_ops.change_recource_owner(project_linked_account_uuid, vm_uuid)
        vol_ops.attach_volume(data_volume.uuid, vm_uuid, session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Project vid: detach and cleanup must be permitted.
            vol_ops.detach_volume(data_volume.uuid, vm_uuid, session_uuid=project_login_session_uuid)
            vol_ops.delete_volume(data_volume.uuid, session_uuid=project_login_session_uuid)
            vol_ops.expunge_volume(data_volume.uuid, session_uuid=project_login_session_uuid)
            vm_ops.destroy_vm(vm_uuid)
            vm_ops.expunge_vm(vm_uuid)
            return self.judge(True)
        else:
            # Customized vid: delete/expunge/destroy must be denied; a
            # successful call is a policy violation, reported by returning 1.
            try:
                vol_ops.delete_volume(data_volume.uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_volume should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vol_ops.expunge_volume(data_volume.uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("expunge_volume should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.destroy_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("destroy_vm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.expunge_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("expunge_vm should not be runned")
                return 1
            except Exception as e:
                pass
    def check_affinity_group(self):
        """Check affinity-group permissions for the virtual id.

        Creates a helper VM (admin-side) and an anti-affinity group through
        the checked session, then adds the VM to the group.  A project vid
        (get_customized() is None) must also remove the VM and delete the
        group plus the VM; a customized vid must be denied the delete calls
        — returns 1 when a forbidden call unexpectedly succeeds, None
        otherwise.
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Project vid: resolve project, log into it, remember its account.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            project_login_session_uuid = plain_user_session_uuid
        # Helper VM built from admin resources shared to the project.
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_net_uuid])
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
        vm_creation_option.set_name('vm_for_affinity_group_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        if self.test_obj.get_customized() == None:
            res_ops.change_recource_owner(project_linked_account_uuid, vm_uuid)
        # Anti-affinity group created and populated via the checked session.
        ag_uuid = ag_ops.create_affinity_group('affinity_group_policy_checker', 'antiHard', session_uuid=project_login_session_uuid).uuid
        ag_ops.add_vm_to_affinity_group(ag_uuid, vm_uuid, session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Project vid: removal and cleanup must be permitted.
            ag_ops.remove_vm_from_affinity_group(ag_uuid, vm_uuid, session_uuid=project_login_session_uuid)
            ag_ops.delete_affinity_group(ag_uuid, session_uuid=project_login_session_uuid)
            vm_ops.destroy_vm(vm_uuid)
            vm_ops.expunge_vm(vm_uuid)
            return self.judge(True)
        else:
            # Customized vid: delete/destroy must be denied; a successful
            # call is a policy violation, reported by returning 1.
            try:
                ag_ops.delete_affinity_group(ag_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_affinity_group should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.destroy_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("destroy_vm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.expunge_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("expunge_vm should not be runned")
                return 1
            except Exception as e:
                pass
def check_networks(self):
virtual_id = self.test_obj.get_vid()
plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
if self.test_obj.get_customized() == None:
conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
project_linked_account_uuid = project_ins.linkedAccountUuid
else:
project_login_session_uuid = plain_user_session_uuid
zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
vxlan_pool = res_ops.get_resource(res_ops.L2_VXLAN_NETWORK_POOL)
clear_vxlan_pool = False
if vxlan_pool == None or len(vxlan_pool) == 0:
vxlan_pool_uuid = vxlan_ops.create_l2_vxlan_network_pool('vxlan_poll_for networks_polocy_checker', zone_uuid).uuid
vni_uuid = vxlan_ops.create_vni_range('vni_range_for_networks_policy_checker', '10000', '20000', vxlan_pool_uuid).uuid
clear_vxlan_pool = True
elif len(vxlan_pool[0].attachedVniRanges) == 0:
vni_uuid = vxlan_ops.create_vni_range('vni_range_for_networks_policy_checker', '10000', '20000', vxlan_pool[0].uuid).uuid
clear_vxlan_pool = True
else:
vxlan_pool_uuid = vxlan_pool[0].uuid
if self.test_obj.get_customized() == None:
acc_ops.share_resources([project_linked_account_uuid], [vxlan_pool_uuid])
vxlan_pool_uuid = res_ops.get_resource(res_ops.L2_VXLAN_NETWORK_POOL, session_uuid=project_login_session_uuid)[0].uuid
vxlan_l2_uuid = vxlan_ops.create_l2_vxlan_network('vxlan_for_policy_checker', vxlan_pool_uuid, zone_uuid, session_uuid=project_login_session_uuid).uuid
conditions = res_ops.gen_query_conditions('name', '=', 'vrouter')
service_providor_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, conditions, session_uuid=project_login_session_uuid)[0].uuid
l3_uuid = net_ops.create_l3('l3_network_for_policy_checker', vxlan_l2_uuid, session_uuid=project_login_session_uuid).uuid
net_ops.attach_network_service_to_l3network(l3_uuid, service_providor_uuid, session_uuid=project_login_session_uuid)
#net_ops.detach_network_service_from_l3network(l3_uuid, service_providor_uuid, session_uuid=project_login_session_uuid)
if self.test_obj.get_customized() == None:
net_ops.delete_l3(l3_uuid, session_uuid=project_login_session_uuid)
if clear_vxlan_pool:
#vxlan_ops.delete_vni_range(vni_uuid, session_uuid=project_login_session_uuid)
vxlan_ops.delete_vni_range(vni_uuid)
net_ops.delete_l2(vxlan_l2_uuid, session_uuid=project_login_session_uuid)
return self.judge(True)
else:
try:
net_ops.delete_l3(l3_uuid, session_uuid=project_login_session_uuid)
test_util.test_logger("delete_l3 should not be runned")
return 1
except Exception as e:
pass
try:
if clear_vxlan_pool:
vxlan_ops.delete_vni_range(vni_uuid, session_uuid=project_login_session_uuid)
test_util.test_logger("delete_vni_range should not be runned")
return 1
except Exception as e:
pass
try:
net_ops.delete_l2(vxlan_l2_uuid, session_uuid=project_login_session_uuid)
test_util.test_logger("delete_l2 should not be runned")
return 1
except Exception as e:
pass
    def check_eip(self):
        """Policy check for EIP (elastic IP) operations.

        Two flows, selected by ``self.test_obj.get_customized()``:

        * ``None`` (default flow): log the virtual ID into its IAM2 project,
          share the needed resources with the project account, create a
          VM + VIP + EIP, then exercise detach/attach/delete through the
          project session. Every call is expected to succeed; returns
          ``self.judge(True)`` after cleanup.
        * otherwise (customized flow): stay on the plain virtual-ID session
          and expect every privileged operation (detach/delete EIP, delete
          VIP, destroy/expunge VM) to be denied. If any of them executes,
          log it and return 1 (failure).
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Default flow: resolve the IAM2 project this virtual ID belongs
            # to and log into it; the project's linked account is used below
            # for resource sharing and ownership changes.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            # Customized flow: operate directly with the plain user session.
            project_login_session_uuid = plain_user_session_uuid
        vm_creation_option = test_util.VmOption()
        # Pick one public L3 (for the VIP) and one non-system private L3
        # (for the VM nic).
        conditions = res_ops.gen_query_conditions('category', '=', 'Public')
        l3_pub_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_pub_uuid, l3_net_uuid])
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        # Use any non-system Linux image and the first UserVm instance
        # offering; both are shared with the project in the default flow.
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
        vm_creation_option.set_name('vm_for_eip_policy_checker')
        # VM is created with the admin session; in the default flow its
        # ownership is then handed to the project account.
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        if self.test_obj.get_customized() == None:
            # note: 'recource' spelling matches the res_ops API name
            res_ops.change_recource_owner(project_linked_account_uuid, vm_uuid)
        # VIP on the public L3, created under the project/plain session.
        vip_option = test_util.VipOption()
        vip_option.set_name("vip_for_eip_policy_checker")
        vip_option.set_session_uuid(project_login_session_uuid)
        vip_option.set_l3_uuid(l3_pub_uuid)
        vip = net_ops.create_vip(vip_option)
        conditions = res_ops.gen_query_conditions('vmInstance.uuid', '=', vm_uuid)
        vm_nic_uuid = res_ops.query_resource(res_ops.VM_NIC, conditions)[0].uuid
        test_util.test_logger('vip creation finished, vm nic uuid is %s' %vm_nic_uuid)
        # EIP bound to the VM nic via the VIP, created under the same session.
        eip_option = test_util.EipOption()
        eip_option.set_name('eip_policy_checker')
        eip_option.set_session_uuid(project_login_session_uuid)
        eip_option.set_vip_uuid(vip.uuid)
        eip_option.set_vm_nic_uuid(vm_nic_uuid)
        eip = net_ops.create_eip(eip_option)
        if self.test_obj.get_customized() == None:
            # Positive path: exercise EIP detach/attach with the project
            # session, then tear everything down and un-share the resources.
            net_ops.detach_eip(eip.uuid, session_uuid=project_login_session_uuid)
            net_ops.attach_eip(eip.uuid, vm_nic_uuid, session_uuid=project_login_session_uuid)
            net_ops.delete_eip(eip.uuid)
            net_ops.delete_vip(vip.uuid)
            vm_ops.destroy_vm(vm_uuid)
            vm_ops.expunge_vm(vm_uuid)
            acc_ops.revoke_resources([project_linked_account_uuid], [l3_pub_uuid, l3_net_uuid, image_uuid, instance_offering_uuid])
            return self.judge(True)
        else:
            # Negative path: each privileged call below must raise; reaching
            # the logger line means the policy failed to deny it -> return 1.
            try:
                net_ops.detach_eip(eip.uuid, project_login_session_uuid)
                test_util.test_logger("detach_eip should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                net_ops.delete_eip(eip.uuid, project_login_session_uuid)
                test_util.test_logger("delete_eip should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                net_ops.delete_vip(vip.uuid, project_login_session_uuid)
                test_util.test_logger("delete_vip should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.destroy_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("destroy_vm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.expunge_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("expunge_vm should not be runned")
                return 1
            except Exception as e:
                pass
            #acc_ops.revoke_resources([project_linked_account_uuid], [l3_pub_uuid, l3_net_uuid, image_uuid, instance_offering_uuid])
            # NOTE(review): falls through without an explicit return here
            # (implicitly returns None) — confirm the caller treats None as
            # success for the customized flow.
            test_util.test_logger("revoke_resources should not be runned")
    def check_security_group(self):
        """Policy check for security-group operations.

        Default flow (``get_customized() is None``): log into the IAM2
        project, make sure the SecurityGroup network service is attached to
        a private L3 (attaching it if needed), create a VM and a security
        group, attach the SG to the L3 and add the VM nic, then undo
        everything through the project session and return
        ``self.judge(True)``.

        Customized flow: keep the plain virtual-ID session and expect
        delete-SG / destroy-VM / expunge-VM to be denied; return 1 if any
        of them executes.
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Resolve and log into the project owning this virtual ID.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            project_login_session_uuid = plain_user_session_uuid
        vm_creation_option = test_util.VmOption()
        # Pick a non-system private L3 and the SecurityGroup service provider.
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        conditions = res_ops.gen_query_conditions('name', '=', 'SecurityGroup')
        sg_service_providor_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_net_uuid])
        # Attach the SecurityGroup service to the L3 only if it is not
        # already attached; remember whether we must detach it afterwards.
        conditions = res_ops.gen_query_conditions('l3Network.uuid', '=', l3_net_uuid)
        network_service_list = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER_L3_REF, conditions)
        sg_service_need_attach = True
        sg_service_need_detach = False
        for service in network_service_list:
            if service.networkServiceType == 'SecurityGroup':
                sg_service_need_attach = False
        if sg_service_need_attach:
            net_ops.attach_sg_service_to_l3network(l3_net_uuid, sg_service_providor_uuid, session_uuid=project_login_session_uuid)
            sg_service_need_detach = True
        # NOTE(review): the following re-query/re-share duplicates the work
        # done just above (vm_creation_option, l3_net_uuid, share) — kept
        # byte-identical here; presumably harmless since sharing is repeated
        # with the same uuids.
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_net_uuid])
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        # Non-system Linux image + UserVm instance offering for the test VM.
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
        vm_creation_option.set_name('vm_for_security_group_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        if self.test_obj.get_customized() == None:
            # note: 'recource' spelling matches the res_ops API name
            res_ops.change_recource_owner(project_linked_account_uuid, vm_uuid)
        # Create the security group under the project/plain session, attach
        # it to the L3, and put the VM nic into it.
        sg_creation_option = test_util.SecurityGroupOption()
        sg_creation_option.set_name('security_group_policy_checker')
        sg_creation_option.set_session_uuid(session_uuid=project_login_session_uuid)
        sg_uuid = net_ops.create_security_group(sg_creation_option).uuid
        net_ops.attach_security_group_to_l3(sg_uuid, l3_net_uuid, session_uuid=project_login_session_uuid)
        conditions = res_ops.gen_query_conditions('vmInstance.uuid', '=', vm_uuid)
        vm_nic_uuid = res_ops.query_resource(res_ops.VM_NIC, conditions)[0].uuid
        net_ops.add_nic_to_security_group(sg_uuid, [vm_nic_uuid], session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Positive path: remove nic, detach and delete the SG, tear down
            # the VM, restore the service attachment state, un-share.
            net_ops.remove_nic_from_security_group(sg_uuid, [vm_nic_uuid], session_uuid=project_login_session_uuid)
            net_ops.detach_security_group_from_l3(sg_uuid, l3_net_uuid, session_uuid=project_login_session_uuid)
            net_ops.delete_security_group(sg_uuid, session_uuid=project_login_session_uuid)
            vm_ops.destroy_vm(vm_uuid)
            vm_ops.expunge_vm(vm_uuid)
            if sg_service_need_detach:
                net_ops.detach_sg_service_from_l3network(l3_net_uuid, sg_service_providor_uuid, session_uuid=project_login_session_uuid)
            acc_ops.revoke_resources([project_linked_account_uuid], [l3_net_uuid, image_uuid, instance_offering_uuid])
            return self.judge(True)
        else:
            # Negative path: each privileged call must raise; reaching the
            # logger line means the policy failed to deny it -> return 1.
            try:
                net_ops.delete_security_group(sg_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_security_group should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.destroy_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("destroy_vm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.expunge_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("expunge_vm should not be runned")
                return 1
            except Exception as e:
                pass
            # NOTE(review): this detach is not wrapped in try/except and runs
            # with the plain-user session — confirm it is expected to succeed
            # here. Implicitly returns None afterwards.
            if sg_service_need_detach:
                net_ops.detach_sg_service_from_l3network(l3_net_uuid, sg_service_providor_uuid, session_uuid=project_login_session_uuid)
            #acc_ops.revoke_resources([project_linked_account_uuid], [l3_net_uuid, image_uuid, instance_offering_uuid])
    def check_load_balancer(self):
        """Policy check for load-balancer operations.

        Default flow (``get_customized() is None``): log into the IAM2
        project, ensure the LoadBalancer service is attached to a private L3
        (attaching with the admin session if needed), create VM + VIP +
        load balancer + listener, add the VM nic, then exercise
        remove/refresh/delete through the project session and return
        ``self.judge(True)``.

        Customized flow: keep the plain virtual-ID session and expect the
        delete/destroy operations to be denied; return 1 if any executes.
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Resolve and log into the project owning this virtual ID.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            project_login_session_uuid = plain_user_session_uuid
        # Public L3 for the VIP, non-system private L3 for the VM nic, and
        # the 'vrouter' provider that hosts the LoadBalancer service.
        conditions = res_ops.gen_query_conditions('category', '=', 'Public')
        l3_pub_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        conditions = res_ops.gen_query_conditions('name', '=', 'vrouter')
        service_providor_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_pub_uuid, l3_net_uuid])
        # Attach the LoadBalancer service to the L3 only if missing;
        # remember whether we must detach it afterwards.
        conditions = res_ops.gen_query_conditions('l3Network.uuid', '=', l3_net_uuid)
        network_service_list = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER_L3_REF, conditions)
        lb_service_need_attach = True
        lb_service_need_detach = False
        for service in network_service_list:
            if service.networkServiceType == 'LoadBalancer':
                lb_service_need_attach = False
        if lb_service_need_attach:
            # Attach runs with the admin session (the project-session
            # variant is kept commented out).
            #net_ops.attach_lb_service_to_l3network(l3_net_uuid, service_providor_uuid, session_uuid=project_login_session_uuid)
            net_ops.attach_lb_service_to_l3network(l3_net_uuid, service_providor_uuid)
            lb_service_need_detach = True
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        # Non-system Linux image + UserVm instance offering for the test VM.
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
        vm_creation_option.set_name('vm_for_load_balancer_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        if self.test_obj.get_customized() == None:
            # note: 'recource' spelling matches the res_ops API name
            res_ops.change_recource_owner(project_linked_account_uuid, vm_uuid)
        # VIP, load balancer, and an http listener (2222 -> 80) created
        # under the project/plain session; then the VM nic is added.
        vip_option = test_util.VipOption()
        vip_option.set_name("vip_for_load_balancer_policy_checker")
        vip_option.set_session_uuid(project_login_session_uuid)
        vip_option.set_l3_uuid(l3_pub_uuid)
        vip = net_ops.create_vip(vip_option)
        lb_uuid = net_ops.create_load_balancer(vip.uuid, 'load_balancer_policy_checker', session_uuid=project_login_session_uuid).uuid
        lb_listener_option = test_util.LoadBalancerListenerOption()
        lb_listener_option.set_name('load_balancer_listener_policy_checker')
        lb_listener_option.set_load_balancer_uuid(lb_uuid)
        lb_listener_option.set_load_balancer_port('2222')
        lb_listener_option.set_instance_port('80')
        lb_listener_option.set_protocol('http')
        lb_listener_option.set_session_uuid(session_uuid=project_login_session_uuid)
        lbl_uuid = net_ops.create_load_balancer_listener(lb_listener_option).uuid
        conditions = res_ops.gen_query_conditions('vmInstance.uuid', '=', vm_uuid)
        vm_nic_uuid = res_ops.query_resource(res_ops.VM_NIC, conditions)[0].uuid
        net_ops.add_nic_to_load_balancer(lbl_uuid, [vm_nic_uuid], session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Positive path: remove nic, refresh, delete listener/LB/VIP,
            # tear down the VM, restore service attachment, un-share.
            net_ops.remove_nic_from_load_balancer(lbl_uuid, [vm_nic_uuid], session_uuid=project_login_session_uuid)
            net_ops.refresh_load_balancer(lb_uuid, session_uuid=project_login_session_uuid)
            net_ops.delete_load_balancer_listener(lbl_uuid, session_uuid=project_login_session_uuid)
            net_ops.delete_load_balancer(lb_uuid, session_uuid=project_login_session_uuid)
            net_ops.delete_vip(vip.uuid)
            vm_ops.destroy_vm(vm_uuid)
            vm_ops.expunge_vm(vm_uuid)
            if lb_service_need_detach:
                # Detach runs with the admin session, mirroring the attach.
                #net_ops.detach_lb_service_from_l3network(l3_net_uuid, service_providor_uuid, session_uuid=project_login_session_uuid)
                net_ops.detach_lb_service_from_l3network(l3_net_uuid, service_providor_uuid)
            acc_ops.revoke_resources([project_linked_account_uuid], [l3_pub_uuid, l3_net_uuid, image_uuid, instance_offering_uuid])
            return self.judge(True)
        else:
            # Negative path: each privileged call must raise; reaching the
            # logger line means the policy failed to deny it -> return 1.
            try:
                net_ops.delete_load_balancer_listener(lbl_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_load_balancer_listener should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                net_ops.delete_load_balancer(lb_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_load_balancer should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                net_ops.delete_vip(vip.uuid, project_login_session_uuid)
                test_util.test_logger("delete_vip should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.destroy_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("destroy_vm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.expunge_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("expunge_vm should not be runned")
                return 1
            except Exception as e:
                pass
            # Best-effort detach with the plain session; failures ignored.
            # Implicitly returns None afterwards.
            try:
                if lb_service_need_detach:
                    net_ops.detach_lb_service_from_l3network(l3_net_uuid, service_providor_uuid, session_uuid=project_login_session_uuid)
            except Exception as e:
                pass
            #acc_ops.revoke_resources([project_linked_account_uuid], [l3_pub_uuid, l3_net_uuid, image_uuid, instance_offering_uuid])
    def check_port_forwarding(self):
        """Policy check for port-forwarding operations.

        Default flow (``get_customized() is None``): log into the IAM2
        project, ensure the PortForwarding service is attached to a private
        L3 (attaching with the admin session if needed), create VM + VIP,
        create a TCP port-forwarding rule (8080-8088 -> 8080-8088) and
        attach it to the VM nic, then detach/delete through the project
        session and return ``self.judge(True)``.

        Customized flow: keep the plain virtual-ID session and expect the
        delete/destroy operations to be denied; return 1 if any executes.
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Resolve and log into the project owning this virtual ID.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            project_login_session_uuid = plain_user_session_uuid
        # Public L3 for the VIP, non-system private L3 for the VM nic, and
        # the 'vrouter' provider that hosts the PortForwarding service.
        conditions = res_ops.gen_query_conditions('category', '=', 'Public')
        l3_pub_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        conditions = res_ops.gen_query_conditions('category', '=', 'Private', conditions)
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        conditions = res_ops.gen_query_conditions('name', '=', 'vrouter')
        pf_service_providor_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_pub_uuid, l3_net_uuid])
        # Attach the PortForwarding service to the L3 only if missing;
        # remember whether we must detach it afterwards.
        conditions = res_ops.gen_query_conditions('l3Network.uuid', '=', l3_net_uuid)
        network_service_list = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER_L3_REF, conditions)
        pf_service_need_attach = True
        pf_service_need_detach = False
        for service in network_service_list:
            if service.networkServiceType == 'PortForwarding':
                pf_service_need_attach = False
        if pf_service_need_attach:
            # Attach runs with the admin session (the project-session
            # variant is kept commented out).
            #net_ops.attach_pf_service_to_l3network(l3_net_uuid, pf_service_providor_uuid, session_uuid=project_login_session_uuid)
            net_ops.attach_pf_service_to_l3network(l3_net_uuid, pf_service_providor_uuid)
            pf_service_need_detach = True
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        # Non-system Linux image + UserVm instance offering for the test VM.
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
        vm_creation_option.set_name('vm_for_port_forwarding_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        if self.test_obj.get_customized() == None:
            # note: 'recource' spelling matches the res_ops API name
            res_ops.change_recource_owner(project_linked_account_uuid, vm_uuid)
        conditions = res_ops.gen_query_conditions('vmInstance.uuid', '=', vm_uuid)
        vm_nic_uuid = res_ops.query_resource(res_ops.VM_NIC, conditions)[0].uuid
        # VIP and a TCP port-forwarding rule created under the
        # project/plain session, then attached to the VM nic.
        vip_option = test_util.VipOption()
        vip_option.set_name("vip_for_port_forwarding_policy_checker")
        vip_option.set_session_uuid(project_login_session_uuid)
        vip_option.set_l3_uuid(l3_pub_uuid)
        vip = net_ops.create_vip(vip_option)
        pf_rule_creation_option = test_util.PortForwardingRuleOption()
        pf_rule_creation_option.set_vip_uuid(vip.uuid)
        pf_rule_creation_option.set_protocol('TCP')
        pf_rule_creation_option.set_vip_ports('8080', '8088')
        pf_rule_creation_option.set_private_ports('8080', '8088')
        pf_rule_creation_option.set_name('port_forwarding_rule_policy_checker')
        pf_rule_creation_option.set_session_uuid(session_uuid=project_login_session_uuid)
        pf_rule_uuid = net_ops.create_port_forwarding(pf_rule_creation_option).uuid
        net_ops.attach_port_forwarding(pf_rule_uuid, vm_nic_uuid, session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Positive path: detach and delete the rule and VIP, tear down
            # the VM, restore the service attachment state, un-share.
            net_ops.detach_port_forwarding(pf_rule_uuid, session_uuid=project_login_session_uuid)
            net_ops.delete_port_forwarding(pf_rule_uuid, session_uuid=project_login_session_uuid)
            net_ops.delete_vip(vip.uuid)
            vm_ops.destroy_vm(vm_uuid)
            vm_ops.expunge_vm(vm_uuid)
            if pf_service_need_detach:
                # Detach runs with the admin session, mirroring the attach.
                #net_ops.detach_pf_service_from_l3network(l3_net_uuid, pf_service_providor_uuid, session_uuid=project_login_session_uuid)
                net_ops.detach_pf_service_from_l3network(l3_net_uuid, pf_service_providor_uuid)
            acc_ops.revoke_resources([project_linked_account_uuid], [l3_pub_uuid, l3_net_uuid, image_uuid, instance_offering_uuid])
            return self.judge(True)
        else:
            # Negative path: each privileged call must raise; reaching the
            # logger line means the policy failed to deny it -> return 1.
            try:
                net_ops.delete_port_forwarding(pf_rule_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_port_forwarding should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                net_ops.delete_vip(vip.uuid, project_login_session_uuid)
                test_util.test_logger("delete_vip should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.destroy_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("destroy_vm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.expunge_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("expunge_vm should not be runned")
                return 1
            except Exception as e:
                pass
            # Restore the service attachment (admin session); implicitly
            # returns None afterwards.
            if pf_service_need_detach:
                net_ops.detach_pf_service_from_l3network(l3_net_uuid, pf_service_providor_uuid)
            #acc_ops.revoke_resources([project_linked_account_uuid], [l3_pub_uuid, l3_net_uuid, image_uuid, instance_offering_uuid])
    def check_scheduler(self):
        """Policy check for scheduler job/trigger operations.

        Default flow (``get_customized() is None``): log into the IAM2
        project, create a VM, create a 'startVm' scheduler job plus a
        simple trigger (first fire in 5s, interval 15s), bind them,
        disable/enable the job, then delete everything through the project
        session and return ``self.judge(True)``.

        Customized flow: keep the plain virtual-ID session and expect the
        delete/destroy operations to be denied; return 1 if any executes.
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Resolve and log into the project owning this virtual ID.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            project_login_session_uuid = plain_user_session_uuid
        # Build a VM on the first non-system L3, sharing the needed
        # resources with the project account in the default flow.
        vm_creation_option = test_util.VmOption()
        conditions = res_ops.gen_query_conditions('system', '=', 'false')
        l3_net_uuid = res_ops.query_resource(res_ops.L3_NETWORK, conditions)[0].uuid
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [l3_net_uuid])
        vm_creation_option.set_l3_uuids([l3_net_uuid])
        conditions = res_ops.gen_query_conditions('platform', '=', 'Linux')
        conditions = res_ops.gen_query_conditions('system', '=', 'false', conditions)
        image_uuid = res_ops.query_resource(res_ops.IMAGE, conditions)[0].uuid
        vm_creation_option.set_image_uuid(image_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [image_uuid])
        conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
        instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
        vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
        if self.test_obj.get_customized() == None:
            acc_ops.share_resources([project_linked_account_uuid], [instance_offering_uuid])
        vm_creation_option.set_name('vm_for_scheduler_policy_checker')
        vm = vm_ops.create_vm(vm_creation_option)
        vm_uuid = vm.uuid
        if self.test_obj.get_customized() == None:
            # note: 'recource' spelling matches the res_ops API name
            res_ops.change_recource_owner(project_linked_account_uuid, vm_uuid)
        # Scheduler job ('startVm' on the test VM) + simple trigger firing
        # 5s from now every 15s, bound together; toggle the job state once.
        start_date = int(time.time())
        schd_job = schd_ops.create_scheduler_job('start_vm_scheduler_policy_checker', 'start vm scheduler policy checker', vm_uuid, 'startVm', None, session_uuid=project_login_session_uuid)
        schd_trigger = schd_ops.create_scheduler_trigger('start_vm_scheduler_policy_checker', start_date+5, None, 15, 'simple', session_uuid=project_login_session_uuid)
        schd_ops.add_scheduler_job_to_trigger(schd_trigger.uuid, schd_job.uuid, session_uuid=project_login_session_uuid)
        schd_ops.change_scheduler_state(schd_job.uuid, 'disable', session_uuid=project_login_session_uuid)
        schd_ops.change_scheduler_state(schd_job.uuid, 'enable', session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Positive path: unbind and delete job/trigger, then tear down
            # the VM with the admin session.
            schd_ops.remove_scheduler_job_from_trigger(schd_trigger.uuid, schd_job.uuid, session_uuid=project_login_session_uuid)
            schd_ops.del_scheduler_job(schd_job.uuid, session_uuid=project_login_session_uuid)
            schd_ops.del_scheduler_trigger(schd_trigger.uuid, session_uuid=project_login_session_uuid)
            schd_ops.get_current_time()
            vm_ops.destroy_vm(vm_uuid)
            vm_ops.expunge_vm(vm_uuid)
            return self.judge(True)
        else:
            # Negative path: each privileged call must raise; reaching the
            # logger line means the policy failed to deny it -> return 1.
            try:
                schd_ops.del_scheduler_job(schd_job.uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("del_scheduler_job should not be runned with project_login_session_uuid")
                return 1
            except Exception as e:
                pass
            try:
                schd_ops.del_scheduler_trigger(schd_trigger.uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("del_scheduler_trigger should not be runned with project_login_session_uuid")
                return 1
            except Exception as e:
                pass
            # get_current_time is unprivileged and expected to succeed.
            schd_ops.get_current_time()
            try:
                vm_ops.destroy_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("destroy_vm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                vm_ops.expunge_vm(vm_uuid, project_login_session_uuid)
                test_util.test_logger("expunge_vm should not be runned")
                return 1
            except Exception as e:
                pass
def check_pci(self):
#Haven't simulator pci device, skip to check
pass
    def check_zwatch(self):
        """Policy check for ZWatch alarm / SNS operations.

        Default flow (``get_customized() is None``): log into the IAM2
        project and, through the project session, create an SNS http
        endpoint and two topics, subscribe a topic to the endpoint, create
        a CPU alarm and a VM-state event subscription, exercise the
        update/state-change APIs, then delete everything and return
        ``self.judge(True)``.

        Customized flow: keep the plain virtual-ID session (all creations
        still use it) and expect every delete/unsubscribe operation to be
        denied; return 1 if any executes.
        """
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Resolve and log into the project owning this virtual ID.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            project_login_session_uuid = plain_user_session_uuid
        # SNS endpoint + two topics, all created under the session chosen
        # above; one topic is subscribed to the endpoint.
        http_endpoint_name='http_endpoint_for zwatch_policy_checker'
        url = 'http://localhost:8080/webhook-url'
        http_endpoint=zwt_ops.create_sns_http_endpoint(url, http_endpoint_name, session_uuid=project_login_session_uuid)
        http_endpoint_uuid=http_endpoint.uuid
        sns_topic_uuid = zwt_ops.create_sns_topic('sns_topic_for zwatch_policy_checker', session_uuid=project_login_session_uuid).uuid
        sns_topic_uuid1 = zwt_ops.create_sns_topic('sns_topic_for zwatch_policy_checker_01', session_uuid=project_login_session_uuid).uuid
        zwt_ops.subscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=project_login_session_uuid)
        # Alarm: VM CPU usage >= 10% over 60s, firing to the first topic.
        # Event subscription: VMStateChangedOnHost with NewState==Disconnected.
        namespace = 'ZStack/VM'
        actions = [{"actionUuid": sns_topic_uuid, "actionType": "sns"}]
        period = 60
        comparison_operator = 'GreaterThanOrEqualTo'
        threshold = 10
        metric_name = 'CPUUsedUtilization'
        labels = [{"key": "NewState", "op": "Equal", "value": "Disconnected"}]
        event_name = 'VMStateChangedOnHost'
        alarm_uuid = zwt_ops.create_alarm(comparison_operator, period, threshold, namespace, metric_name, session_uuid=project_login_session_uuid).uuid
        event_sub_uuid = zwt_ops.subscribe_event(namespace, event_name, actions, labels, session_uuid=project_login_session_uuid).uuid
        # Exercise the update / state-change APIs under the same session.
        zwt_ops.update_alarm(alarm_uuid, comparison_operator='GreaterThan', session_uuid=project_login_session_uuid)
        zwt_ops.update_sns_application_endpoint(http_endpoint_uuid, 'new_endpoint_name', 'new description', session_uuid=project_login_session_uuid)
        zwt_ops.add_action_to_alarm(alarm_uuid, sns_topic_uuid1, 'sns', session_uuid=project_login_session_uuid)
        #zwt_ops.remove_action_from_alarm(alarm_uuid, sns_topic_uuid, session_uuid=project_login_session_uuid)
        zwt_ops.change_alarm_state(alarm_uuid, 'disable', session_uuid=project_login_session_uuid)
        zwt_ops.change_sns_topic_state(sns_topic_uuid, 'disable', session_uuid=project_login_session_uuid)
        zwt_ops.change_sns_application_endpoint_state(http_endpoint_uuid, 'disable', session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Positive path: remove/delete everything created above through
            # the project session.
            zwt_ops.remove_action_from_alarm(alarm_uuid, sns_topic_uuid, session_uuid=project_login_session_uuid)
            zwt_ops.delete_alarm(alarm_uuid, session_uuid=project_login_session_uuid)
            zwt_ops.unsubscribe_event(event_sub_uuid, session_uuid=project_login_session_uuid)
            zwt_ops.unsubscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=project_login_session_uuid)
            zwt_ops.delete_sns_topic(sns_topic_uuid, session_uuid=project_login_session_uuid)
            zwt_ops.delete_sns_topic(sns_topic_uuid1, session_uuid=project_login_session_uuid)
            zwt_ops.delete_sns_application_endpoint(http_endpoint_uuid, session_uuid=project_login_session_uuid)
            return self.judge(True)
        else:
            # Negative path: each privileged call must raise; reaching the
            # logger line means the policy failed to deny it -> return 1.
            try:
                zwt_ops.delete_alarm(alarm_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_alarm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.unsubscribe_event(event_sub_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("unsubscribe_event should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.unsubscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("unsubscribe_sns_topic should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.delete_sns_topic(sns_topic_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_sns_topic should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.delete_sns_topic(sns_topic_uuid1, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_sns_topic should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.delete_sns_application_endpoint(http_endpoint_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_sns_application_endpoint should not be runned")
                return 1
            except Exception as e:
                pass
    def check_sns(self):
        # Exercise the SNS endpoint/topic/alarm APIs under the virtual id's
        # session and verify which operations succeed or are denied.
        virtual_id = self.test_obj.get_vid()
        plain_user_session_uuid = iam2_ops.login_iam2_virtual_id(virtual_id.name, self.password)
        if self.test_obj.get_customized() == None:
            # Default flow: log into the project that owns this virtual id and
            # use the project session for all subsequent SNS operations.
            conditions = res_ops.gen_query_conditions('virtualIDs.uuid', '=',virtual_id.uuid)
            project_ins = res_ops.query_resource(res_ops.IAM2_PROJECT, conditions)[0]
            project_login_session_uuid = iam2_ops.login_iam2_project(project_ins.name, plain_user_session_uuid).uuid
            project_linked_account_uuid = project_ins.linkedAccountUuid
        else:
            # Customized flow: operate directly with the plain user session.
            project_login_session_uuid = plain_user_session_uuid
        http_endpoint_name='http_endpoint_for zwatch_policy_checker'
        url = 'http://localhost:8080/webhook-url'
        http_endpoint=zwt_ops.create_sns_http_endpoint(url, http_endpoint_name)
        http_endpoint_uuid=http_endpoint.uuid
        sns_topic_uuid = zwt_ops.create_sns_topic('sns_topic_for zwatch_policy_checker', session_uuid=project_login_session_uuid).uuid
        sns_topic_uuid1 = zwt_ops.create_sns_topic('sns_topic_for zwatch_policy_checker_01', session_uuid=project_login_session_uuid).uuid
        zwt_ops.subscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=project_login_session_uuid)
        # Alarm on VM CPU usage >= 10 over a 60s period, firing into the topic.
        namespace = 'ZStack/VM'
        actions = [{"actionUuid": sns_topic_uuid, "actionType": "sns"}]
        period = 60
        comparison_operator = 'GreaterThanOrEqualTo'
        threshold = 10
        metric_name = 'CPUUsedUtilization'
        alarm_uuid = zwt_ops.create_alarm(comparison_operator, period, threshold, namespace, metric_name).uuid
        labels = [{"key": "NewState", "op": "Equal", "value": "Disconnected"}]
        event_name = 'VMStateChangedOnHost'
        event_sub_uuid = zwt_ops.subscribe_event(namespace, event_name, actions, labels).uuid
        zwt_ops.update_sns_application_endpoint(http_endpoint_uuid, 'new_endpoint_name', 'new description', session_uuid=project_login_session_uuid)
        zwt_ops.add_action_to_alarm(alarm_uuid, sns_topic_uuid1, 'sns')
        #zwt_ops.remove_action_from_alarm(alarm_uuid, sns_topic_uuid)
        zwt_ops.change_sns_topic_state(sns_topic_uuid, 'disable', session_uuid=project_login_session_uuid)
        zwt_ops.change_sns_application_endpoint_state(http_endpoint_uuid, 'disable', session_uuid=project_login_session_uuid)
        if self.test_obj.get_customized() == None:
            # Full-permission flow: every cleanup call is expected to succeed.
            zwt_ops.remove_action_from_alarm(alarm_uuid, sns_topic_uuid)
            zwt_ops.delete_alarm(alarm_uuid)
            zwt_ops.unsubscribe_event(event_sub_uuid)
            zwt_ops.unsubscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=project_login_session_uuid)
            zwt_ops.delete_sns_topic(sns_topic_uuid, session_uuid=project_login_session_uuid)
            zwt_ops.delete_sns_topic(sns_topic_uuid1, session_uuid=project_login_session_uuid)
            zwt_ops.delete_sns_application_endpoint(http_endpoint_uuid, session_uuid=project_login_session_uuid)
            return self.judge(True)
        else:
            # Restricted flow: each cleanup call must be denied; returning 1
            # marks failure if any call unexpectedly succeeds.
            # NOTE(review): this branch has no explicit return on success, so
            # it yields None (callers only compare against 1) - TODO confirm.
            try:
                zwt_ops.remove_action_from_alarm(alarm_uuid, sns_topic_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("remove_action_from_alarm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.delete_alarm(alarm_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_alarm should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.unsubscribe_event(event_sub_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("unsubscribe_event should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.unsubscribe_sns_topic(sns_topic_uuid, http_endpoint_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("unsubscribe_sns_topic should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.delete_sns_topic(sns_topic_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_sns_topic should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.delete_sns_topic(sns_topic_uuid1, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_sns_topic should not be runned")
                return 1
            except Exception as e:
                pass
            try:
                zwt_ops.delete_sns_application_endpoint(http_endpoint_uuid, session_uuid=project_login_session_uuid)
                test_util.test_logger("delete_sns_application_endpoint should not be runned")
                return 1
            except Exception as e:
                pass
def check_no_delete_admin_permission(self):
test_util.test_logger("check_no_delete_admin_permission")
retCode = self.check_vm_operation()
if retCode == 1:
test_util.test_fail("check_vm_operation failed")
retCode = self.check_image_operation()
if retCode == 1:
test_util.test_fail("check_image_operation failed")
retCode = self.check_snapshot()
if retCode == 1:
test_util.test_fail("check_snapshot failed")
retCode = self.check_volume_operation()
if retCode == 1:
test_util.test_fail("check_volume_operation failed")
retCode = self.check_affinity_group()
if retCode == 1:
test_util.test_fail("check_affinity_group failed")
retCode = self.check_networks()
if retCode == 1:
test_util.test_fail("check_networks failed")
retCode = self.check_eip()
if retCode == 1:
test_util.test_fail("check_eip failed")
retCode = self.check_security_group()
if retCode == 1:
test_util.test_fail("check_security_group failed")
retCode = self.check_load_balancer()
if retCode == 1:
test_util.test_fail("check_load_balancer failed")
retCode = self.check_port_forwarding()
if retCode == 1:
test_util.test_fail("check_port_forwarding failed")
retCode = self.check_scheduler()
if retCode == 1:
test_util.test_fail("check_scheduler failed")
retCode = self.check_pci()
if retCode == 1:
test_util.test_fail("check_pci failed")
retCode = self.check_zwatch()
if retCode == 1:
test_util.test_fail("check_zwatch failed")
retCode = self.check_sns()
if retCode == 1:
test_util.test_fail("check_sns failed")
    def check(self):
        """Dispatch to the permission checker(s) matching the vid's first policy statement.

        Reads the first statement's effect and action list and, for each known
        (effect, actions) combination, runs the corresponding check_* helper.
        Multiple branches can match and run for a single statement.
        """
        super(zstack_vid_policy_checker, self).check()
        # Pre-hashed password used when the virtual id was created.
        password = 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86'
        virtual_id = self.test_obj.get_vid()
        self.check_login(virtual_id.name, password)
        actions = self.test_obj.get_vid_statements()[0]['actions']
        effect = self.test_obj.get_vid_statements()[0]['effect']
        customized = self.test_obj.get_customized()
        # NOTE(review): checker_runned is assigned below but never read -
        # looks like leftover bookkeeping; confirm before removing.
        checker_runned = False
        if customized == "noDeleteAdminPermission":
            self.check_no_delete_admin_permission()
            self.judge(True)
        # VM permissions (wildcard).
        if effect == "Allow" and \
                'org.zstack.header.vm.**' in actions and \
                'org.zstack.ha.**' in actions:
            checker_runned = True
            self.check_vm_operation()
        # Image permissions.
        if effect == "Allow" and \
                'org.zstack.header.image.**' in actions and \
                'org.zstack.storage.backup.imagestore.APIGetImagesFromImageStoreBackupStorageMsg' in actions:
            checker_runned = True
            self.check_image_operation()
        # Volume permissions (explicit per-API grants).
        if effect == "Allow" and \
                'org.zstack.header.volume.APICreateDataVolumeFromVolumeTemplateMsg' in actions and \
                'org.zstack.header.volume.APIGetVolumeQosMsg' in actions and \
                'org.zstack.header.volume.APISyncVolumeSizeMsg' in actions and \
                'org.zstack.header.volume.APICreateDataVolumeFromVolumeSnapshotMsg' in actions and \
                'org.zstack.header.volume.APIResizeDataVolumeMsg' in actions and \
                'org.zstack.header.volume.APIRecoverDataVolumeMsg' in actions and \
                'org.zstack.header.volume.APIExpungeDataVolumeMsg' in actions and \
                'org.zstack.mevoco.APIQueryShareableVolumeVmInstanceRefMsg' in actions and \
                'org.zstack.header.volume.APICreateDataVolumeMsg' in actions and \
                'org.zstack.header.volume.APIGetVolumeCapabilitiesMsg' in actions and \
                'org.zstack.header.volume.APIDetachDataVolumeFromVmMsg' in actions and \
                'org.zstack.header.volume.APIDeleteVolumeQosMsg' in actions and \
                'org.zstack.header.volume.APIGetVolumeFormatMsg' in actions and \
                'org.zstack.header.volume.APIGetDataVolumeAttachableVmMsg' in actions and \
                'org.zstack.header.volume.APIAttachDataVolumeToVmMsg' in actions and \
                'org.zstack.header.volume.APIResizeRootVolumeMsg' in actions and \
                'org.zstack.header.volume.APISetVolumeQosMsg' in actions and \
                'org.zstack.header.volume.APIDeleteDataVolumeMsg' in actions and \
                'org.zstack.header.volume.APIUpdateVolumeMsg' in actions and \
                'org.zstack.header.volume.APIChangeVolumeStateMsg' in actions and \
                'org.zstack.header.volume.APIQueryVolumeMsg' in actions:
            checker_runned = True
            self.check_volume_operation()
        # VM permissions granted per-API but explicitly without the wildcard
        # (i.e. no create permission).
        if effect == "Allow" and \
                'org.zstack.header.vm.**' not in actions and \
                'org.zstack.header.vm.APIGetVmQgaMsg' in actions and \
                'org.zstack.header.vm.APIChangeVmImageMsg' in actions and \
                'org.zstack.header.vm.APISetVmSshKeyMsg' in actions and \
                'org.zstack.header.vm.APIStopVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APISetVmStaticIpMsg' in actions and \
                'org.zstack.header.vm.APIRecoverVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIQueryVmNicMsg' in actions and \
                'org.zstack.header.vm.APIStartVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIDestroyVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIGetVmConsolePasswordMsg' in actions and \
                'org.zstack.header.vm.APIDeleteVmStaticIpMsg' in actions and \
                'org.zstack.header.vm.APISetNicQosMsg' in actions and \
                'org.zstack.header.vm.APIRebootVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIGetNicQosMsg' in actions and \
                'org.zstack.header.vm.APIGetVmBootOrderMsg' in actions and \
                'org.zstack.header.vm.APIChangeVmPasswordMsg' in actions and \
                'org.zstack.header.vm.APIGetCandidatePrimaryStoragesForCreatingVmMsg' in actions and \
                'org.zstack.header.vm.APISetVmRDPMsg' in actions and \
                'org.zstack.header.vm.APIMigrateVmMsg' in actions and \
                'org.zstack.header.vm.APIGetVmMigrationCandidateHostsMsg' in actions and \
                'org.zstack.header.vm.APIAttachL3NetworkToVmMsg' in actions and \
                'org.zstack.header.vm.APIExpungeVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIGetCandidateVmForAttachingIsoMsg' in actions and \
                'org.zstack.header.vm.APIAttachIsoToVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIGetVmAttachableL3NetworkMsg' in actions and \
                'org.zstack.header.vm.APIGetVmHostnameMsg' in actions and \
                'org.zstack.header.vm.APIDeleteVmSshKeyMsg' in actions and \
                'org.zstack.header.vm.APIGetVmMonitorNumberMsg' in actions and \
                'org.zstack.header.vm.APISetVmQgaMsg' in actions and \
                'org.zstack.header.vm.APIDetachL3NetworkFromVmMsg' in actions and \
                'org.zstack.header.vm.APISetVmConsolePasswordMsg' in actions and \
                'org.zstack.header.vm.APIGetCandidateZonesClustersHostsForCreatingVmMsg' in actions and \
                'org.zstack.header.vm.APIGetVmAttachableDataVolumeMsg' in actions and \
                'org.zstack.header.vm.APIGetInterdependentL3NetworksImagesMsg' in actions and \
                'org.zstack.header.vm.APIGetCandidateIsoForAttachingVmMsg' in actions and \
                'org.zstack.header.vm.APIDeleteNicQosMsg' in actions and \
                'org.zstack.header.vm.APISetVmUsbRedirectMsg' in actions and \
                'org.zstack.header.vm.APISetVmBootOrderMsg' in actions and \
                'org.zstack.header.vm.APIGetImageCandidatesForVmToChangeMsg' in actions and \
                'org.zstack.header.vm.APIGetVmConsoleAddressMsg' in actions and \
                'org.zstack.header.vm.APIChangeInstanceOfferingMsg' in actions and \
                'org.zstack.header.vm.APIDeleteVmHostnameMsg' in actions and \
                'org.zstack.header.vm.APIGetVmUsbRedirectMsg' in actions and \
                'org.zstack.header.vm.APIQueryVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APISetVmMonitorNumberMsg' in actions and \
                'org.zstack.header.vm.APIReimageVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIResumeVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIUpdateVmNicMacMsg' in actions and \
                'org.zstack.header.vm.APIGetVmCapabilitiesMsg' in actions and \
                'org.zstack.header.vm.APIUpdateVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIGetVmSshKeyMsg' in actions and \
                'org.zstack.header.vm.APICloneVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIDeleteVmConsolePasswordMsg' in actions and \
                'org.zstack.header.vm.APISetVmHostnameMsg' in actions and \
                'org.zstack.header.vm.APIGetVmStartingCandidateClustersHostsMsg' in actions and \
                'org.zstack.header.vm.APIDetachIsoFromVmInstanceMsg' in actions and \
                'org.zstack.header.vm.APIGetVmRDPMsg' in actions and \
                'org.zstack.header.vm.APIPauseVmInstanceMsg' in actions:
            checker_runned = True
            self.check_vm_operation_without_create_permission()
        # Snapshot permissions.
        if effect == "Allow" and \
                'org.zstack.header.storage.snapshot.**' in actions and \
                'org.zstack.header.volume.APICreateVolumeSnapshotMsg' in actions:
            checker_runned = True
            self.check_snapshot()
        # Affinity-group permissions.
        if effect == "Allow" and \
                'org.zstack.header.affinitygroup.**' in actions:
            checker_runned = True
            self.check_affinity_group()
        # Network permissions.
        if effect == "Allow" and \
                'org.zstack.header.network.l3.**' in actions and \
                'org.zstack.network.service.flat.**' in actions and \
                'org.zstack.header.network.l2.APIUpdateL2NetworkMsg' in actions and \
                'org.zstack.header.network.service.APIQueryNetworkServiceProviderMsg' in actions and \
                'org.zstack.header.network.service.APIAttachNetworkServiceToL3NetworkMsg' in actions and \
                'org.zstack.network.l2.vxlan.vxlanNetworkPool.APIQueryVniRangeMsg' in actions and \
                'org.zstack.network.l2.vxlan.vxlanNetwork.APIQueryL2VxlanNetworkMsg' in actions and \
                'org.zstack.network.l2.vxlan.vxlanNetwork.APICreateL2VxlanNetworkMsg' in actions and \
                'org.zstack.network.l2.vxlan.vxlanNetworkPool.APIQueryL2VxlanNetworkPoolMsg' in actions:
            checker_runned = True
            self.check_networks()
        # EIP / VIP permissions.
        if effect == "Allow" and \
                'org.zstack.network.service.vip.**' in actions and \
                'org.zstack.network.service.eip.**' in actions and \
                'org.zstack.header.vipQos.**' in actions:
            checker_runned = True
            self.check_eip()
        # Security-group permissions.
        if effect == "Allow" and \
                'org.zstack.network.securitygroup.**' in actions:
            checker_runned = True
            self.check_security_group()
        # Load-balancer permissions.
        if effect == "Allow" and \
                'org.zstack.network.service.lb.**' in actions and \
                'org.zstack.network.service.vip.**' in actions and \
                'org.zstack.header.vipQos.**' in actions:
            checker_runned = True
            self.check_load_balancer()
        # Port-forwarding permissions.
        if effect == "Allow" and \
                'org.zstack.network.service.portforwarding.**' in actions and \
                'org.zstack.network.service.vip.**' in actions and \
                'org.zstack.header.vipQos.**' in actions:
            checker_runned = True
            self.check_port_forwarding()
        # Scheduler permissions.
        if effect == "Allow" and \
                'org.zstack.scheduler.**' in actions:
            checker_runned = True
            self.check_scheduler()
        # PCI-device permissions.
        if effect == "Allow" and \
                'org.zstack.pciDevice.APIAttachPciDeviceToVmMsg' in actions and \
                'org.zstack.pciDevice.APIUpdateHostIommuStateMsg' in actions and \
                'org.zstack.pciDevice.APIGetPciDeviceCandidatesForNewCreateVmMsg' in actions and \
                'org.zstack.pciDevice.APIDetachPciDeviceFromVmMsg' in actions and \
                'org.zstack.pciDevice.APIQueryPciDeviceMsg' in actions and \
                'org.zstack.pciDevice.APIGetPciDeviceCandidatesForAttachingVmMsg' in actions:
            checker_runned = True
            self.check_pci()
        # ZWatch + SNS permissions.
        if effect == "Allow" and \
                'org.zstack.zwatch.**' in actions and \
                'org.zstack.sns.**' in actions:
            checker_runned = True
            self.check_zwatch()
        # SNS-only permissions.
        if effect == "Allow" and \
                'org.zstack.sns.**' in actions:
            checker_runned = True
            self.check_sns()
        return self.judge(True)
| zstackio/zstack-woodpecker | zstackwoodpecker/zstackwoodpecker/zstack_test/vid_checker/zstack_vid_checker.py | Python | apache-2.0 | 135,498 | 0.006295 |
from .base_reader import BaseReader, InvalidDataDirectory # noqa
from .object_detection import ObjectDetectionReader # noqa
from .object_detection import (
COCOReader, CSVReader, FlatReader, ImageNetReader, OpenImagesReader,
PascalVOCReader, TaggerineReader
)
# Maps the lowercase reader identifier (as accepted by get_reader) to the
# class implementing that dataset format.
READERS = {
    'coco': COCOReader,
    'csv': CSVReader,
    'flat': FlatReader,
    'imagenet': ImageNetReader,
    'openimages': OpenImagesReader,
    'pascal': PascalVOCReader,
    'taggerine': TaggerineReader,
}
def get_reader(reader):
    """Return the reader class registered under *reader* (case-insensitive).

    Raises:
        ValueError: if the name does not match any entry in READERS.
    """
    key = reader.lower()
    if key in READERS:
        return READERS[key]
    raise ValueError('"{}" is not a valid reader'.format(key))
| tryolabs/luminoth | luminoth/tools/dataset/readers/__init__.py | Python | bsd-3-clause | 670 | 0 |
# Copyright (c) 2015, Nordic Semiconductor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.s
"""Package marker file."""
| strobo-inc/pc-nrfutil | nordicsemi/utility/__init__.py | Python | bsd-3-clause | 1,580 | 0.000633 |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import importlib
import os
from os import path
import pkgutil
import shutil
import sys
import tempfile
import threading
import unittest
from six import moves
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.foundation import future
from grpc.framework.interfaces.face import face
from grpc_tools import protoc
from tests.unit.framework.common import test_constants
# Directory names (relative to the per-test scratch directory) holding the
# .proto sources and the protoc-generated Python output, respectively.
_RELATIVE_PROTO_PATH = 'relative_proto_path'
_RELATIVE_PYTHON_OUT = 'relative_python_out'
# Path components (under the proto path) of every test .proto file that
# setUp materializes before invoking protoc.
_PROTO_FILES_PATH_COMPONENTS = (
    (
        'beta_grpc_plugin_test',
        'payload',
        'test_payload.proto',
    ),
    (
        'beta_grpc_plugin_test',
        'requests',
        'r',
        'test_requests.proto',
    ),
    (
        'beta_grpc_plugin_test',
        'responses',
        'test_responses.proto',
    ),
    (
        'beta_grpc_plugin_test',
        'service',
        'test_service.proto',
    ),
)
# Dotted module names under which the generated code is imported by _protoc.
_PAYLOAD_PB2 = 'beta_grpc_plugin_test.payload.test_payload_pb2'
_REQUESTS_PB2 = 'beta_grpc_plugin_test.requests.r.test_requests_pb2'
_RESPONSES_PB2 = 'beta_grpc_plugin_test.responses.test_responses_pb2'
_SERVICE_PB2 = 'beta_grpc_plugin_test.service.test_service_pb2'
# Identifiers of entities we expect to find in the generated module.
SERVICER_IDENTIFIER = 'BetaTestServiceServicer'
STUB_IDENTIFIER = 'BetaTestServiceStub'
SERVER_FACTORY_IDENTIFIER = 'beta_create_TestService_server'
STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub'
@contextlib.contextmanager
def _system_path(path_insertion):
    """Temporarily splice *path_insertion* entries into sys.path.

    The entries are inserted just after sys.path[0] so they take precedence
    over the remaining entries for the duration of the managed block.

    Args:
      path_insertion: A list of path strings to make importable.
    """
    old_system_path = sys.path[:]
    sys.path = sys.path[0:1] + path_insertion + sys.path[1:]
    try:
        yield
    finally:
        # Restore unconditionally: the original code skipped restoration when
        # the managed block raised, leaking the inserted entries into every
        # subsequent test.
        sys.path = old_system_path
def _create_directory_tree(root, path_components_sequence):
    """Create, under *root*, every directory named by the path-component tuples.

    Shared prefixes across sequences are created exactly once, so overlapping
    trees (e.g. ('a', 'b') and ('a', 'c')) do not trigger a duplicate
    os.makedirs call.
    """
    already_made = set()
    for components in path_components_sequence:
        prefix = ''
        for component in components:
            prefix = path.join(prefix, component)
            if prefix not in already_made:
                os.makedirs(path.join(root, prefix))
                already_made.add(prefix)
def _massage_proto_content(raw_proto_content):
    """Rewrite a test .proto so its imports and package are test-local.

    Import statements are re-pointed at the beta_grpc_plugin_test tree and
    the package is renamed so the generated code cannot collide with the
    non-beta plugin test's output.
    """
    rewritten = raw_proto_content.replace(
        b'import "tests/protoc_plugin/protos/',
        b'import "beta_grpc_plugin_test/')
    return rewritten.replace(b'package grpc_protoc_plugin;',
                             b'package beta_grpc_protoc_plugin;')
def _packagify(directory):
    """Drop an empty __init__.py into *directory* and every subdirectory.

    This turns the protoc output tree into importable Python packages.
    """
    for dirpath, _, _ in os.walk(directory):
        with open(path.join(dirpath, '__init__.py'), 'wb') as init_file:
            init_file.write(b'')
class _ServicerMethods(object):
    """TestService method implementations with test-controlled pause/fail.

    The pause() and fail() context managers let tests make in-flight RPCs
    block or raise, so expiration, cancellation and failure paths can be
    exercised deterministically.
    """
    def __init__(self, payload_pb2, responses_pb2):
        # Condition guards the _paused/_fail flags shared with RPC threads.
        self._condition = threading.Condition()
        self._paused = False
        self._fail = False
        self._payload_pb2 = payload_pb2
        self._responses_pb2 = responses_pb2
    @contextlib.contextmanager
    def pause(self):  # pylint: disable=invalid-name
        """While active, every method blocks in _control() before responding."""
        with self._condition:
            self._paused = True
        yield
        with self._condition:
            self._paused = False
            self._condition.notify_all()
    @contextlib.contextmanager
    def fail(self):  # pylint: disable=invalid-name
        """While active, every method raises ValueError from _control()."""
        with self._condition:
            self._fail = True
        yield
        with self._condition:
            self._fail = False
    def _control(self):  # pylint: disable=invalid-name
        # Honor the fail()/pause() context managers before emitting a response.
        with self._condition:
            if self._fail:
                raise ValueError()
            while self._paused:
                self._condition.wait()
    def UnaryCall(self, request, unused_rpc_context):
        # Respond with a COMPRESSABLE payload of the requested size.
        response = self._responses_pb2.SimpleResponse()
        response.payload.payload_type = self._payload_pb2.COMPRESSABLE
        response.payload.payload_compressable = 'a' * request.response_size
        self._control()
        return response
    def StreamingOutputCall(self, request, unused_rpc_context):
        # One response per requested response_parameters entry.
        for parameter in request.response_parameters:
            response = self._responses_pb2.StreamingOutputCallResponse()
            response.payload.payload_type = self._payload_pb2.COMPRESSABLE
            response.payload.payload_compressable = 'a' * parameter.size
            self._control()
            yield response
    def StreamingInputCall(self, request_iter, unused_rpc_context):
        # Sum the payload sizes of all requests into a single response.
        response = self._responses_pb2.StreamingInputCallResponse()
        aggregated_payload_size = 0
        for request in request_iter:
            aggregated_payload_size += len(request.payload.payload_compressable)
        response.aggregated_payload_size = aggregated_payload_size
        self._control()
        return response
    def FullDuplexCall(self, request_iter, unused_rpc_context):
        # Stream responses as requests arrive.
        for request in request_iter:
            for parameter in request.response_parameters:
                response = self._responses_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = self._payload_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                yield response
    def HalfDuplexCall(self, request_iter, unused_rpc_context):
        # Buffer every response, then stream them once the request stream ends.
        responses = []
        for request in request_iter:
            for parameter in request.response_parameters:
                response = self._responses_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = self._payload_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                responses.append(response)
        for response in responses:
            yield response
@contextlib.contextmanager
def _CreateService(payload_pb2, responses_pb2, service_pb2):
    """Provides a servicer backend and a stub.

    The servicer is just the implementation of the actual servicer passed to the
    face player of the python RPC implementation; the two are detached.

    Args:
      payload_pb2: The generated payload module.
      responses_pb2: The generated responses module.
      service_pb2: The generated service module.

    Yields:
      A (servicer_methods, stub) pair where servicer_methods is the back-end of
      the service bound to the stub and stub is the stub on which to invoke
      RPCs.
    """
    servicer_methods = _ServicerMethods(payload_pb2, responses_pb2)

    # Thin adapter: subclass the generated servicer base and delegate every
    # method to servicer_methods so tests can pause/fail the back-end.
    class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):
        def UnaryCall(self, request, context):
            return servicer_methods.UnaryCall(request, context)
        def StreamingOutputCall(self, request, context):
            return servicer_methods.StreamingOutputCall(request, context)
        def StreamingInputCall(self, request_iter, context):
            return servicer_methods.StreamingInputCall(request_iter, context)
        def FullDuplexCall(self, request_iter, context):
            return servicer_methods.FullDuplexCall(request_iter, context)
        def HalfDuplexCall(self, request_iter, context):
            return servicer_methods.HalfDuplexCall(request_iter, context)
    servicer = Servicer()
    server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(servicer)
    # Port 0 lets the OS pick a free port; the stub connects to it below.
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = implementations.insecure_channel('localhost', port)
    stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel)
    yield servicer_methods, stub
    server.stop(0)
@contextlib.contextmanager
def _CreateIncompleteService(service_pb2):
    """Yield a stub bound to a servicer that implements no methods.

    Every RPC made through the stub should therefore abort with
    UNIMPLEMENTED.

    Args:
      service_pb2: The service_pb2 module generated by this test.

    Yields:
      A (None, stub) pair, mirroring _CreateService's shape; there are no
      servicer methods to hand back.
    """
    servicer_base = getattr(service_pb2, SERVICER_IDENTIFIER)

    class Servicer(servicer_base):
        pass

    server = getattr(service_pb2, SERVER_FACTORY_IDENTIFIER)(Servicer())
    port = server.add_insecure_port('[::]:0')
    server.start()
    stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(
        implementations.insecure_channel('localhost', port))
    yield None, stub
    server.stop(0)
def _streaming_input_request_iterator(payload_pb2, requests_pb2):
    """Yield three identical single-character COMPRESSABLE input requests."""
    request_count = 3
    for _ in range(request_count):
        message = requests_pb2.StreamingInputCallRequest()
        message.payload.payload_type = payload_pb2.COMPRESSABLE
        message.payload.payload_compressable = 'a'
        yield message
def _streaming_output_request(requests_pb2):
    """Build a request asking for three responses of sizes 1, 2 and 3 bytes."""
    request = requests_pb2.StreamingOutputCallRequest()
    for size in (1, 2, 3):
        request.response_parameters.add(size=size, interval_us=0)
    return request
def _full_duplex_request_iterator(requests_pb2):
    """Yield two requests: one wanting a single response, one wanting two."""
    for parameter_sizes in ((1,), (2, 3)):
        request = requests_pb2.StreamingOutputCallRequest()
        for size in parameter_sizes:
            request.response_parameters.add(size=size, interval_us=0)
        yield request
class PythonPluginTest(unittest.TestCase):
"""Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.future()`) only gives futures for the *response-unary*
methods and does not exist for response-streaming methods.
"""
    def setUp(self):
        # Work inside a throwaway directory (removed by tearDown) so generated
        # files never pollute the source tree.
        self._directory = tempfile.mkdtemp(dir='.')
        self._proto_path = path.join(self._directory, _RELATIVE_PROTO_PATH)
        self._python_out = path.join(self._directory, _RELATIVE_PYTHON_OUT)
        os.makedirs(self._proto_path)
        os.makedirs(self._python_out)
        # Pre-create every directory that will hold a .proto file.
        directories_path_components = {
            proto_file_path_components[:-1]
            for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS
        }
        _create_directory_tree(self._proto_path, directories_path_components)
        self._proto_file_names = set()
        # Copy each packaged .proto into the tree, rewriting its imports and
        # package name for this test via _massage_proto_content.
        for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS:
            raw_proto_content = pkgutil.get_data(
                'tests.protoc_plugin.protos',
                path.join(*proto_file_path_components[1:]))
            massaged_proto_content = _massage_proto_content(raw_proto_content)
            proto_file_name = path.join(self._proto_path,
                                        *proto_file_path_components)
            with open(proto_file_name, 'wb') as proto_file:
                proto_file.write(massaged_proto_content)
            self._proto_file_names.add(proto_file_name)
    def tearDown(self):
        # Remove the scratch directory (and all generated code) from setUp.
        shutil.rmtree(self._directory)
    def _protoc(self):
        """Run protoc with the gRPC plugin, then import the generated modules.

        Stores the generated payload/requests/responses/service modules on
        self for use by the individual test methods.
        """
        args = [
            '',
            '--proto_path={}'.format(self._proto_path),
            '--python_out={}'.format(self._python_out),
            '--grpc_python_out=grpc_1_0:{}'.format(self._python_out),
        ] + list(self._proto_file_names)
        protoc_exit_code = protoc.main(args)
        self.assertEqual(0, protoc_exit_code)
        # Turn the output tree into importable packages, then import the
        # generated modules with the output directory temporarily on sys.path.
        _packagify(self._python_out)
        with _system_path([self._python_out]):
            self._payload_pb2 = importlib.import_module(_PAYLOAD_PB2)
            self._requests_pb2 = importlib.import_module(_REQUESTS_PB2)
            self._responses_pb2 = importlib.import_module(_RESPONSES_PB2)
            self._service_pb2 = importlib.import_module(_SERVICE_PB2)
    def testImportAttributes(self):
        # The generated module must expose the servicer, the stub and both
        # factory functions.
        self._protoc()
        # check that we can access the generated module and its members.
        self.assertIsNotNone(
            getattr(self._service_pb2, SERVICER_IDENTIFIER, None))
        self.assertIsNotNone(getattr(self._service_pb2, STUB_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(self._service_pb2, SERVER_FACTORY_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(self._service_pb2, STUB_FACTORY_IDENTIFIER, None))
    def testUpDown(self):
        # Server and stub can be created and torn down without issuing an RPC.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2):
            self._requests_pb2.SimpleRequest(response_size=13)
def testIncompleteServicer(self):
self._protoc()
with _CreateIncompleteService(self._service_pb2) as (_, stub):
request = self._requests_pb2.SimpleRequest(response_size=13)
try:
stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
except face.AbortionError as error:
self.assertEqual(interfaces.StatusCode.UNIMPLEMENTED,
error.code)
    def testUnaryCall(self):
        # Stub response must equal the servicer's directly computed response.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT)
            expected_response = methods.UnaryCall(request, 'not a real context!')
            self.assertEqual(expected_response, response)
    def testUnaryCallFuture(self):
        # Submitting the future must not block even while the servicer is paused.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            # Check that the call does not block waiting for the server to respond.
            with methods.pause():
                response_future = stub.UnaryCall.future(
                    request, test_constants.LONG_TIMEOUT)
            response = response_future.result()
            expected_response = methods.UnaryCall(request, 'not a real RpcContext!')
            self.assertEqual(expected_response, response)
    def testUnaryCallFutureExpired(self):
        # A paused servicer plus a short timeout must raise ExpirationError.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            with methods.pause():
                response_future = stub.UnaryCall.future(
                    request, test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    response_future.result()
    def testUnaryCallFutureCancelled(self):
        # Cancelling while the servicer is paused must mark the future cancelled.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            with methods.pause():
                response_future = stub.UnaryCall.future(request, 1)
                response_future.cancel()
                self.assertTrue(response_future.cancelled())
    def testUnaryCallFutureFailed(self):
        # A failing servicer must propagate an exception through the future.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = self._requests_pb2.SimpleRequest(response_size=13)
            with methods.fail():
                response_future = stub.UnaryCall.future(
                    request, test_constants.LONG_TIMEOUT)
                self.assertIsNotNone(response_future.exception())
    def testStreamingOutputCall(self):
        # Streamed responses must match the servicer's directly computed ones.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            responses = stub.StreamingOutputCall(request,
                                                 test_constants.LONG_TIMEOUT)
            expected_responses = methods.StreamingOutputCall(
                request, 'not a real RpcContext!')
            # zip_longest also catches length mismatches between the streams.
            for expected_response, response in moves.zip_longest(
                    expected_responses, responses):
                self.assertEqual(expected_response, response)
    def testStreamingOutputCallExpired(self):
        # Draining a paused stream under a short timeout must raise.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            with methods.pause():
                responses = stub.StreamingOutputCall(
                    request, test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    list(responses)
    def testStreamingOutputCallCancelled(self):
        # Cancelling mid-stream must raise CancellationError on the next read.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            responses = stub.StreamingOutputCall(request,
                                                 test_constants.LONG_TIMEOUT)
            next(responses)
            responses.cancel()
            with self.assertRaises(face.CancellationError):
                next(responses)
    def testStreamingOutputCallFailed(self):
        # A failing servicer must surface RemoteError on the first read.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            request = _streaming_output_request(self._requests_pb2)
            with methods.fail():
                responses = stub.StreamingOutputCall(request, 1)
                self.assertIsNotNone(responses)
                with self.assertRaises(face.RemoteError):
                    next(responses)
    def testStreamingInputCall(self):
        # Aggregated response must match the servicer's direct computation.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            response = stub.StreamingInputCall(
                _streaming_input_request_iterator(self._payload_pb2,
                                                  self._requests_pb2),
                test_constants.LONG_TIMEOUT)
            expected_response = methods.StreamingInputCall(
                _streaming_input_request_iterator(self._payload_pb2,
                                                  self._requests_pb2),
                'not a real RpcContext!')
            self.assertEqual(expected_response, response)
    def testStreamingInputCallFuture(self):
        # Submission must not block while paused; the result arrives after resume.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.LONG_TIMEOUT)
            response = response_future.result()
            expected_response = methods.StreamingInputCall(
                _streaming_input_request_iterator(self._payload_pb2,
                                                  self._requests_pb2),
                'not a real RpcContext!')
            self.assertEqual(expected_response, response)
    def testStreamingInputCallFutureExpired(self):
        # With a short timeout while paused, result() raises and exception()
        # reports the same ExpirationError.
        self._protoc()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with methods.pause():
                response_future = stub.StreamingInputCall.future(
                    _streaming_input_request_iterator(self._payload_pb2,
                                                      self._requests_pb2),
                    test_constants.SHORT_TIMEOUT)
                with self.assertRaises(face.ExpirationError):
                    response_future.result()
                self.assertIsInstance(response_future.exception(),
                                      face.ExpirationError)
def testStreamingInputCallFutureCancelled(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.pause():
response_future = stub.StreamingInputCall.future(
_streaming_input_request_iterator(self._payload_pb2,
self._requests_pb2),
test_constants.LONG_TIMEOUT)
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(future.CancelledError):
response_future.result()
def testStreamingInputCallFutureFailed(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.fail():
response_future = stub.StreamingInputCall.future(
_streaming_input_request_iterator(self._payload_pb2,
self._requests_pb2),
test_constants.LONG_TIMEOUT)
self.assertIsNotNone(response_future.exception())
def testFullDuplexCall(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
responses = stub.FullDuplexCall(
_full_duplex_request_iterator(self._requests_pb2),
test_constants.LONG_TIMEOUT)
expected_responses = methods.FullDuplexCall(
_full_duplex_request_iterator(self._requests_pb2),
'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testFullDuplexCallExpired(self):
self._protoc()
request_iterator = _full_duplex_request_iterator(self._requests_pb2)
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.pause():
responses = stub.FullDuplexCall(request_iterator,
test_constants.SHORT_TIMEOUT)
with self.assertRaises(face.ExpirationError):
list(responses)
def testFullDuplexCallCancelled(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
request_iterator = _full_duplex_request_iterator(self._requests_pb2)
responses = stub.FullDuplexCall(request_iterator,
test_constants.LONG_TIMEOUT)
next(responses)
responses.cancel()
with self.assertRaises(face.CancellationError):
next(responses)
def testFullDuplexCallFailed(self):
self._protoc()
request_iterator = _full_duplex_request_iterator(self._requests_pb2)
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
with methods.fail():
responses = stub.FullDuplexCall(request_iterator,
test_constants.LONG_TIMEOUT)
self.assertIsNotNone(responses)
with self.assertRaises(face.RemoteError):
next(responses)
def testHalfDuplexCall(self):
self._protoc()
with _CreateService(self._payload_pb2, self._responses_pb2,
self._service_pb2) as (methods, stub):
def half_duplex_request_iterator():
request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = self._requests_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
test_constants.LONG_TIMEOUT)
expected_responses = methods.HalfDuplexCall(
half_duplex_request_iterator(), 'not a real RpcContext!')
for check in moves.zip_longest(expected_responses, responses):
expected_response, response = check
self.assertEqual(expected_response, response)
    def testHalfDuplexCallWedged(self):
        """A request iterator that never finishes must expire the call.

        The request iterator blocks on a condition variable while the
        ``wait()`` context manager is active, so the client can never
        finish sending; the short-timeout call must therefore raise
        ``face.ExpirationError`` instead of hanging.
        """
        self._protoc()
        condition = threading.Condition()
        # Single-element list used as a mutable flag shared with closures.
        wait_cell = [False]
        @contextlib.contextmanager
        def wait(): # pylint: disable=invalid-name
            # Where's Python 3's 'nonlocal' statement when you need it?
            with condition:
                wait_cell[0] = True
            yield
            with condition:
                wait_cell[0] = False
                condition.notify_all()
        def half_duplex_request_iterator():
            request = self._requests_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            # Block here until wait() exits, wedging the request stream.
            with condition:
                while wait_cell[0]:
                    condition.wait()
        with _CreateService(self._payload_pb2, self._responses_pb2,
                            self._service_pb2) as (methods, stub):
            with wait():
                responses = stub.HalfDuplexCall(half_duplex_request_iterator(),
                                                test_constants.SHORT_TIMEOUT)
                # half-duplex waits for the client to send all info
                with self.assertRaises(face.ExpirationError):
                    next(responses)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| endlessm/chromium-browser | third_party/grpc/src/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py | Python | bsd-3-clause | 26,919 | 0.000186 |
id_mappings = {
"EX1_097": "Abomination",
"CS2_188": "Abusive Sergeant",
"EX1_007": "Acolyte of Pain",
"NEW1_010": "Al'Akir the Windlord",
"EX1_006": "Alarm-o-Bot",
"EX1_382": "Aldor Peacekeeper",
"EX1_561": "Alexstrasza",
"EX1_393": "Amani Berserker",
"CS2_038": "Ancestral Spirit",
"EX1_057": "Ancient Brewmaster",
"EX1_584": "Ancient Mage",
"NEW1_008b": "Ancient Secrets",
"NEW1_008a": "Ancient Teachings",
"EX1_045": "Ancient Watcher",
"NEW1_008": "Ancient of Lore",
"EX1_178": "Ancient of War",
"EX1_009": "Angry Chicken",
"EX1_398": "Arathi Weaponsmith",
"EX1_089": "Arcane Golem",
"EX1_559": "Archmage Antonidas",
"EX1_067": "Argent Commander",
"EX1_362": "Argent Protector",
"EX1_008": "Argent Squire",
"EX1_402": "Armorsmith",
"EX1_383t": "Ashbringer",
"EX1_591": "Auchenai Soulpriest",
"EX1_384": "Avenging Wrath",
"EX1_284": "Azure Drake",
"EX1_110t": "Baine Bloodhoof",
"EX1_014t": "Bananas",
"EX1_320": "Bane of Doom",
"EX1_249": "Baron Geddon",
"EX1_398t": "Battle Axe",
"EX1_392": "Battle Rage",
"EX1_165b": "Bear Form",
"EX1_549": "Bestial Wrath",
"EX1_126": "Betrayal",
"EX1_005": "Big Game Hunter",
"EX1_570": "Bite",
"CS2_233": "Blade Flurry",
"EX1_355": "Blessed Champion",
"EX1_363": "Blessing of Wisdom",
"CS2_028": "Blizzard",
"EX1_323w": "Blood Fury",
"CS2_059": "Blood Imp",
"EX1_590": "Blood Knight",
"EX1_012": "Bloodmage Thalnos",
"NEW1_025": "Bloodsail Corsair",
"NEW1_018": "Bloodsail Raider",
"EX1_407": "Brawl",
"EX1_091": "Cabal Shadow Priest",
"EX1_110": "Cairne Bloodhoof",
"NEW1_024": "Captain Greenskin",
"EX1_165a": "Cat Form",
"EX1_573": "Cenarius",
"EX1_621": "Circle of Healing",
"CS2_073": "Cold Blood",
"EX1_050": "Coldlight Oracle",
"EX1_103": "Coldlight Seer",
"NEW1_036": "Commanding Shout",
"EX1_128": "Conceal",
"EX1_275": "Cone of Cold",
"EX1_287": "Counterspell",
"EX1_059": "Crazed Alchemist",
"EX1_603": "Cruel Taskmaster",
"EX1_595": "Cult Master",
"skele21": "Damaged Golem",
"EX1_046": "Dark Iron Dwarf",
"EX1_617": "Deadly Shot",
"NEW1_030": "Deathwing",
"EX1_130a": "Defender",
"EX1_093": "Defender of Argus",
"EX1_131t": "Defias Bandit",
"EX1_131": "Defias Ringleader",
"EX1_573a": "Demigod's Favor",
"EX1_102": "Demolisher",
"EX1_596": "Demonfire",
"EX1_tk29": "Devilsaur",
"EX1_162": "Dire Wolf Alpha",
"EX1_166b": "Dispel",
"EX1_349": "Divine Favor",
"EX1_310": "Doomguard",
"EX1_567": "Doomhammer",
"NEW1_021": "Doomsayer",
"NEW1_022": "Dread Corsair",
"DREAM_04": "Dream",
"EX1_165t2": "Druid of the Claw (bear)",
"EX1_165": "Druid of the Claw",
"EX1_165t1": "Druid of the Claw (cat)",
"EX1_243": "Dust Devil",
"EX1_536": "Eaglehorn Bow",
"EX1_250": "Earth Elemental",
"EX1_245": "Earth Shock",
"CS2_117": "Earthen Ring Farseer",
"EX1_613": "Edwin VanCleef",
"DREAM_03": "Emerald Drake",
"EX1_170": "Emperor Cobra",
"EX1_619": "Equality",
"EX1_274": "Ethereal Arcanist",
"EX1_124": "Eviscerate",
"EX1_537": "Explosive Shot",
"EX1_610": "Explosive Trap",
"EX1_132": "Eye for an Eye",
"EX1_564": "Faceless Manipulator",
"NEW1_023": "Faerie Dragon",
"CS2_053": "Far Sight",
"EX1_301": "Felguard",
"CS1_069": "Fen Creeper",
"EX1_248": "Feral Spirit",
"EX1_finkle": "Finkle Einhorn",
"EX1_319": "Flame Imp",
"EX1_614t": "Flame of Azzinoth",
"EX1_544": "Flare",
"tt_004": "Flesheating Ghoul",
"EX1_571": "Force of Nature",
"EX1_251": "Forked Lightning",
"EX1_611": "Freezing Trap",
"EX1_283": "Frost Elemental",
"EX1_604": "Frothing Berserker",
"EX1_095": "Gadgetzan Auctioneer",
"DS1_188": "Gladiator's Longbow",
"NEW1_040t": "Gnoll",
"EX1_411": "Gorehowl",
"EX1_414": "Grommash Hellscream",
"NEW1_038": "Gruul",
"EX1_558": "Harrison Jones",
"EX1_556": "Harvest Golem",
"EX1_137": "Headcrack",
"EX1_409t": "Heavy Axe",
"NEW1_040": "Hogger",
"EX1_624": "Holy Fire",
"EX1_365": "Holy Wrath",
"EX1_538t": "Hound",
"NEW1_017": "Hungry Crab",
"EX1_534t": "Hyena",
"EX1_289": "Ice Barrier",
"EX1_295": "Ice Block",
"CS2_031": "Ice Lance",
"EX1_614": "Illidan Stormrage",
"EX1_598": "Imp",
"EX1_597": "Imp Master",
"EX1_tk34": "Infernal",
"CS2_181": "Injured Blademaster",
"CS1_129": "Inner Fire",
"EX1_607": "Inner Rage",
"CS2_203": "Ironbeak Owl",
"EX1_017": "Jungle Panther",
"EX1_166": "Keeper of the Grove",
"NEW1_005": "Kidnapper",
"EX1_543": "King Krush",
"EX1_014": "King Mukla",
"EX1_612": "Kirin Tor Mage",
"NEW1_019": "Knife Juggler",
"DREAM_01": "Laughing Sister",
"EX1_241": "Lava Burst",
"EX1_354": "Lay on Hands",
"EX1_160b": "Leader of the Pack",
"EX1_116": "Leeroy Jenkins",
"EX1_029": "Leper Gnome",
"EX1_238": "Lightning Bolt",
"EX1_259": "Lightning Storm",
"EX1_335": "Lightspawn",
"EX1_001": "Lightwarden",
"EX1_341": "Lightwell",
"EX1_096": "Loot Hoarder",
"EX1_323": "Lord Jaraxxus",
"EX1_100": "Lorewalker Cho",
"EX1_082": "Mad Bomber",
"EX1_563": "Malygos",
"EX1_055": "Mana Addict",
"EX1_575": "Mana Tide Totem",
"EX1_616": "Mana Wraith",
"NEW1_012": "Mana Wyrm",
"EX1_155": "Mark of Nature",
"EX1_155b": "Mark of Nature",
"EX1_155a": "Mark of Nature",
"EX1_626": "Mass Dispel",
"NEW1_037": "Master Swordsmith",
"NEW1_014": "Master of Disguise",
"NEW1_029": "Millhouse Manastorm",
"EX1_085": "Mind Control Tech",
"EX1_345": "Mindgames",
"EX1_294": "Mirror Entity",
"EX1_533": "Misdirection",
"EX1_396": "Mogu'shan Warden",
"EX1_620": "Molten Giant",
"EX1_166a": "Moonfire",
"EX1_408": "Mortal Strike",
"EX1_105": "Mountain Giant",
"EX1_509": "Murloc Tidecaller",
"EX1_507": "Murloc Warleader",
"EX1_557": "Nat Pagle",
"EX1_161": "Naturalize",
"DREAM_05": "Nightmare",
"EX1_130": "Noble Sacrifice",
"EX1_164b": "Nourish",
"EX1_164a": "Nourish",
"EX1_164": "Nourish",
"EX1_560": "Nozdormu",
"EX1_562": "Onyxia",
"EX1_160t": "Panther",
"EX1_522": "Patient Assassin",
"EX1_133": "Perdition's Blade",
"EX1_076": "Pint-Sized Summoner",
"EX1_313": "Pit Lord",
"EX1_316": "Power Overwhelming",
"EX1_160": "Power of the Wild",
"EX1_145": "Preparation",
"EX1_583": "Priestess of Elune",
"EX1_350": "Prophet Velen",
"EX1_279": "Pyroblast",
"EX1_044": "Questing Adventurer",
"EX1_412": "Raging Worgen",
"EX1_298": "Ragnaros the Firelord",
"CS2_104": "Rampage",
"CS2_161": "Ravenholdt Assassin",
"EX1_136": "Redemption",
"EX1_379": "Repentance",
"EX1_178a": "Rooted",
"EX1_134": "SI:7 Agent",
"EX1_578": "Savagery",
"EX1_534": "Savannah Highmane",
"EX1_020": "Scarlet Crusader",
"EX1_531": "Scavenging Hyena",
"EX1_586": "Sea Giant",
"EX1_080": "Secretkeeper",
"EX1_317": "Sense Demons",
"EX1_334": "Shadow Madness",
"EX1_345t": "Shadow of Nothing",
"EX1_303": "Shadowflame",
"EX1_625": "Shadowform",
"EX1_144": "Shadowstep",
"EX1_573b": "Shan'do's Lesson",
"EX1_410": "Shield Slam",
"EX1_405": "Shieldbearer",
"EX1_332": "Silence",
"CS2_151": "Silver Hand Knight",
"EX1_023": "Silvermoon Guardian",
"EX1_309": "Siphon Soul",
"EX1_391": "Slam",
"EX1_554t": "Snake",
"EX1_554": "Snake Trap",
"EX1_609": "Snipe",
"EX1_608": "Sorcerer's Apprentice",
"EX1_158": "Soul of the Forest",
"NEW1_027": "Southsea Captain",
"CS2_146": "Southsea Deckhand",
"tt_010a": "Spellbender (minion)",
"tt_010": "Spellbender",
"EX1_048": "Spellbreaker",
"EX1_tk11": "Spirit Wolf",
"CS2_221": "Spiteful Smith",
"CS2_152": "Squire",
"EX1_tk28": "Squirrel",
"NEW1_041": "Stampeding Kodo",
"NEW1_007a": "Starfall",
"NEW1_007b": "Starfall",
"NEW1_007": "Starfall",
"EX1_247": "Stormforged Axe",
"EX1_028": "Stranglethorn Tiger",
"EX1_160a": "Summon a Panther",
"EX1_315": "Summoning Portal",
"EX1_058": "Sunfury Protector",
"EX1_032": "Sunwalker",
"EX1_366": "Sword of Justice",
"EX1_016": "Sylvanas Windrunner",
"EX1_390": "Tauren Warrior",
"EX1_623": "Temple Enforcer",
"EX1_577": "The Beast",
"EX1_002": "The Black Knight",
"EX1_339": "Thoughtsteal",
"EX1_021": "Thrallmar Farseer",
"EX1_083": "Tinkmaster Overspark",
"EX1_383": "Tirion Fordring",
"EX1_tk9": "Treant (charge)",
"EX1_573t": "Treant (taunt)",
"EX1_158t": "Treant",
"EX1_043": "Twilight Drake",
"EX1_312": "Twisting Nether",
"EX1_258": "Unbound Elemental",
"EX1_538": "Unleash the Hounds",
"EX1_409": "Upgrade!",
"EX1_178b": "Uproot",
"EX1_594": "Vaporize",
"CS2_227": "Venture Co. Mercenary",
"NEW1_026t": "Violet Apprentice",
"NEW1_026": "Violet Teacher",
"EX1_304": "Void Terror",
"ds1_whelptoken": "Whelp",
"EX1_116t": "Whelp",
"NEW1_020": "Wild Pyromancer",
"EX1_033": "Windfury Harpy",
"CS2_231": "Wisp",
"EX1_010": "Worgen Infiltrator",
"EX1_317t": "Worthless Imp",
"EX1_154b": "Wrath",
"EX1_154a": "Wrath",
"EX1_154": "Wrath",
"CS2_169": "Young Dragonhawk",
"EX1_004": "Young Priestess",
"EX1_049": "Youthful Brewmaster",
"EX1_572": "Ysera",
"DREAM_02": "Ysera Awakens",
"EX1_066": "Acidic Swamp Ooze",
"CS2_041": "Ancestral Healing",
"NEW1_031": "Animal Companion",
"CS2_025": "Arcane Explosion",
"CS2_023": "Arcane Intellect",
"EX1_277": "Arcane Missiles",
"DS1_185": "Arcane Shot",
"CS2_112": "Arcanite Reaper",
"CS2_155": "Archmage",
"CS2_080": "Assassin's Blade",
"CS2_076": "Assassinate",
"GAME_002": "Avatar of the Coin",
"CS2_072": "Backstab",
"CS2_092": "Blessing of Kings",
"CS2_087": "Blessing of Might",
"CS2_172": "Bloodfen Raptor",
"CS2_046": "Bloodlust",
"CS2_173": "Bluegill Warrior",
"CS2_boar": "Boar",
"CS2_187": "Booty Bay Bodyguard",
"CS2_200": "Boulderfist Ogre",
"CS2_103": "Charge",
"CS2_182": "Chillwind Yeti",
"CS2_005": "Claw",
"CS2_114": "Cleave",
"CS2_093": "Consecration",
"CS2_201": "Core Hound",
"CS2_063": "Corruption",
"EX1_582": "Dalaran Mage",
"DS1_055": "Darkscale Healer",
"CS2_074": "Deadly Poison",
"CS2_236": "Divine Spirit",
"EX1_025": "Dragonling Mechanic",
"CS2_061": "Drain Life",
"CS2_064": "Dread Infernal",
"CS2_189": "Elven Archer",
"CS2_013t": "Excess Mana",
"CS2_108": "Execute",
"EX1_129": "Fan of Knives",
"CS2_106": "Fiery War Axe",
"CS2_042": "Fire Elemental",
"CS2_029": "Fireball",
"CS2_032": "Flamestrike",
"EX1_565": "Flametongue Totem",
"hexfrog": "Frog",
"CS2_026": "Frost Nova",
"CS2_037": "Frost Shock",
"CS2_024": "Frostbolt",
"CS2_121": "Frostwolf Grunt",
"CS2_226": "Frostwolf Warlord",
"CS2_147": "Gnomish Inventor",
"CS1_042": "Goldshire Footman",
"EX1_508": "Grimscale Oracle",
"CS2_088": "Guardian of Kings",
"EX1_399": "Gurubashi Berserker",
"CS2_094": "Hammer of Wrath",
"EX1_371": "Hand of Protection",
"NEW1_009": "Healing Totem",
"CS2_007": "Healing Touch",
"CS2_062": "Hellfire",
"CS2_105": "Heroic Strike",
"EX1_246": "Hex",
"CS2_089": "Holy Light",
"CS1_112": "Holy Nova",
"CS1_130": "Holy Smite",
"DS1_070": "Houndmaster",
"NEW1_034": "Huffer",
"EX1_360": "Humility",
"CS2_084": "Hunter's Mark",
"EX1_169": "Innervate",
"CS2_232": "Ironbark Protector",
"CS2_141": "Ironforge Rifleman",
"CS2_125": "Ironfur Grizzly",
"EX1_539": "Kill Command",
"CS2_142": "Kobold Geomancer",
"NEW1_011": "Kor'kron Elite",
"NEW1_033": "Leokk",
"CS2_091": "Light's Justice",
"CS2_162": "Lord of the Arena",
"CS2_118": "Magma Rager",
"CS2_009": "Mark of the Wild",
"EX1_025t": "Mechanical Dragonling",
"DS1_233": "Mind Blast",
"CS1_113": "Mind Control",
"CS2_003": "Mind Vision",
"CS2_mirror": "Mirror Image (minion)",
"CS2_027": "Mirror Image",
"NEW1_032": "Misha",
"CS2_008": "Moonfire",
"EX1_302": "Mortal Coil",
"DS1_183": "Multi-Shot",
"CS2_168": "Murloc Raider",
"EX1_506a": "Murloc Scout",
"EX1_506": "Murloc Tidehunter",
"GAME_006": "NOOOOOOOOOOOO",
"EX1_593": "Nightblade",
"CS2_235": "Northshire Cleric",
"EX1_015": "Novice Engineer",
"CS2_119": "Oasis Snapjaw",
"CS2_197": "Ogre Magi",
"CS2_022": "Polymorph",
"CS2_004": "Power Word: Shield",
"CS2_122": "Raid Leader",
"CS2_196": "Razorfen Hunter",
"CS2_213": "Reckless Rocketeer",
"CS2_120": "River Crocolisk",
"CS2_045": "Rockbiter Weapon",
"NEW1_003": "Sacrificial Pact",
"EX1_581": "Sap",
"CS2_011": "Savage Roar",
"CS2_050": "Searing Totem",
"CS2_179": "Sen'jin Shieldmasta",
"CS2_057": "Shadow Bolt",
"EX1_622": "Shadow Word: Death",
"CS2_234": "Shadow Word: Pain",
"EX1_019": "Shattered Sun Cleric",
"CS2_tk1": "Sheep",
"EX1_606": "Shield Block",
"EX1_278": "Shiv",
"CS2_101t": "Silver Hand Recruit",
"CS2_127": "Silverback Patriarch",
"CS2_075": "Sinister Strike",
"skele11": "Skeleton",
"EX1_308": "Soulfire",
"CS2_077": "Sprint",
"EX1_173": "Starfire",
"CS2_237": "Starving Buzzard",
"CS2_051": "Stoneclaw Totem",
"CS2_171": "Stonetusk Boar",
"CS2_150": "Stormpike Commando",
"CS2_222": "Stormwind Champion",
"CS2_131": "Stormwind Knight",
"EX1_306": "Succubus",
"CS2_012": "Swipe",
"GAME_005": "The Coin",
"DS1_175": "Timber Wolf",
"EX1_244": "Totemic Might",
"DS1_184": "Tracking",
"CS2_097": "Truesilver Champion",
"DS1_178": "Tundra Rhino",
"NEW1_004": "Vanish",
"CS2_065": "Voidwalker",
"EX1_011": "Voodoo Doctor",
"CS2_186": "War Golem",
"EX1_084": "Warsong Commander",
"CS2_033": "Water Elemental",
"EX1_400": "Whirlwind",
"CS2_082": "Wicked Knife",
"CS2_013": "Wild Growth",
"CS2_039": "Windfury",
"EX1_587": "Windspeaker",
"CS2_124": "Wolfrider",
"CS2_052": "Wrath of Air Totem",
"FP1_026": "Anub'ar Ambusher",
"FP1_020": "Avenge",
"FP1_031": "Baron Rivendare",
"FP1_029": "Dancing Swords",
"FP1_023": "Dark Cultist",
"FP1_021": "Death's Bite",
"NAX6_03": "Deathbloom",
"FP1_006": "Deathcharger",
"FP1_009": "Deathlord",
"FP1_018": "Duplicate",
"FP1_003": "Echoing Ooze",
"NAX12_04": "Enrage",
"NAX11_03": "Fallout Slime",
"NAX13_04H": "Feugen",
"FP1_015": "Feugen",
"NAX14_03": "Frozen Champion",
"NAX15_03t": "Guardian of Icecrown",
"NAX15_03n": "Guardian of Icecrown",
"FP1_002": "Haunted Creeper",
"NAX10_02": "Hook",
"NAX10_02H": "Hook",
"NAX12_03": "Jaws",
"NAX12_03H": "Jaws",
"FP1_013": "Kel'Thuzad",
"NAX9_02H": "Lady Blaumeux",
"NAX9_02": "Lady Blaumeux",
"FP1_030": "Loatheb",
"NAX1_05": "Locust Swarm",
"FP1_004": "Mad Scientist",
"FP1_010": "Maexxna",
"NAX9_07": "Mark of the Horsemen",
"NAX7_04H": "Massive Runeblade",
"NAX7_04": "Massive Runeblade",
"NAX7_05": "Mind Control Crystal",
"NAX5_03": "Mindpocalypse",
"NAX15_05": "Mr. Bigglesworth",
"NAX11_04": "Mutating Injection",
"NAXM_001": "Necroknight",
"NAX3_03": "Necrotic Poison",
"FP1_017": "Nerub'ar Weblord",
"NAX1h_03": "Nerubian (normal)",
"NAX1_03": "Nerubian (heroic)",
"FP1_007t": "Nerubian",
"FP1_007": "Nerubian Egg",
"NAX4_05": "Plague",
"FP1_019": "Poison Seeds",
"NAX14_04": "Pure Cold",
"FP1_025": "Reincarnate",
"NAX9_05H": "Runeblade",
"NAX9_05": "Runeblade",
"FP1_005": "Shade of Naxxramas",
"NAX9_04": "Sir Zeliek",
"NAX9_04H": "Sir Zeliek",
"NAXM_002": "Skeletal Smith",
"NAX4_03H": "Skeleton",
"NAX4_03": "Skeleton",
"FP1_012t": "Slime",
"FP1_012": "Sludge Belcher",
"FP1_008": "Spectral Knight",
"NAX8_05t": "Spectral Rider",
"FP1_002t": "Spectral Spider",
"NAX8_03t": "Spectral Trainee",
"NAX8_04t": "Spectral Warrior",
"NAX6_03t": "Spore",
"NAX6_04": "Sporeburst",
"NAX13_05H": "Stalagg",
"FP1_014": "Stalagg",
"FP1_027": "Stoneskin Gargoyle",
"NAX13_03": "Supercharge",
"FP1_014t": "Thaddius",
"NAX9_03H": "Thane Korth'azz",
"NAX9_03": "Thane Korth'azz",
"FP1_019t": "Treant (poison seeds)",
"NAX7_02": "Understudy",
"FP1_028": "Undertaker",
"NAX8_05": "Unrelenting Rider",
"NAX8_03": "Unrelenting Trainee",
"NAX8_04": "Unrelenting Warrior",
"FP1_024": "Unstable Ghoul",
"FP1_022": "Voidcaller",
"FP1_016": "Wailing Soul",
"FP1_011": "Webspinner",
"NAX2_05": "Worshipper",
"NAX2_05H": "Worshipper",
"FP1_001": "Zombie Chow",
"GVG_029": "Ancestor's Call",
"GVG_077": "Anima Golem",
"GVG_085": "Annoy-o-Tron",
"GVG_030": "Anodized Robo Cub",
"GVG_069": "Antique Healbot",
"GVG_091": "Arcane Nullifier X-21",
"PART_001": "Armor Plating",
"GVG_030a": "Attack Mode",
"GVG_119": "Blingtron 3000",
"GVG_063": "Bolvar Fordragon",
"GVG_099": "Bomb Lobber",
"GVG_110t": "Boom Bot",
"GVG_050": "Bouncing Blade",
"GVG_068": "Burly Rockjaw Trogg",
"GVG_056t": "Burrowing Mine",
"GVG_017": "Call Pet",
"GVG_092t": "Chicken (Gnomish Experimenter)",
"GVG_121": "Clockwork Giant",
"GVG_082": "Clockwork Gnome",
"GVG_062": "Cobalt Guardian",
"GVG_073": "Cobra Shot",
"GVG_059": "Coghammer",
"GVG_013": "Cogmaster",
"GVG_024": "Cogmaster's Wrench",
"GVG_038": "Crackle",
"GVG_052": "Crush",
"GVG_041": "Dark Wispers",
"GVG_041b": "Dark Wispers",
"GVG_041a": "Dark Wispers",
"GVG_015": "Darkbomb",
"GVG_019": "Demonheart",
"GVG_110": "Dr. Boom",
"GVG_080t": "Druid of the Fang (cobra)",
"GVG_080": "Druid of the Fang",
"GVG_066": "Dunemaul Shaman",
"GVG_005": "Echo of Medivh",
"PART_005": "Emergency Coolant",
"GVG_107": "Enhance-o Mechano",
"GVG_076": "Explosive Sheep",
"GVG_026": "Feign Death",
"GVG_020": "Fel Cannon",
"GVG_016": "Fel Reaver",
"PART_004": "Finicky Cloakfield",
"GVG_007": "Flame Leviathan",
"GVG_001": "Flamecannon",
"GVG_100": "Floating Watcher",
"GVG_084": "Flying Machine",
"GVG_113": "Foe Reaper 4000",
"GVG_079": "Force-Tank MAX",
"GVG_049": "Gahz'rilla",
"GVG_028t": "Gallywix's Coin",
"GVG_117": "Gazlowe",
"GVG_032b": "Gift of Cards",
"GVG_032a": "Gift of Mana",
"GVG_081": "Gilblin Stalker",
"GVG_043": "Glaivezooka",
"GVG_098": "Gnomeregan Infantry",
"GVG_092": "Gnomish Experimenter",
"GVG_023": "Goblin Auto-Barber",
"GVG_004": "Goblin Blastmage",
"GVG_095": "Goblin Sapper",
"GVG_032": "Grove Tender",
"GVG_120": "Hemet Nesingwary",
"GVG_104": "Hobgoblin",
"GVG_089": "Illuminator",
"GVG_045t": "Imp (warlock)",
"GVG_045": "Imp-losion",
"GVG_056": "Iron Juggernaut",
"GVG_027": "Iron Sensei",
"GVG_094": "Jeeves",
"GVG_106": "Junkbot",
"GVG_074": "Kezan Mystic",
"GVG_046": "King of Beasts",
"GVG_012": "Light of the Naaru",
"GVG_008": "Lightbomb",
"GVG_097": "Lil' Exorcist",
"GVG_071": "Lost Tallstrider",
"GVG_090": "Madder Bomber",
"GVG_021": "Mal'Ganis",
"GVG_035": "Malorne",
"GVG_034": "Mech-Bear-Cat",
"GVG_078": "Mechanical Yeti",
"GVG_006": "Mechwarper",
"GVG_116": "Mekgineer Thermaplugg",
"GVG_048": "Metaltooth Leaper",
"GVG_103": "Micro Machine",
"GVG_111": "Mimiron's Head",
"GVG_109": "Mini-Mage",
"GVG_018": "Mistress of Pain",
"GVG_112": "Mogor the Ogre",
"GVG_061": "Muster for Battle",
"GVG_042": "Neptulon",
"GVG_065": "Ogre Brute",
"GVG_088": "Ogre Ninja",
"GVG_054": "Ogre Warmaul",
"GVG_025": "One-eyed Cheat",
"GVG_096": "Piloted Shredder",
"GVG_105": "Piloted Sky Golem",
"GVG_036": "Powermace",
"GVG_064": "Puddlestomper",
"GVG_060": "Quartermaster",
"GVG_108": "Recombobulator",
"GVG_031": "Recycle",
"PART_006": "Reversing Switch",
"PART_003": "Rusty Horn",
"GVG_047": "Sabotage",
"GVG_070": "Salty Dog",
"GVG_101": "Scarlet Purifier",
"GVG_055": "Screwjank Clunker",
"GVG_057": "Seal of Light",
"GVG_009": "Shadowbomber",
"GVG_072": "Shadowboxer",
"GVG_058": "Shielded Minibot",
"GVG_053": "Shieldmaiden",
"GVG_075": "Ship's Cannon",
"GVG_011": "Shrinkmeister",
"GVG_086": "Siege Engine",
"GVG_040": "Siltfin Spiritwalker",
"GVG_114": "Sneed's Old Shredder",
"GVG_002": "Snowchugger",
"GVG_123": "Soot Spewer",
"GVG_044": "Spider Tank",
"GVG_087": "Steamwheedle Sniper",
"GVG_067": "Stonesplinter Trogg",
"GVG_030b": "Tank Mode",
"GVG_093": "Target Dummy",
"PART_002": "Time Rewinder",
"GVG_022": "Tinker's Sharpsword Oil",
"GVG_102": "Tinkertown Technician",
"GVG_115": "Toshley",
"GVG_028": "Trade Prince Gallywix",
"GVG_033": "Tree of Life",
"GVG_118": "Troggzor the Earthinator",
"GVG_003": "Unstable Portal",
"GVG_083": "Upgraded Repair Bot",
"GVG_111t": "V-07-TR-0N",
"GVG_010": "Velen's Chosen",
"GVG_039": "Vitality Totem",
"GVG_014": "Vol'jin",
"GVG_051": "Warbot",
"GVG_122": "Wee Spellstopper",
"PART_007": "Whirling Blades",
"GVG_037": "Whirling Zap-o-matic",
"NEW1_016": "Captain's Parrot",
"EX1_062": "Old Murk-Eye",
"Mekka4t": "Chicken",
"PRO_001": "Elite Tauren Chieftain",
"Mekka3": "Emboldener 3000",
"EX1_112": "Gelbin Mekkatorque",
"Mekka1": "Homing Chicken",
"PRO_001a": "I Am Murloc",
"PRO_001at": "Murloc",
"Mekka4": "Poultryizer",
"PRO_001c": "Power of the Horde",
"Mekka2": "Repair Bot",
"PRO_001b": "Rogues Do It...",
"BRM_016": "Axe Flinger",
"BRM_034": "Blackwing Corruptor",
"BRM_033": "Blackwing Technician",
"BRM_031": "Chromaggus",
"BRM_014": "Core Rager",
"BRM_008": "Dark Iron Skulker",
"BRM_005": "Demonwrath",
"BRM_018": "Dragon Consort",
"BRM_022": "Dragon Egg",
"BRM_003": "Dragon's Breath",
"BRM_020": "Dragonkin Sorcerer",
"BRM_024": "Drakonid Crusher",
"BRM_010": "Druid of the Flame",
"BRM_028": "Emperor Thaurissan",
"BRM_012": "Fireguard Destroyer",
"BRM_002": "Flamewaker",
"BRM_007": "Gang Up",
"BRM_019": "Grim Patron",
"BRM_026": "Hungry Dragon",
"BRM_006": "Imp Gang Boss",
"BRM_011": "Lava Shock",
"BRM_027": "Majordomo Executus",
"BRM_030": "Nefarian",
"BRM_013": "Quick Shot",
"BRM_029": "Rend Blackhand",
"BRM_017": "Resurrect",
"BRM_015": "Revenge",
"BRM_001": "Solemn Vigil",
"BRM_004": "Twilight Whelp",
"BRM_025": "Volcanic Drake",
"BRM_009": "Volcanic Lumberer",
}
| slaymaker1907/hearthbreaker | tests/card_tests/id_mapping.py | Python | mit | 23,324 | 0 |
import re
from iota import *
import praw
import sqlite3
import random
import string
from iota.adapter.wrappers import RoutingWrapper
import config
import urllib.request
from urllib.error import HTTPError
import json
import math
# URL of the remote IOTA node used for all API calls (set in config.py).
node_address = config.node_address
class api:
    def __init__(self,seed,prod=True):
        """Create the tip-bot API wrapper.

        seed -- IOTA wallet seed (81-tryte string).
        prod -- when True, open the production database via init_db();
                pass False to defer DB setup (e.g. to init_custom_db()).
        """
        self.address_index = 1
        if prod:
            self.init_db()
        # Route the PoW-heavy attachToTangle call to a local node;
        # everything else goes to the configured remote node.
        self.iota_api = Iota(
            RoutingWrapper(node_address)
            .add_route('attachToTangle','http://localhost:14265'),seed)
def init_db(self):
self.conn = sqlite3.connect(config.database_name)
self.db = self.conn.cursor()
self.create_database()
self.address_index = len(self.db.execute("SELECT * FROM usedAddresses").fetchall())
def init_custom_db(self,name):
self.conn = sqlite3.connect(name)
self.db = self.conn.cursor()
self.create_database()
self.address_index = len(self.db.execute("SELECT * FROM usedAddresses").fetchall())
def get_iota_value(self,amount):
try:
with urllib.request.urlopen('https://api.coinmarketcap.com/v1/ticker/iota/') as url:
data = json.loads(url.read().decode())[0]
price = data['price_usd']
value = (amount/1000000)*float(price)
return value
except:
return amount/1000000
#---------IOTA API FUNCTIONS--------------#
    def send_transfer(self,addr,amount):
        """Send *amount* iota to tryte address *addr*.

        Returns the raw response dict from PyOTA's send_transfer
        (includes the attached 'bundle').
        """
        ret = self.iota_api.send_transfer(
            depth = 3,
            transfers = [
                ProposedTransaction(
                    address = Address(
                        addr
                    ),
                    value = amount,
                ),
            ],
            # min weight magnitude 15 — presumably the mainnet PoW
            # difficulty at the time this was written; confirm for the
            # target network.
            min_weight_magnitude=15
        )
        return ret
def get_account_balance(self):
addresses = self.iota_api.get_new_addresses(0,self.address_index)['addresses']
balances = self.iota_api.get_balances(addresses)['balances']
total = 0
for balance in balances:
total = total + balance
return total
def get_balance(self,address):
address_data = self.iota_api.get_balances([address])
return address_data['balances'][0]
    def get_new_address(self):
        """Generate and record the next unused deposit address.

        Persists each generated address via add_used_address (defined
        elsewhere in this class) and bumps address_index.  If the fresh
        address already holds funds, recurses to skip past it.
        """
        # NOTE(review): the API is asked for exactly one address, and the
        # loop variable `address` is deliberately read after the loop —
        # this relies on the 'addresses' list being non-empty.
        addresses = self.iota_api.get_new_addresses(self.address_index,1)
        for address in addresses['addresses']:
            # Attach the 9-tryte checksum before storing/returning.
            address = address.with_valid_checksum()
            self.add_used_address(self.address_index,address._trytes.decode("utf-8"))
            self.address_index = self.address_index + 1
        if self.get_balance(address) > 0:
            return self.get_new_address()
        return address
def create_seed(self):
seed = ''.join(random.choice(string.ascii_uppercase + "9") for _ in range(81))
return seed
    def check_transaction(self,transaction):
        """Return True when the transfer's bundle has been confirmed.

        *transaction* is the dict returned by send_transfer (contains
        a 'bundle' entry).
        """
        transaction_hash = transaction['bundle'].hash
        inclusion_states = self.iota_api.get_latest_inclusion([transaction_hash])
        return inclusion_states['states'][transaction_hash]
    def replay_bundle(self,transaction):
        """Reattach an unconfirmed transfer's bundle to the tangle.

        Uses depth 3 and min weight magnitude 15, matching
        send_transfer's settings.
        """
        transaction_hash = transaction['bundle'].tail_transaction.hash
        self.iota_api.replay_bundle(transaction_hash,3,15)
#-------------MESSAGE REGEX FUNCTIONS---------------#
#Check if the message body or subject contains a fund/deposit request
def is_deposit_request(self,message):
fund_string = re.compile("Fund",re.I)
deposit_string = re.compile("Deposit",re.I)
match = fund_string.search(message.subject)
if match:
return True
match = fund_string.search(message.body)
if match:
return True
match = deposit_string.search(message.subject)
if match:
return True
match = deposit_string.search(message.body)
if match:
return True
return False
#Check if the message body or subject contains a withdraw request
def is_withdraw_request(self,message):
withdraw_string = re.compile("Withdraw",re.I)
match = withdraw_string.search(message.subject)
if match:
return True
match = withdraw_string.search(message.body)
if match:
return True
return False
#Check if the message body or subject contains a balance request
def is_balance_request(self,message):
balance_string = re.compile("Balance",re.I)
match = balance_string.search(message.subject)
if match:
return True
match = balance_string.search(message.body)
if match:
return True
return False
#Check if the message body or subject contains a help/commands request
def is_help_request(self,message):
help_string = re.compile("Help",re.I)
commands_string = re.compile("Commands",re.I)
match = help_string.search(message.subject)
if match:
return True
match = help_string.search(message.body)
if match:
return True
match = commands_string.search(message.subject)
if match:
return True
match = commands_string.search(message.body)
if match:
return True
return False
#Check if the message body contains an iota amount
def contains_iota_amount(self,message):
iota_amount_string = re.compile("([0-9]+)\s*iota",re.I)
miota_amount_string = re.compile("([0-9]+)\s*miota",re.I)
match = iota_amount_string.search(message.body)
if match:
return True
match = miota_amount_string.search(message.body)
if match:
return True
return False
#Return the iota amount refrenced in the message, convets miota to iota
def get_iota_tip_amount(self,message):
iota_amount_string = re.compile("\+\s*([0-9]+)\s*iota",re.I)
miota_amount_string = re.compile("\+\s*([0-9]+)\s*miota",re.I)
match = iota_amount_string.search(message.body)
if match:
return int(match.group(1))
match = miota_amount_string.search(message.body)
if match:
return (int(match.group(1))*1000000)
def get_iota_amount(self,message):
iota_amount_string = re.compile("([0-9]+)\s*iota",re.I)
miota_amount_string = re.compile("([0-9]+)\s*miota",re.I)
match = iota_amount_string.search(message.body)
if match:
return int(match.group(1))
match = miota_amount_string.search(message.body)
if match:
return (int(match.group(1))*1000000)
def get_message_address(self,message):
address_string = re.compile("[A-Z,9]{90}")
match = address_string.search(message.body)
if match:
return bytearray(match.group(0),"utf-8")
else:
return None
def is_tip(self,comment):
tip_string_iota = re.compile("\+\s*[0-9]+\s*iota",re.I)
tip_string_miota = re.compile("\+\s*[0-9]+\s*miota",re.I)
text = comment.body
match = tip_string_iota.search(text)
if match:
return True
match = tip_string_miota.search(text)
if match:
return True
return False
def is_donation_request(self,message):
donate_string = re.compile("donat",re.I)
match = donate_string.search(message.subject)
if match:
return True
match = donate_string.search(message.body)
if match:
return True
return False
#--------------------Database Functions----------------------#
def create_database(self):
self.db.execute("CREATE TABLE IF NOT EXISTS users (redditUsername TEXT PRIMARY KEY, balance INTEGER)")
self.conn.commit()
self.db.execute("CREATE TABLE IF NOT EXISTS commentsRepliedTo (commentId TEXT PRIMARY KEY)")
self.conn.commit()
self.db.execute("CREATE TABLE IF NOT EXISTS usedAddresses (addressIndex INTEGER PRIMARY KEY, address TEXT)")
self.conn.commit()
self.db.execute("CREATE TABLE IF NOT EXISTS depositRequests (messageId TEXT PRIMARY KEY, address TEXT)")
self.conn.commit()
self.db.execute("CREATE TABLE IF NOT EXISTS withdrawRequests (messageId TEXT PRIMARY KEY, address TEXT, amount INTEGER)")
self.conn.commit()
def add_new_user(self,reddit_username):
entry = self.db.execute("SELECT * FROM users WHERE redditUsername=?",(reddit_username,)).fetchone()
if not entry:
self.db.execute("INSERT INTO users(redditUsername,balance) VALUES (?,?)",(reddit_username,0))
self.conn.commit()
def set_balance(self,reddit_username, amount):
entry = self.db.execute("SELECT * FROM users WHERE redditUsername=?",(reddit_username,)).fetchone()
if entry:
self.db.execute("UPDATE users SET balance=? WHERE redditUsername=?",(amount,reddit_username))
self.conn.commit()
else:
self.add_new_user(reddit_username)
self.set_balance(reddit_username,amount)
    # Adds `amount` to a user's balance, creating the user (at 0) first when missing.
    def add_balance(self,reddit_username,amount):
        """Increase the user's stored balance by `amount` iota."""
        entry = self.db.execute("SELECT * FROM users WHERE redditUsername=?",(reddit_username,)).fetchone()
        if entry:
            balance = entry[1]
            balance = balance + amount
            self.set_balance(reddit_username,balance)
        else:
            # Unknown user: create the account and retry once.
            self.add_new_user(reddit_username)
            self.add_balance(reddit_username,amount)
    # Subtracts `amount` from a user's balance; silently does nothing for unknown users.
    def subtract_balance(self,reddit_username,amount):
        """Decrease the user's stored balance by `amount` iota (no-op when the user is unknown)."""
        entry = self.db.execute("SELECT * FROM users WHERE redditUsername=?",(reddit_username,)).fetchone()
        if entry:
            balance = entry[1]
            balance = balance - amount
            self.set_balance(reddit_username,balance)
#Checks if the user has at least the given amount
def check_balance(self,reddit_username,amount):
entry = self.db.execute("SELECT * FROM users WHERE redditUsername=?",(reddit_username,)).fetchone()
if entry:
balance = entry[1]
if amount > balance:
return False
else:
return True
else:
return False
    # Gets the balance for the specified user, creating them at 0 when missing.
    def get_user_balance(self,reddit_username):
        """Return the user's balance in iota; unknown users are created with balance 0."""
        entry = self.db.execute("SELECT * FROM users WHERE redditUsername=?",(reddit_username,)).fetchone()
        if entry:
            balance = entry[1]
            return balance
        else:
            # Create the account, then retry once (returns 0).
            self.add_new_user(reddit_username)
            return self.get_user_balance(reddit_username)
def get_total_balance(self):
query = self.db.execute("SELECT * FROM users").fetchall()
total = 0
for entry in query:
total = total + entry[1]
return total
def get_comments_replied_to(self):
query = self.db.execute("SELECT commentId FROM commentsRepliedTo").fetchall()
comments = []
for entry in query:
comments.append(entry[0])
return comments
    def add_replied_to_comment(self,commentId):
        """Record commentId so the bot never replies to the same comment twice."""
        self.db.execute("INSERT INTO commentsRepliedTo(commentId) VALUES (?)",(commentId,))
        self.conn.commit()
    def add_used_address(self,index,address):
        """Record a generated deposit address together with its key index."""
        self.db.execute("INSERT INTO usedAddresses(addressIndex,address) VALUES (?,?)",(index,address))
        self.conn.commit()
    def add_deposit_request(self,request):
        """Persist a deposit request keyed by its reddit message id.

        Only acts on requests whose 'type' is 'deposit'; duplicate message ids
        are ignored, making the call idempotent. The address is stored as the
        utf-8 text of its trytes (assumes request['address'] exposes a
        `_trytes` bytes attribute -- TODO confirm against the iota library).
        """
        if request['type'] == 'deposit':
            address = request['address']
            # NOTE(review): reddit_username is read here but never stored.
            reddit_username = request['reddit_username']
            message = request['message']
            message_id = message.fullname
            query = self.db.execute("SELECT * FROM depositRequests WHERE messageId=?",(message_id,)).fetchone()
            if query is not None:
                # Already recorded -- skip the insert.
                return
            self.db.execute("INSERT INTO depositRequests(messageId,address) VALUES (?,?)",(message_id,address._trytes.decode("utf-8")))
            self.conn.commit()
def remove_deposit_request(self,request):
if request['type'] == 'deposit':
message = request['message']
message_id = message.fullname
self.db.execute("DELETE FROM depositRequests WHERE messageId=?",(message_id,))
self.conn.commit()
    def get_deposit_requests(self):
        """Return every pending deposit request as (messageId, address) rows."""
        query = self.db.execute("SELECT * FROM depositRequests")
        return query.fetchall()
    def add_withdraw_request(self,request):
        """Persist a withdraw request keyed by its reddit message id.

        Assumes request['address'] is a bytes-like address -- TODO confirm.
        """
        address = request['address']
        message = request['message']
        message_id = message.fullname
        amount = request['amount']
        self.db.execute("INSERT INTO withdrawRequests(messageId,address,amount) VALUES (?,?,?)",(message_id,address.decode("utf-8"),amount))
        self.conn.commit()
    def remove_withdraw_request(self,request):
        """Delete the stored withdraw request matching the request's message id."""
        message = request['message']
        message_id = message.fullname
        self.db.execute("DELETE FROM withdrawRequests WHERE messageId=?",(message_id,))
        self.conn.commit()
    def get_withdraw_requests(self):
        """Return every pending withdraw request as (messageId, address, amount) rows."""
        query = self.db.execute("SELECT * FROM withdrawRequests")
        return query.fetchall()
    def get_used_addresses(self):
        """Return every handed-out address as (addressIndex, address) rows."""
        query = self.db.execute("SELECT * FROM usedAddresses")
        return query.fetchall()
| normpad/iotatipbot | test/bot_api.py | Python | gpl-3.0 | 13,535 | 0.012412 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class LlvmLld(CMakePackage):
    """lld - The LLVM Linker
    lld is a new set of modular code for creating linker tools."""
    homepage = "http://lld.llvm.org"
    url = "http://llvm.org/releases/3.4/lld-3.4.src.tar.gz"
    version('3.4', '3b6a17e58c8416c869c14dd37682f78e')
    depends_on('llvm')
    depends_on('cmake@2.8:', type='build')
    def cmake_args(self):
        """Return extra CMake flags pointing lld at the installed LLVM."""
        # 'env' comes from 'from spack import *' (Spack's build environment).
        # Append (or set) the compiler's C++11 flag so lld's sources compile.
        if 'CXXFLAGS' in env and env['CXXFLAGS']:
            env['CXXFLAGS'] += ' ' + self.compiler.cxx11_flag
        else:
            env['CXXFLAGS'] = self.compiler.cxx11_flag
        return [
            '-DLLD_PATH_TO_LLVM_BUILD=%s' % self.spec['llvm'].prefix,
            '-DLLVM_MAIN_SRC_DIR=%s' % self.spec['llvm'].prefix,
        ]
| TheTimmy/spack | var/spack/repos/builtin/packages/llvm-lld/package.py | Python | lgpl-2.1 | 1,980 | 0.000505 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
import operator
import uuid
from functools import partial
from inspect import getmembers
from io import FileIO
from six import iteritems, string_types, text_type
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleParserError
from ansible.parsing import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.template import Templar
from ansible.utils.boolean import boolean
from ansible.utils.debug import debug
from ansible.utils.vars import combine_vars, isidentifier
from ansible.template import template
class Base:
# connection/transport
_connection = FieldAttribute(isa='string')
_port = FieldAttribute(isa='int')
_remote_user = FieldAttribute(isa='string')
# variables
_vars = FieldAttribute(isa='dict', default=dict(), priority=100)
# flags and misc. settings
_environment = FieldAttribute(isa='list')
_no_log = FieldAttribute(isa='bool')
# param names which have been deprecated/removed
DEPRECATED_ATTRIBUTES = [
'sudo', 'sudo_user', 'sudo_pass', 'sudo_exe', 'sudo_flags',
'su', 'su_user', 'su_pass', 'su_exe', 'su_flags',
]
    def __init__(self):
        """Set up loader/variable-manager placeholders, a uuid, and the field attributes."""
        # initialize the data loader and variable manager, which will be provided
        # later when the object is actually loaded
        self._loader = None
        self._variable_manager = None
        # every object gets a random uuid:
        self._uuid = uuid.uuid4()
        # and initialize the base attributes
        self._initialize_base_attributes()
        # Prefer the CLI's shared display object when running under the
        # ansible entry point; fall back to a private Display otherwise.
        try:
            from __main__ import display
            self._display = display
        except ImportError:
            from ansible.utils.display import Display
            self._display = Display()
# The following three functions are used to programatically define data
# descriptors (aka properties) for the Attributes of all of the playbook
# objects (tasks, blocks, plays, etc).
#
# The function signature is a little strange because of how we define
# them. We use partial to give each method the name of the Attribute that
# it is for. Since partial prefills the positional arguments at the
# beginning of the function we end up with the first positional argument
# being allocated to the name instead of to the class instance (self) as
# normal. To deal with that we make the property name field the first
# positional argument and self the second arg.
#
# Because these methods are defined inside of the class, they get bound to
# the instance when the object is created. After we run partial on them
# and put the result back into the class as a property, they get bound
# a second time. This leads to self being placed in the arguments twice.
# To work around that, we mark the functions as @staticmethod so that the
# first binding to the instance doesn't happen.
    @staticmethod
    def _generic_g(prop_name, self):
        # Generic property getter: prefer a _get_attr_<name> override, then
        # the stored attribute, then (when the subclass supports it) the
        # parent's value.
        method = "_get_attr_%s" % prop_name
        if hasattr(self, method):
            return getattr(self, method)()
        value = self._attributes[prop_name]
        if value is None and hasattr(self, '_get_parent_attribute'):
            value = self._get_parent_attribute(prop_name)
        return value
    @staticmethod
    def _generic_s(prop_name, self, value):
        # Generic property setter: store the value in the attributes dict.
        self._attributes[prop_name] = value
    @staticmethod
    def _generic_d(prop_name, self):
        # Generic property deleter: drop the stored value entirely.
        del self._attributes[prop_name]
def _get_base_attributes(self):
'''
Returns the list of attributes for this class (or any subclass thereof).
If the attribute name starts with an underscore, it is removed
'''
base_attributes = dict()
for (name, value) in getmembers(self.__class__):
if isinstance(value, Attribute):
if name.startswith('_'):
name = name[1:]
base_attributes[name] = value
return base_attributes
    def _initialize_base_attributes(self):
        """Install a property on Base for every declared field and seed its default."""
        # each class knows attributes set upon it, see Task.py for example
        self._attributes = dict()
        for (name, value) in self._get_base_attributes().items():
            getter = partial(self._generic_g, name)
            setter = partial(self._generic_s, name)
            deleter = partial(self._generic_d, name)
            # Place the property into the class so that cls.name is the
            # property functions.
            setattr(Base, name, property(getter, setter, deleter))
            # Place the value into the instance so that the property can
            # process and hold that value.
            setattr(self, name, value.default)
def preprocess_data(self, ds):
''' infrequently used method to do some pre-processing of legacy terms '''
for base_class in self.__class__.mro():
method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None)
if method:
return method(ds)
return ds
    def load_data(self, ds, variable_manager=None, loader=None):
        ''' walk the input datastructure and assign any values

        Caches ds on the object, validates its keys against the declared
        field attributes, then copies each present key into _attributes
        (via a _load_<name> hook when one exists). Returns self.
        '''
        assert ds is not None
        # cache the datastructure internally
        setattr(self, '_ds', ds)
        # the variable manager class is used to manage and merge variables
        # down to a single dictionary for reference in templating, etc.
        self._variable_manager = variable_manager
        # the data loader class is used to parse data from strings and files
        if loader is not None:
            self._loader = loader
        else:
            self._loader = DataLoader()
        # call the preprocess_data() function to massage the data into
        # something we can more easily parse, and then call the validation
        # function on it to ensure there are no incorrect key values
        ds = self.preprocess_data(ds)
        self._validate_attributes(ds)
        # Walk all attributes in the class. We sort them based on their priority
        # so that certain fields can be loaded before others, if they are dependent.
        # FIXME: we currently don't do anything with private attributes but
        #        may later decide to filter them out of 'ds' here.
        base_attributes = self._get_base_attributes()
        for name, attr in sorted(base_attributes.items(), key=operator.itemgetter(1)):
            # copy the value over unless a _load_field method is defined
            if name in ds:
                method = getattr(self, '_load_%s' % name, None)
                if method:
                    self._attributes[name] = method(name, ds[name])
                else:
                    self._attributes[name] = ds[name]
        # run early, non-critical validation
        self.validate()
        # return the constructed object
        return self
    def get_ds(self):
        """Return the cached input datastructure, or None before load_data() ran."""
        try:
            return getattr(self, '_ds')
        except AttributeError:
            return None
    def get_loader(self):
        """Return the DataLoader assigned during load_data()."""
        return self._loader
    def get_variable_manager(self):
        """Return the variable manager assigned during load_data()."""
        return self._variable_manager
def _validate_attributes(self, ds):
'''
Ensures that there are no keys in the datastructure which do
not map to attributes for this object.
'''
valid_attrs = frozenset(name for name in self._get_base_attributes())
for key in ds:
if key not in valid_attrs:
raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
    def validate(self, all_vars=dict()):
        ''' validation that is done at parse time, not load time '''
        # NOTE(review): the mutable default for all_vars is an anti-pattern,
        # but the parameter is unused in this base implementation; confirm
        # subclass usage before changing the signature.
        # walk all fields in the object
        for (name, attribute) in iteritems(self._get_base_attributes()):
            # run validator only if present
            method = getattr(self, '_validate_%s' % name, None)
            if method:
                method(attribute, name, getattr(self, name))
            else:
                # and make sure the attribute is of the type it should be
                value = getattr(self, name)
                if value is not None:
                    if attribute.isa == 'string' and isinstance(value, (list, dict)):
                        raise AnsibleParserError("The field '%s' is supposed to be a string type, however the incoming data structure is a %s" % (name, type(value)), obj=self.get_ds())
def copy(self):
'''
Create a copy of this object and return it.
'''
new_me = self.__class__()
for name in self._get_base_attributes():
setattr(new_me, name, getattr(self, name))
new_me._loader = self._loader
new_me._variable_manager = self._variable_manager
# if the ds value was set on the object, copy it to the new copy too
if hasattr(self, '_ds'):
new_me._ds = self._ds
return new_me
    def post_validate(self, templar):
        '''
        we can't tell that everything is of the right type until we have
        all the variables. Run basic types (from isa) as well as
        any _post_validate_<foo> functions.

        Templates each field with the given templar, honors the 'omit'
        sentinel, and coerces values to the declared isa type in place.
        '''
        # NOTE(review): basedir is computed here but never used below.
        basedir = None
        if self._loader is not None:
            basedir = self._loader.get_basedir()
        # save the omit value for later checking
        omit_value = templar._available_variables.get('omit')
        for (name, attribute) in iteritems(self._get_base_attributes()):
            if getattr(self, name) is None:
                if not attribute.required:
                    continue
                else:
                    raise AnsibleParserError("the field '%s' is required but was not set" % name)
            elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
                # Intermediate objects like Play() won't have their fields validated by
                # default, as their values are often inherited by other objects and validated
                # later, so we don't want them to fail out early
                continue
            try:
                # Run the post-validator if present. These methods are responsible for
                # using the given templar to template the values, if required.
                method = getattr(self, '_post_validate_%s' % name, None)
                if method:
                    value = method(attribute, getattr(self, name), templar)
                else:
                    # if the attribute contains a variable, template it now
                    value = templar.template(getattr(self, name))
                # if this evaluated to the omit value, set the value back to
                # the default specified in the FieldAttribute and move on
                if omit_value is not None and value == omit_value:
                    value = attribute.default
                    continue
                # and make sure the attribute is of the type it should be
                if value is not None:
                    if attribute.isa == 'string':
                        value = text_type(value)
                    elif attribute.isa == 'int':
                        value = int(value)
                    elif attribute.isa == 'float':
                        value = float(value)
                    elif attribute.isa == 'bool':
                        value = boolean(value)
                    elif attribute.isa == 'percent':
                        # special value, which may be an integer or float
                        # with an optional '%' at the end
                        if isinstance(value, string_types) and '%' in value:
                            value = value.replace('%', '')
                        value = float(value)
                    elif attribute.isa == 'list':
                        if value is None:
                            value = []
                        elif not isinstance(value, list):
                            value = [ value ]
                        if attribute.listof is not None:
                            for item in value:
                                if not isinstance(item, attribute.listof):
                                    raise AnsibleParserError("the field '%s' should be a list of %s, but the item '%s' is a %s" % (name, attribute.listof, item, type(item)), obj=self.get_ds())
                                elif attribute.required and attribute.listof == string_types:
                                    if item is None or item.strip() == "":
                                        raise AnsibleParserError("the field '%s' is required, and cannot have empty values" % (name,), obj=self.get_ds())
                    elif attribute.isa == 'set':
                        if value is None:
                            value = set()
                        else:
                            if not isinstance(value, (list, set)):
                                value = [ value ]
                            if not isinstance(value, set):
                                value = set(value)
                    elif attribute.isa == 'dict':
                        if value is None:
                            value = dict()
                        elif not isinstance(value, dict):
                            raise TypeError("%s is not a dictionary" % value)
                # and assign the massaged value back to the attribute field
                setattr(self, name, value)
            except (TypeError, ValueError) as e:
                raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
            except UndefinedError as e:
                if templar._fail_on_undefined_errors and name != 'name':
                    raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds())
def serialize(self):
'''
Serializes the object derived from the base object into
a dictionary of values. This only serializes the field
attributes for the object, so this may need to be overridden
for any classes which wish to add additional items not stored
as field attributes.
'''
repr = dict()
for name in self._get_base_attributes():
repr[name] = getattr(self, name)
# serialize the uuid field
repr['uuid'] = getattr(self, '_uuid')
return repr
    def deserialize(self, data):
        '''
        Given a dictionary of values, load up the field attributes for
        this object. As with serialize(), if there are any non-field
        attribute data members, this method will need to be overridden
        and extended.
        '''
        assert isinstance(data, dict)
        for (name, attribute) in iteritems(self._get_base_attributes()):
            if name in data:
                setattr(self, name, data[name])
            else:
                # missing keys fall back to the field's declared default
                setattr(self, name, attribute.default)
        # restore the UUID field
        setattr(self, '_uuid', data.get('uuid'))
    def _load_vars(self, attr, ds):
        '''
        Vars in a play can be specified either as a dictionary directly, or
        as a list of dictionaries. If the latter, this method will turn the
        list into a single dictionary. Raises AnsibleParserError for any
        other shape or for keys that are not valid identifiers.
        '''
        def _validate_variable_keys(ds):
            # every var name must be a legal identifier
            for key in ds:
                if not isidentifier(key):
                    raise TypeError("%s is not a valid variable name" % key)
        try:
            if isinstance(ds, dict):
                _validate_variable_keys(ds)
                return ds
            elif isinstance(ds, list):
                # merge the list of dicts left-to-right (later wins)
                all_vars = dict()
                for item in ds:
                    if not isinstance(item, dict):
                        raise ValueError
                    _validate_variable_keys(item)
                    all_vars = combine_vars(all_vars, item)
                return all_vars
            elif ds is None:
                return {}
            else:
                raise ValueError
        except ValueError:
            raise AnsibleParserError("Vars in a %s must be specified as a dictionary, or a list of dictionaries" % self.__class__.__name__, obj=ds)
        except TypeError as e:
            raise AnsibleParserError("Invalid variable name in vars specified for %s: %s" % (self.__class__.__name__, e), obj=ds)
    def _extend_value(self, value, new_value):
        '''
        Will extend the value given with new_value (and will turn both
        into lists if they are not so already). Only *consecutive*
        duplicates are collapsed (itertools.groupby semantics): order is
        preserved and unhashable items such as dicts are supported, which
        is why a set is not used here.
        '''
        if not isinstance(value, list):
            value = [ value ]
        if not isinstance(new_value, list):
            new_value = [ new_value ]
        return [i for i,_ in itertools.groupby(value + new_value)]
    def __getstate__(self):
        # pickle support: delegate to the field-attribute serializer
        return self.serialize()
    def __setstate__(self, data):
        # pickle support: re-run __init__ to rebuild properties, then load
        self.__init__()
        self.deserialize(data)
| pheanex/ansible | lib/ansible/playbook/base.py | Python | gpl-3.0 | 18,228 | 0.002524 |
# Copyright 2018 - TODAY Serpent Consulting Services Pvt. Ltd.
# (<http://www.serpentcs.com>)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
    # Odoo addon manifest for the partner phone/mobile/email search module.
    'name': 'Search Partner Phone/Mobile/Email',
    'version': '11.0.1.0.1',
    'category': 'Extra Tools',
    'summary': 'Partner Search by Phone/Mobile/Email',
    'author': "Serpent Consulting Services Pvt. Ltd.,"
              "Odoo Community Association (OCA)",
    'website': 'https://github.com/OCA/partner-contact',
    'license': 'AGPL-3',
    # only the core 'base' addon is required
    'depends': [
        'base',
    ],
    'installable': True,
    'auto_install': False,
}
| brain-tec/partner-contact | partner_phone_search/__manifest__.py | Python | agpl-3.0 | 616 | 0 |
import tensorflow as tf
import numpy as np
import scipy.io
vgg_layers = [
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4'
]
vgg_layer_types = [
'conv', 'relu', 'conv', 'relu', 'pool',
'conv', 'relu', 'conv', 'relu', 'pool',
'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'pool',
'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'pool',
'conv', 'relu', 'conv', 'relu', 'conv', 'relu', 'conv', 'relu'
]
# Build the vgg convnet
# Returns convnet and mean pixel of the convnet
def build_net(path_network, input_image):
    """Build the VGG feature network on top of input_image.

    Args:
        path_network: path to the pretrained matconvnet VGG '.mat' file.
        input_image: 4-D image tensor (batch, height, width, channels).

    Returns:
        (convnet, mean): dict mapping layer name -> output tensor, and the
        mean pixel used to normalize input images.
    """
    # Load pretrained convnet
    pretrained_net = scipy.io.loadmat(path_network)
    # Mean of input pixels - used to normalize input images
    mean = np.mean(pretrained_net['normalization'][0][0][0], axis=(0, 1))
    layers = pretrained_net['layers'][0]
    convnet = {}
    current = input_image
    for i, name in enumerate(vgg_layers):
        if vgg_layer_types[i] == 'conv':
            # Convolution layer
            kernel, bias = layers[i][0][0][0][0]
            # (width, height, in_channels, out_channels) -> (height, width, in_channels, out_channels)
            # BUG FIX: the transposed weights were previously computed but the
            # untransposed 'kernel' was passed to conv2d; use the transposed form.
            kernel = np.transpose(kernel, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, 1, 1, 1), padding='SAME')
            current = tf.nn.bias_add(conv, bias)
        elif vgg_layer_types[i] == 'relu':
            # Relu layer
            current = tf.nn.relu(current)
        elif vgg_layer_types[i] == 'pool':
            # Pool layer (average pooling, as in the original style-transfer paper)
            current = tf.nn.avg_pool(current, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME')
        convnet[name] = current
    return convnet, mean
def pre_process_image(image, mean_pixel):
    """Center an image by subtracting the dataset mean pixel."""
    return image - mean_pixel


def restore_image(image, mean_pixel):
    """Undo pre-processing by adding the dataset mean pixel back."""
    return image + mean_pixel
| everfor/Neural_Artistic_Style | vgg.py | Python | mit | 2,235 | 0.008501 |
'''
primepalCodeEval.py - Solution to Problem Prime Palindrome (Category - Easy)
Copyright (C) 2013, Shubham Verma
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Description:
Write a program to determine the biggest prime palindrome under 1000.
Input sample:
None
Output sample:
Your program should print the largest palindrome on stdout. i.e.
929
'''
from math import sqrt
def isPrime(num):
    """Return True when num is prime.

    Fixes in this version:
      - numbers below 2 (0, 1, negatives) are not prime
      - 2 is prime (the old even-check wrongly rejected it)
      - the trial-division bound is int(sqrt(num)) + 1 so perfect squares
        such as 9 and 25 are correctly rejected
    """
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    # only odd divisors up to and including the square root need checking
    for i in range(3, int(sqrt(num)) + 1, 2):
        if num % i == 0:
            return False
    return True
if __name__ == '__main__':
    # Scan downward from 999; the first palindromic prime found is the
    # largest prime palindrome under 1000 (prints 929).
    for num in reversed(xrange(1000)):
        if str(num) == str(num)[::-1] and isPrime(num):
            print num
            break
#!/usr/bin/python -tt
"""Helper functions."""
from dns import resolver
# Exceptions
class CouldNotResolv(Exception):
"""Exception for unresolvable hostname."""
pass
def resolv(hostname):
    """Select DNS servers based on the hostname prefix and resolve it.

    Args:
        hostname: string, hostname ('b-' -> blue net, 'g-' -> green net,
            anything else -> white/public servers)

    Returns:
        ips: list, list of IP address strings

    Raises:
        CouldNotResolv: when the name does not exist (NXDOMAIN).
        NOTE(review): other dns.resolver errors (NoAnswer, Timeout, ...)
        propagate to the caller unchanged -- confirm that is intended.
    """
    ips = list()
    # Create resolver object
    res = resolver.Resolver()
    # Choose the correct DNS servers
    # Blue DNS servers
    if hostname.startswith('b-'):
        res.nameservers = ['172.16.2.10', '172.16.2.11']
    # Green DNS servers
    elif hostname.startswith('g-'):
        res.nameservers = ['10.0.2.10', '10.0.2.11']
    # Default to white DNS servers
    else:
        res.nameservers = ['194.47.252.134', '194.47.252.135']
    # Query
    try:
        query = res.query(hostname)
        for answer in query:
            ips.append(answer.address)
    except resolver.NXDOMAIN:
        raise CouldNotResolv
    # Return query result
    return ips
def main():
    """Main. Intentionally a no-op: this module is used as a library."""
    pass
# Script entry point (does nothing when executed directly).
if __name__ == '__main__':
    main()
| MaxIV-KitsControls/netspot | netspot/lib/spotmax/helpers.py | Python | mit | 1,004 | 0.025896 |
import sys
import time
from entrypoint2 import entrypoint
import pyscreenshot
from pyscreenshot.plugins.gnome_dbus import GnomeDBusWrapper
from pyscreenshot.plugins.gnome_screenshot import GnomeScreenshotWrapper
from pyscreenshot.plugins.kwin_dbus import KwinDBusWrapper
from pyscreenshot.util import run_mod_as_subproc
def run(force_backend, n, childprocess, bbox=None):
    """Time n screenshot grabs with one back-end and print the result.

    :param force_backend: back-end name, or "default" for pyscreenshot's choice
    :param n: number of grabs to time
    :param childprocess: forwarded to pyscreenshot.grab()
    :param bbox: optional (x1, y1, x2, y2) bounding box
    """
    sys.stdout.write("%-20s\t" % force_backend)
    sys.stdout.flush()  # flush the label before a potential crash in grab()
    if force_backend == "default":
        force_backend = None
    try:
        start = time.time()
        for _ in range(n):
            pyscreenshot.grab(
                backend=force_backend, childprocess=childprocess, bbox=bbox
            )
        end = time.time()
        dt = end - start
        s = "%-4.2g sec\t" % dt
        s += "(%5d ms per call)" % (1000.0 * dt / n)
        sys.stdout.write(s)
    finally:
        # always terminate the line, even when grab() raised
        print("")
novirt = [GnomeDBusWrapper.name, KwinDBusWrapper.name, GnomeScreenshotWrapper.name]
def run_all(n, childprocess_param, virtual_only=True, bbox=None):
    """Run the speed test for every applicable back-end, each in a subprocess.

    :param n: number of grabs per back-end
    :param childprocess_param: value forwarded to the child's --childprocess flag
    :param virtual_only: skip back-ends that cannot run under a virtual display
    :param bbox: optional (x1, y1, x2, y2) bounding box
    """
    debug = True
    print("")
    print("n=%s" % n)
    print("------------------------------------------------------")
    if bbox:
        # serialize the bounding box as "x1:y1:x2:y2" for the child process
        # (the previous code unpacked to unused x1..y2 locals and mapped str twice)
        bboxpar = ["--bbox", ":".join(map(str, bbox))]
    else:
        bboxpar = []
    debugpar = ["--debug"] if debug else []
    for backend in ["default"] + pyscreenshot.backends():
        backendpar = ["--backend", backend]
        # skip back-ends that require a real (non-virtual) display
        if virtual_only and backend in novirt:
            continue
        p = run_mod_as_subproc(
            "pyscreenshot.check.speedtest",
            ["--childprocess", childprocess_param] + bboxpar + debugpar + backendpar,
        )
        print(p.stdout)
@entrypoint
def speedtest(virtual_display=False, backend="", childprocess="", bbox="", number=10):
"""Performance test of all back-ends.
:param virtual_display: run with Xvfb
:param bbox: bounding box coordinates x1:y1:x2:y2
:param backend: back-end can be forced if set (example:default, scrot, wx,..),
otherwise all back-ends are tested
:param childprocess: pyscreenshot parameter childprocess (0/1)
:param number: number of screenshots for each backend (default:10)
"""
childprocess_param = childprocess
if childprocess == "":
childprocess = True # default
elif childprocess == "0":
childprocess = False
elif childprocess == "1":
childprocess = True
else:
raise ValueError("invalid childprocess value")
if bbox:
x1, y1, x2, y2 = map(int, bbox.split(":"))
bbox = x1, y1, x2, y2
else:
bbox = None
def f(virtual_only):
if backend:
try:
run(backend, number, childprocess, bbox=bbox)
except pyscreenshot.FailedBackendError:
pass
else:
run_all(number, childprocess_param, virtual_only=virtual_only, bbox=bbox)
if virtual_display:
from pyvirtualdisplay import Display
with Display(visible=0):
f(virtual_only=True)
else:
f(virtual_only=False)
| ponty/pyscreenshot | pyscreenshot/check/speedtest.py | Python | bsd-2-clause | 3,228 | 0.002169 |
"""
models
~~~~~~
Module containing all of our models that are typically
accessed in a CRUD like manner.
"""
from ..models.base import Model as BaseModel
from ..models.default_schema import Model as DefaultSchemaModel
from ..models.login import Model as LoginModel
# All CRUD-style models exposed by this package, in registration order.
MODELS = [
    BaseModel,
    DefaultSchemaModel,
    LoginModel,
]
| sassoo/goldman | goldman/models/__init__.py | Python | mit | 353 | 0 |
from mpf.tests.MpfGameTestCase import MpfGameTestCase
class TestPlayerVars(MpfGameTestCase):
    """Verify configured player-variable initial values and set/add event kwargs."""
    def get_config_file(self):
        return 'player_vars.yaml'
    def get_machine_path(self):
        return 'tests/machine_files/player_vars/'
    def test_initial_values(self):
        """Both players start with the configured, correctly-typed initial values."""
        self.fill_troughs()
        self.start_two_player_game()
        for x in range(2):
            self.assertEqual(self.machine.game.player_list[x].some_var, 4)
            self.assertEqual(type(self.machine.game.player_list[x].some_var), int)
            self.assertEqual(self.machine.game.player_list[x].some_float, 4.0)
            self.assertEqual(type(self.machine.game.player_list[x].some_float), float)
            self.assertEqual(self.machine.game.player_list[x].some_string, '4')
            self.assertEqual(type(self.machine.game.player_list[x].some_string), str)
            self.assertEqual(self.machine.game.player_list[x].some_other_string, 'hello')
            self.assertEqual(type(self.machine.game.player_list[x].some_other_string), str)
        # ad-hoc player attributes are mirrored in the vars dict
        self.machine.game.player.test = 7
        self.assertEqual(7, self.machine.game.player.test)
        self.assertEqual(7, self.machine.game.player.vars["test"])
        # machine variables configured in the yaml
        self.assertEqual(4, self.machine.variables.get_machine_var("test1"))
        self.assertEqual('5', self.machine.variables.get_machine_var("test2"))
    def test_event_kwargs(self):
        """add/set_with_kwargs post player_<var> events with extra kwargs attached."""
        self.fill_troughs()
        self.start_game()
        self.assertEqual(self.machine.game.player.some_var, 4)
        self.mock_event('player_some_var')
        self.machine.game.player.add_with_kwargs('some_var', 6, foo='bar')
        self.advance_time_and_run()
        self.assertEventCalledWith('player_some_var',
                                   value=10,
                                   prev_value=4,
                                   change=6,
                                   player_num=1,
                                   foo='bar')
        self.machine.game.player.set_with_kwargs('some_var', 1, bar='foo')
        self.advance_time_and_run()
        self.assertEventCalledWith('player_some_var',
                                   value=1,
                                   prev_value=10,
                                   change=-9,
                                   player_num=1,
                                   bar='foo')
| missionpinball/mpf | mpf/tests/test_PlayerVars.py | Python | mit | 2,380 | 0.006303 |
#!/home/tom/ab/android/python-for-android/build/python-install/bin/python2.7
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <barry@python.org>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
    """File-like sink that silently discards everything written to it.

    Used as the default DEBUGSTREAM so debug prints vanish unless
    --debug swaps in sys.stderr.
    """

    def write(self, msg):
        """Drop *msg* without recording it."""
        pass

    def flush(self):
        """No-op; nothing is ever buffered."""
        pass
# Debug output is discarded by default; parseargs() rebinds this to
# sys.stderr when --debug/-d is given.
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
    """Print the module usage text (and optional message) to stderr and exit.

    code is the process exit status; msg, if non-empty, is printed after
    the usage text.
    """
    print >> sys.stderr, __doc__ % globals()
    if msg:
        print >> sys.stderr, msg
    sys.exit(code)
class SMTPChannel(asynchat.async_chat):
    """Per-connection SMTP protocol handler (RFC 821 command subset).

    Implements a two-state machine: COMMAND mode parses one verb per
    line; DATA mode accumulates the message body until the
    <CRLF>.<CRLF> terminator, then hands the message to the owning
    server's process_message().
    """

    # Parser states.
    COMMAND = 0
    DATA = 1

    def __init__(self, server, conn, addr):
        # server is the owning SMTPServer; conn/addr come from accept().
        asynchat.async_chat.__init__(self, conn)
        self.__server = server
        self.__conn = conn
        self.__addr = addr
        self.__line = []                # buffered chunks of the current line
        self.__state = self.COMMAND
        self.__greeting = 0             # HELO argument once greeted, else 0
        self.__mailfrom = None
        self.__rcpttos = []
        self.__data = ''
        self.__fqdn = socket.getfqdn()
        try:
            self.__peer = conn.getpeername()
        except socket.error, err:
            # a race condition may occur if the other end is closing
            # before we can get the peername
            self.close()
            if err[0] != errno.ENOTCONN:
                raise
            return
        print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
        # Greet the client with the 220 service-ready banner.
        self.push('220 %s %s' % (self.__fqdn, __version__))
        self.set_terminator('\r\n')

    # Overrides base class for convenience
    def push(self, msg):
        # Append the CRLF line terminator required by SMTP.
        asynchat.async_chat.push(self, msg + '\r\n')

    # Implementation of base class abstract method
    def collect_incoming_data(self, data):
        self.__line.append(data)

    # Implementation of base class abstract method
    def found_terminator(self):
        # Called once per complete command line (or per complete DATA body).
        line = EMPTYSTRING.join(self.__line)
        print >> DEBUGSTREAM, 'Data:', repr(line)
        self.__line = []
        if self.__state == self.COMMAND:
            if not line:
                self.push('500 Error: bad syntax')
                return
            method = None
            i = line.find(' ')
            if i < 0:
                command = line.upper()
                arg = None
            else:
                command = line[:i].upper()
                arg = line[i+1:].strip()
            # Dispatch to smtp_<VERB>; unknown verbs get a 502.
            method = getattr(self, 'smtp_' + command, None)
            if not method:
                self.push('502 Error: command "%s" not implemented' % command)
                return
            method(arg)
            return
        else:
            if self.__state != self.DATA:
                self.push('451 Internal confusion')
                return
            # Remove extraneous carriage returns and de-transparency according
            # to RFC 821, Section 4.5.2.
            data = []
            for text in line.split('\r\n'):
                if text and text[0] == '.':
                    data.append(text[1:])
                else:
                    data.append(text)
            self.__data = NEWLINE.join(data)
            status = self.__server.process_message(self.__peer,
                                                   self.__mailfrom,
                                                   self.__rcpttos,
                                                   self.__data)
            # Reset the envelope for the next transaction on this channel.
            self.__rcpttos = []
            self.__mailfrom = None
            self.__state = self.COMMAND
            self.set_terminator('\r\n')
            if not status:
                self.push('250 Ok')
            else:
                self.push(status)

    # SMTP and ESMTP commands
    def smtp_HELO(self, arg):
        if not arg:
            self.push('501 Syntax: HELO hostname')
            return
        if self.__greeting:
            self.push('503 Duplicate HELO/EHLO')
        else:
            self.__greeting = arg
            self.push('250 %s' % self.__fqdn)

    def smtp_NOOP(self, arg):
        if arg:
            self.push('501 Syntax: NOOP')
        else:
            self.push('250 Ok')

    def smtp_QUIT(self, arg):
        # args is ignored
        self.push('221 Bye')
        self.close_when_done()

    # factored
    def __getaddr(self, keyword, arg):
        # Extract the address following e.g. 'FROM:' or 'TO:', unwrapping
        # optional angle brackets (but preserving the null address '<>').
        address = None
        keylen = len(keyword)
        if arg[:keylen].upper() == keyword:
            address = arg[keylen:].strip()
            if not address:
                pass
            elif address[0] == '<' and address[-1] == '>' and address != '<>':
                # Addresses can be in the form <person@dom.com> but watch out
                # for null address, e.g. <>
                address = address[1:-1]
        return address

    def smtp_MAIL(self, arg):
        print >> DEBUGSTREAM, '===> MAIL', arg
        address = self.__getaddr('FROM:', arg) if arg else None
        if not address:
            self.push('501 Syntax: MAIL FROM:<address>')
            return
        if self.__mailfrom:
            self.push('503 Error: nested MAIL command')
            return
        self.__mailfrom = address
        print >> DEBUGSTREAM, 'sender:', self.__mailfrom
        self.push('250 Ok')

    def smtp_RCPT(self, arg):
        print >> DEBUGSTREAM, '===> RCPT', arg
        if not self.__mailfrom:
            self.push('503 Error: need MAIL command')
            return
        address = self.__getaddr('TO:', arg) if arg else None
        if not address:
            self.push('501 Syntax: RCPT TO: <address>')
            return
        self.__rcpttos.append(address)
        print >> DEBUGSTREAM, 'recips:', self.__rcpttos
        self.push('250 Ok')

    def smtp_RSET(self, arg):
        if arg:
            self.push('501 Syntax: RSET')
            return
        # Resets the sender, recipients, and data, but not the greeting
        self.__mailfrom = None
        self.__rcpttos = []
        self.__data = ''
        self.__state = self.COMMAND
        self.push('250 Ok')

    def smtp_DATA(self, arg):
        if not self.__rcpttos:
            self.push('503 Error: need RCPT command')
            return
        if arg:
            self.push('501 Syntax: DATA')
            return
        # Switch to DATA mode; the body ends at <CRLF>.<CRLF>.
        self.__state = self.DATA
        self.set_terminator('\r\n.\r\n')
        self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
    """Async listening socket that spawns an SMTPChannel per connection.

    Subclasses must override process_message() to do something useful
    with each received message.
    """

    def __init__(self, localaddr, remoteaddr):
        # localaddr/remoteaddr are (host, port) tuples; remoteaddr is kept
        # for proxy subclasses that forward messages onward.
        self._localaddr = localaddr
        self._remoteaddr = remoteaddr
        asyncore.dispatcher.__init__(self)
        try:
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            # try to re-use a server port if possible
            self.set_reuse_addr()
            self.bind(localaddr)
            self.listen(5)
        except:
            # cleanup asyncore.socket_map before raising
            self.close()
            raise
        else:
            print >> DEBUGSTREAM, \
                '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
                self.__class__.__name__, time.ctime(time.time()),
                localaddr, remoteaddr)

    def handle_accept(self):
        # accept() may return None on a transient error; skip in that case.
        pair = self.accept()
        if pair is not None:
            conn, addr = pair
            print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
            channel = SMTPChannel(self, conn, addr)

    # API for "doing something useful with the message"
    def process_message(self, peer, mailfrom, rcpttos, data):
        """Override this abstract method to handle messages from the client.
        peer is a tuple containing (ipaddr, port) of the client that made the
        socket connection to our smtp port.
        mailfrom is the raw address the client claims the message is coming
        from.
        rcpttos is a list of raw addresses the client wishes to deliver the
        message to.
        data is a string containing the entire full text of the message,
        headers (if supplied) and all.  It has been `de-transparencied'
        according to RFC 821, Section 4.5.2.  In other words, a line
        containing a `.' followed by other text has had the leading dot
        removed.
        This function should return None, for a normal `250 Ok' response;
        otherwise it returns the desired response string in RFC 821 format.
        """
        raise NotImplementedError
class DebuggingServer(SMTPServer):
    """Server backend that simply prints each received message to stdout."""

    # Do something with the gathered message
    def process_message(self, peer, mailfrom, rcpttos, data):
        # inheaders flags whether we are still inside the header section;
        # an X-Peer header is printed just before the first blank line.
        inheaders = 1
        lines = data.split('\n')
        print '---------- MESSAGE FOLLOWS ----------'
        for line in lines:
            # headers first
            if inheaders and not line:
                print 'X-Peer:', peer[0]
                inheaders = 0
            print line
        print '------------ END MESSAGE ------------'
class PureProxy(SMTPServer):
    """Proxy backend that relays every message to the real smtpd.

    Known limitation (see module header): SMTP errors from the backend
    are not surfaced to the client.
    """

    def process_message(self, peer, mailfrom, rcpttos, data):
        # Insert an X-Peer header after the last existing header, then
        # relay the message to the backend at self._remoteaddr.
        lines = data.split('\n')
        # Look for the last header
        i = 0
        for line in lines:
            if not line:
                break
            i += 1
        lines.insert(i, 'X-Peer: %s' % peer[0])
        data = NEWLINE.join(lines)
        refused = self._deliver(mailfrom, rcpttos, data)
        # TBD: what to do with refused addresses?
        print >> DEBUGSTREAM, 'we got some refusals:', refused

    def _deliver(self, mailfrom, rcpttos, data):
        # Relay via SMTP; returns a dict mapping refused recipient ->
        # (error code, error message).
        import smtplib
        refused = {}
        try:
            s = smtplib.SMTP()
            s.connect(self._remoteaddr[0], self._remoteaddr[1])
            try:
                refused = s.sendmail(mailfrom, rcpttos, data)
            finally:
                s.quit()
        except smtplib.SMTPRecipientsRefused, e:
            print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
            refused = e.recipients
        except (socket.error, smtplib.SMTPException), e:
            print >> DEBUGSTREAM, 'got', e.__class__
            # All recipients were refused.  If the exception had an associated
            # error code, use it.  Otherwise, fake it with a non-triggering
            # exception code.
            errcode = getattr(e, 'smtp_code', -1)
            errmsg = getattr(e, 'smtp_error', 'ignore')
            for r in rcpttos:
                refused[r] = (errcode, errmsg)
        return refused
class MailmanProxy(PureProxy):
    """Experimental proxy that delivers list mail straight to GNU Mailman.

    Recipients that resolve to an existing Mailman list (optionally with
    an -admin/-owner/-request/-join/-leave suffix) are enqueued directly;
    everything else is forwarded like PureProxy.
    """

    def process_message(self, peer, mailfrom, rcpttos, data):
        from cStringIO import StringIO
        from Mailman import Utils
        from Mailman import Message
        from Mailman import MailList
        # If the message is to a Mailman mailing list, then we'll invoke the
        # Mailman script directly, without going through the real smtpd.
        # Otherwise we'll forward it to the local proxy for disposition.
        listnames = []
        for rcpt in rcpttos:
            local = rcpt.lower().split('@')[0]
            # We allow the following variations on the theme
            #   listname
            #   listname-admin
            #   listname-owner
            #   listname-request
            #   listname-join
            #   listname-leave
            parts = local.split('-')
            if len(parts) > 2:
                continue
            listname = parts[0]
            if len(parts) == 2:
                command = parts[1]
            else:
                command = ''
            if not Utils.list_exists(listname) or command not in (
                    '', 'admin', 'owner', 'request', 'join', 'leave'):
                continue
            listnames.append((rcpt, listname, command))
        # Remove all list recipients from rcpttos and forward what we're not
        # going to take care of ourselves.  Linear removal should be fine
        # since we don't expect a large number of recipients.
        for rcpt, listname, command in listnames:
            rcpttos.remove(rcpt)
        # If there's any non-list destined recipients left,
        print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
        if rcpttos:
            refused = self._deliver(mailfrom, rcpttos, data)
            # TBD: what to do with refused addresses?
            print >> DEBUGSTREAM, 'we got refusals:', refused
        # Now deliver directly to the list commands
        mlists = {}
        s = StringIO(data)
        msg = Message.Message(s)
        # These headers are required for the proper execution of Mailman.  All
        # MTAs in existence seem to add these if the original message doesn't
        # have them.
        if not msg.getheader('from'):
            msg['From'] = mailfrom
        if not msg.getheader('date'):
            msg['Date'] = time.ctime(time.time())
        for rcpt, listname, command in listnames:
            print >> DEBUGSTREAM, 'sending message to', rcpt
            # Cache MailList objects so each list is opened at most once.
            mlist = mlists.get(listname)
            if not mlist:
                mlist = MailList.MailList(listname, lock=0)
                mlists[listname] = mlist
            # dispatch on the type of command
            if command == '':
                # post
                msg.Enqueue(mlist, tolist=1)
            elif command == 'admin':
                msg.Enqueue(mlist, toadmin=1)
            elif command == 'owner':
                msg.Enqueue(mlist, toowner=1)
            elif command == 'request':
                msg.Enqueue(mlist, torequest=1)
            elif command in ('join', 'leave'):
                # TBD: this is a hack!
                if command == 'join':
                    msg['Subject'] = 'subscribe'
                else:
                    msg['Subject'] = 'unsubscribe'
                msg.Enqueue(mlist, torequest=1)
class Options:
    """Holder for parsed command-line options; parseargs() mutates this."""

    # Defaults: drop privileges to 'nobody' and proxy with PureProxy.
    setuid = 1
    classname = 'PureProxy'
def parseargs():
    """Parse command-line options and positional host:port specs.

    Returns an Options instance with setuid, classname, localhost,
    localport, remotehost and remoteport filled in.  Exits via usage()
    on any error.  May rebind the module-global DEBUGSTREAM.
    """
    global DEBUGSTREAM
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'nVhc:d',
            ['class=', 'nosetuid', 'version', 'help', 'debug'])
    except getopt.error, e:
        usage(1, e)
    options = Options()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-V', '--version'):
            print >> sys.stderr, __version__
            sys.exit(0)
        elif opt in ('-n', '--nosetuid'):
            options.setuid = 0
        elif opt in ('-c', '--class'):
            options.classname = arg
        elif opt in ('-d', '--debug'):
            DEBUGSTREAM = sys.stderr
    # parse the rest of the arguments
    if len(args) < 1:
        localspec = 'localhost:8025'
        remotespec = 'localhost:25'
    elif len(args) < 2:
        localspec = args[0]
        remotespec = 'localhost:25'
    elif len(args) < 3:
        localspec = args[0]
        remotespec = args[1]
    else:
        usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
    # split into host/port pairs
    i = localspec.find(':')
    if i < 0:
        usage(1, 'Bad local spec: %s' % localspec)
    options.localhost = localspec[:i]
    try:
        options.localport = int(localspec[i+1:])
    except ValueError:
        usage(1, 'Bad local port: %s' % localspec)
    i = remotespec.find(':')
    if i < 0:
        usage(1, 'Bad remote spec: %s' % remotespec)
    options.remotehost = remotespec[:i]
    try:
        options.remoteport = int(remotespec[i+1:])
    except ValueError:
        usage(1, 'Bad remote port: %s' % remotespec)
    return options
if __name__ == '__main__':
    options = parseargs()
    # Become nobody
    if options.setuid:
        try:
            import pwd
        except ImportError:
            print >> sys.stderr, \
                'Cannot import module "pwd"; try running with -n option.'
            sys.exit(1)
        nobody = pwd.getpwnam('nobody')[2]
        try:
            os.setuid(nobody)
        except OSError, e:
            if e.errno != errno.EPERM: raise
            print >> sys.stderr, \
                'Cannot setuid "nobody"; try running with -n option.'
            sys.exit(1)
    # Resolve the proxy class: a dotted name loads an external module,
    # otherwise the class is looked up in this module.
    classname = options.classname
    if "." in classname:
        lastdot = classname.rfind(".")
        mod = __import__(classname[:lastdot], globals(), locals(), [""])
        classname = classname[lastdot+1:]
    else:
        import __main__ as mod
    class_ = getattr(mod, classname)
    proxy = class_((options.localhost, options.localport),
                   (options.remotehost, options.remoteport))
    try:
        asyncore.loop()
    except KeyboardInterrupt:
        pass
"""
Google's clitable.py is inherently integrated to Linux:
This is a workaround for that (basically include modified clitable code without anything
that is Linux-specific).
_clitable.py is identical to Google's as of 2017-12-17
_texttable.py is identical to Google's as of 2017-12-17
_terminal.py is a highly stripped down version of Google's such that clitable.py works
https://github.com/google/textfsm/blob/master/clitable.py
"""
# Some of this code is from Google with the following license:
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import copy
import os
import re
import threading
import copyable_regex_object
import textfsm
from netmiko._textfsm import _texttable as texttable
class Error(Exception):
    """Root of this module's exception hierarchy."""
class IndexTableError(Error):
    """General IndexTable error."""
class CliTableError(Error):
    """Raised for errors specific to CliTable processing."""
class IndexTable(object):
    """Class that reads and stores comma-separated values as a TextTable.
    Stores a compiled regexp of the value for efficient matching.
    Includes functions to preprocess Columns (both compiled and uncompiled).
    Attributes:
      index: TextTable, the index file parsed into a texttable.
      compiled: TextTable, the table but with compiled regexp for each field.
    """

    def __init__(self, preread=None, precompile=None, file_path=None):
        """Create new IndexTable object.
        Args:
          preread: func, Pre-processing, applied to each field as it is read.
          precompile: func, Pre-compilation, applied to each field before compiling.
          file_path: String, Location of file to use as input.
        """
        self.index = None
        self.compiled = None
        if file_path:
            self._index_file = file_path
            self._index_handle = open(self._index_file, 'r')
            self._ParseIndex(preread, precompile)

    def __del__(self):
        """Close index handle."""
        # _index_handle only exists if a file_path was given to __init__.
        if hasattr(self, '_index_handle'):
            self._index_handle.close()

    def __len__(self):
        """Returns number of rows in table."""
        return self.index.size

    def __copy__(self):
        """Returns a copy of an IndexTable object."""
        # Shallow copy: the clone shares the open file handle and both
        # parsed tables with the original.
        clone = IndexTable()
        if hasattr(self, '_index_file'):
            # pylint: disable=protected-access
            clone._index_file = self._index_file
            clone._index_handle = self._index_handle
        clone.index = self.index
        clone.compiled = self.compiled
        return clone

    def __deepcopy__(self, memodict=None):
        """Returns a deepcopy of an IndexTable object."""
        # Deep copy re-opens the index file so the clone owns its own handle.
        clone = IndexTable()
        if hasattr(self, '_index_file'):
            # pylint: disable=protected-access
            clone._index_file = copy.deepcopy(self._index_file)
            clone._index_handle = open(clone._index_file, 'r')
        clone.index = copy.deepcopy(self.index)
        clone.compiled = copy.deepcopy(self.compiled)
        return clone

    def _ParseIndex(self, preread, precompile):
        """Reads index file and stores entries in TextTable.
        For optimisation reasons, a second table is created with compiled entries.
        Args:
          preread: func, Pre-processing, applied to each field as it is read.
          precompile: func, Pre-compilation, applied to each field before compiling.
        Raises:
          IndexTableError: If the column headers has illegal column labels.
        """
        self.index = texttable.TextTable()
        self.index.CsvToTable(self._index_handle)
        if preread:
            for row in self.index:
                for col in row.header:
                    row[col] = preread(col, row[col])
        self.compiled = copy.deepcopy(self.index)
        for row in self.compiled:
            for col in row.header:
                if precompile:
                    row[col] = precompile(col, row[col])
                if row[col]:
                    # Wrap so compiled patterns survive copy.deepcopy.
                    row[col] = copyable_regex_object.CopyableRegexObject(row[col])

    def GetRowMatch(self, attributes):
        """Returns the row number that matches the supplied attributes."""
        # Rows are tried in file order; 0 signals that no row matched.
        for row in self.compiled:
            try:
                for key in attributes:
                    # Silently skip attributes not present in the index file.
                    # pylint: disable=E1103
                    if key in row.header and row[key] and not row[key].match(attributes[key]):
                        # This line does not match, so break and try next row.
                        raise StopIteration()
                return row.row
            except StopIteration:
                pass
        return 0
class CliTable(texttable.TextTable):
    """Class that reads CLI output and parses into tabular format.
    Reads an index file and uses it to map command strings to templates. It then
    uses TextFSM to parse the command output (raw) into a tabular format.
    The superkey is the set of columns that contain data that uniquely defines the
    row, the key is the row number otherwise. This is typically gathered from the
    templates 'Key' value but is extensible.
    Attributes:
      raw: String, Unparsed command string from device/command.
      index_file: String, file where template/command mappings reside.
      template_dir: String, directory where index file and templates reside.
    """

    # Parse each template index only once across all instances.
    # Without this, the regexes are parsed at every call to CliTable().
    _lock = threading.Lock()
    INDEX = {}

    # pylint: disable=C6409
    def synchronised(func):
        """Synchronisation decorator."""

        # pylint: disable=E0213
        def Wrapper(main_obj, *args, **kwargs):
            main_obj._lock.acquire()  # pylint: disable=W0212
            try:
                return func(main_obj, *args, **kwargs)  # pylint: disable=E1102
            finally:
                main_obj._lock.release()  # pylint: disable=W0212
        return Wrapper
    # pylint: enable=C6409

    @synchronised
    def __init__(self, index_file=None, template_dir=None):
        """Create new CLiTable object.
        Args:
          index_file: String, file where template/command mappings reside.
          template_dir: String, directory where index file and templates reside.
        """
        # pylint: disable=E1002
        super(CliTable, self).__init__()
        self._keys = set()
        self.raw = None
        self.index_file = index_file
        self.template_dir = template_dir
        if index_file:
            self.ReadIndex(index_file)

    def ReadIndex(self, index_file=None):
        """Reads the IndexTable index file of commands and templates.
        Args:
          index_file: String, file where template/command mappings reside.
        Raises:
          CliTableError: A template column was not found in the table.
        """
        self.index_file = index_file or self.index_file
        fullpath = os.path.join(self.template_dir, self.index_file)
        # Reuse a previously parsed index from the class-level cache.
        if self.index_file and fullpath not in self.INDEX:
            self.index = IndexTable(self._PreParse, self._PreCompile, fullpath)
            self.INDEX[fullpath] = self.index
        else:
            self.index = self.INDEX[fullpath]
        # Does the IndexTable have the right columns.
        if 'Template' not in self.index.index.header:  # pylint: disable=E1103
            raise CliTableError("Index file does not have 'Template' column.")

    def _TemplateNamesToFiles(self, template_str):
        """Parses a string of templates into a list of file handles."""
        template_list = template_str.split(':')
        template_files = []
        try:
            for tmplt in template_list:
                template_files.append(
                    open(os.path.join(self.template_dir, tmplt), 'r'))
        except:  # noqa
            # Close any handles already opened before propagating the error.
            for tmplt in template_files:
                tmplt.close()
            raise
        return template_files

    def ParseCmd(self, cmd_input, attributes=None, templates=None):
        """Creates a TextTable table of values from cmd_input string.
        Parses command output with template/s. If more than one template is found
        subsequent tables are merged if keys match (dropped otherwise).
        Args:
          cmd_input: String, Device/command response.
          attributes: Dict, attribute that further refine matching template.
          templates: String list of templates to parse with. If None, uses index
        Raises:
          CliTableError: A template was not found for the given command.
        """
        # Store raw command data within the object.
        self.raw = cmd_input
        if not templates:
            # Find template in template index.
            row_idx = self.index.GetRowMatch(attributes)
            if row_idx:
                templates = self.index.index[row_idx]['Template']
            else:
                raise CliTableError('No template found for attributes: "%s"' %
                                    attributes)
        template_files = self._TemplateNamesToFiles(templates)
        try:
            # Re-initialise the table.
            self.Reset()
            self._keys = set()
            self.table = self._ParseCmdItem(self.raw, template_file=template_files[0])
            # Add additional columns from any additional tables.
            for tmplt in template_files[1:]:
                self.extend(self._ParseCmdItem(self.raw, template_file=tmplt),
                            set(self._keys))
        finally:
            for f in template_files:
                f.close()

    def _ParseCmdItem(self, cmd_input, template_file=None):
        """Creates Texttable with output of command.
        Args:
          cmd_input: String, Device response.
          template_file: File object, template to parse with.
        Returns:
          TextTable containing command output.
        Raises:
          CliTableError: A template was not found for the given command.
        """
        # Build FSM machine from the template.
        fsm = textfsm.TextFSM(template_file)
        if not self._keys:
            self._keys = set(fsm.GetValuesByAttrib('Key'))
        # Pass raw data through FSM.
        table = texttable.TextTable()
        table.header = fsm.header
        # Fill TextTable from record entries.
        for record in fsm.ParseText(cmd_input):
            table.Append(record)
        return table

    def _PreParse(self, key, value):
        """Executed against each field of each row read from index table."""
        if key == 'Command':
            # Expand '[[...]]' completion shorthand into an optional-suffix regexp.
            return re.sub(r'(\[\[.+?\]\])', self._Completion, value)
        else:
            return value

    def _PreCompile(self, key, value):
        """Executed against each field of each row before compiling as regexp."""
        if key == 'Template':
            # Template values are filenames, not match expressions.
            return
        else:
            return value

    def _Completion(self, match):
        # pylint: disable=C6114
        r"""Replaces double square brackets with variable length completion.
        Completion cannot be mixed with regexp matching or '\' characters
        i.e. '[[(\n)]] would become (\(n)?)?.'
        Args:
          match: A regex Match() object.
        Returns:
          String of the format '(a(b(c(d)?)?)?)?'.
        """
        # Strip the outer '[[' & ']]' and replace with ()? regexp pattern.
        word = str(match.group())[2:-2]
        return '(' + ('(').join(word) + ')?' * len(word)

    def LabelValueTable(self, keys=None):
        """Return LabelValue with FSM derived keys."""
        keys = keys or self.superkey
        # pylint: disable=E1002
        return super(CliTable, self).LabelValueTable(keys)

    # pylint: disable=W0622,C6409
    def sort(self, cmp=None, key=None, reverse=False):
        """Overrides sort func to use the KeyValue for the key."""
        if not key and self._keys:
            key = self.KeyValue
        super(CliTable, self).sort(cmp=cmp, key=key, reverse=reverse)
    # pylint: enable=W0622

    def AddKeys(self, key_list):
        """Mark additional columns as being part of the superkey.
        Supplements the Keys already extracted from the FSM template.
        Useful when adding new columns to existing tables.
        Note: This will impact attempts to further 'extend' the table as the
        superkey must be common between tables for successful extension.
        Args:
          key_list: list of header entries to be included in the superkey.
        Raises:
          KeyError: If any entry in list is not a valid header entry.
        """
        for keyname in key_list:
            if keyname not in self.header:
                raise KeyError("'%s'" % keyname)
        self._keys = self._keys.union(set(key_list))

    @property
    def superkey(self):
        """Returns a set of column names that together constitute the superkey."""
        # Preserves header order rather than set order.
        sorted_list = []
        for header in self.header:
            if header in self._keys:
                sorted_list.append(header)
        return sorted_list

    def KeyValue(self, row=None):
        """Returns the super key value for the row."""
        if not row:
            if self._iterator:
                # If we are inside an iterator use current row iteration.
                row = self[self._iterator]
            else:
                row = self.row
        # If no superkey then use row number.
        if not self.superkey:
            return ['%s' % row.row]
        sorted_list = []
        for header in self.header:
            if header in self.superkey:
                sorted_list.append(row[header])
        return sorted_list
| fooelisa/netmiko | netmiko/_textfsm/_clitable.py | Python | mit | 12,990 | 0.007467 |
from coc_war_planner.api.permissions import CreateNotAllowed
from coc_war_planner.api.permissions import IsChiefOrReadOnly
from coc_war_planner.api.permissions import IsUserOrReadOnly
from coc_war_planner.api.permissions import IsOwnerOrReadOnly
from coc_war_planner.api.permissions import IsNotPartOfClanOrCreateNotAllowed
from coc_war_planner.api.serializers import ClanSerializer
from coc_war_planner.api.serializers import ClanPutSerializer
from coc_war_planner.api.serializers import MemberGetSerializer
from coc_war_planner.api.serializers import MemberSerializer
from coc_war_planner.api.serializers import TroopsPostSerializer
from coc_war_planner.api.serializers import TroopsPutSerializer
from coc_war_planner.api.serializers import TroopsGetSerializer
from coc_war_planner.core.models import Clan
from coc_war_planner.core.models import Member
from coc_war_planner.core.models import Troops
from coc_war_planner.core.models import TroopLevel
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from rest_framework import filters
from rest_framework import permissions
from rest_framework import serializers
from rest_framework import viewsets
class ClanViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for clans, searchable/orderable by name and pin.

    Anyone may read; only an authenticated chief may modify a clan, and
    a user who already belongs to a clan may not create another one.
    """

    queryset = Clan.objects.all()
    serializer_class = ClanSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsChiefOrReadOnly,
                          IsNotPartOfClanOrCreateNotAllowed)
    filter_backends = (filters.OrderingFilter, filters.SearchFilter,)
    ordering_fields = ('name', 'pin',)
    ordering = 'name'  # default sort key
    search_fields = ('name', 'pin',)

    def perform_create(self, serializer):
        """Create the clan with the requester as chief and join them to it."""
        member = self.request.user.member
        clan = serializer.save(chief=member)
        member.clan = clan
        member.save()

    def get_serializer_class(self):
        """Serve the restricted serializer for PUT, the full one otherwise."""
        return ClanPutSerializer if self.request.method == 'PUT' else ClanSerializer
class MemberViewSet(viewsets.ModelViewSet):
    """Read/update endpoints for members; creation is disallowed.

    Only the member's own user account may modify the record.
    """

    queryset = Member.objects.all()
    serializer_class = MemberSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          CreateNotAllowed,
                          IsUserOrReadOnly,)

    def get_serializer_class(self):
        """Use the read-oriented serializer for GET requests."""
        return MemberGetSerializer if self.request.method == 'GET' else MemberSerializer
class TroopsViewSet(viewsets.ModelViewSet):
    """Endpoints for a member's troops, filterable via ?member_id=."""

    permission_classes = (permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly,)

    def get_serializer_class(self):
        """Pick the serializer that matches the HTTP verb."""
        verb = self.request.method
        if verb == 'POST':
            return TroopsPostSerializer
        if verb == 'PUT':
            return TroopsPutSerializer
        return TroopsGetSerializer

    def get_queryset(self):
        """Troops of ?member_id= (defaulting to the requester's own member)."""
        member_id = self.request.GET.get('member_id', self.request.user.member.id)
        # NOTE(review): with the default above, member_id can only be None
        # when the requester's member has no id — presumably a guard for
        # that case; confirm against callers.
        if member_id is None:
            raise serializers.ValidationError({
                'member_id': 'Parameter is missing.'
            })
        troops = Troops.objects.filter(member_id=member_id)
        troops_id = self.kwargs.get(self.lookup_field)
        if troops_id:
            troops = troops.filter(pk=troops_id)
        return troops
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest dictionary.
{
    'name': "Purchase orders - Force number",
    'version': '0.1',
    'category': 'Purchase Management',
    'summary': "Force purchase orders numeration",
    'description': """
This simple module allows to specify the number to use when creating purchase
orders. If user does not change the default value ('/'), the standard sequence
is used.""",
    'author': "Agile Business Group,Odoo Community Association (OCA)",
    'website': 'http://www.agilebg.com',
    'license': 'AGPL-3',
    # Only the core purchase module is required.
    "depends": ['purchase'],
    "data": [
        'purchase_view.xml',
    ],
    "demo": [],
    "active": False,
    # Marked not installable — presumably disabled for this branch; confirm.
    "installable": False
}
| andrius-preimantas/purchase-workflow | purchase_order_force_number/__openerp__.py | Python | agpl-3.0 | 1,589 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# Presence of this file marks a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'

system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')

# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    # Darwin uses DYLD_LIBRARY_PATH in place of LD_LIBRARY_PATH.
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': 'bin',
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    """Generate shell code that undoes workspace-induced environment changes.

    For every variable listed in *env_var_subfolders*, modifications made on
    behalf of the workspaces found in CMAKE_PREFIX_PATH are unrolled.
    *environ* is updated in place and a list of shell assignment lines is
    returned (prefixed with an explanatory comment when non-empty).
    Modifications performed by environment hooks are not covered.
    """
    snapshot = copy.copy(environ)
    shell_lines = []
    for name in sorted(env_var_subfolders.keys()):
        folders = env_var_subfolders[name]
        folder_list = folders if isinstance(folders, list) else [folders]
        rolled_back = _rollback_env_variable(snapshot, name, folder_list)
        if rolled_back is None:
            continue
        environ[name] = rolled_back
        shell_lines.append(assignment(name, rolled_back))
    if shell_lines:
        shell_lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return shell_lines
def _rollback_env_variable(environ, name, subfolders):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param subfolders: list of str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable.
    '''
    value = environ[name] if name in environ else ''
    # Split into individual entries, dropping empty segments.
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    for subfolder in subfolders:
        if subfolder:
            # Normalize the subfolder: strip one leading/trailing separator
            # so os.path.join produces a clean workspace-relative path.
            if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
                subfolder = subfolder[1:]
            if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
                subfolder = subfolder[:-1]
        for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
            path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
            path_to_remove = None
            for env_path in env_paths:
                # Compare ignoring a single trailing separator on the entry.
                env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
                if env_path_clean == path_to_find:
                    path_to_remove = env_path
                    break
            if path_to_remove:
                # Only the first matching entry is removed per workspace.
                env_paths.remove(path_to_remove)
                value_modified = True
    new_value = os.pathsep.join(env_paths)
    # None signals "nothing changed" so the caller can skip the assignment.
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    '''
    Based on CMAKE_PREFIX_PATH return all catkin workspaces.
    :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
    :param include_non_existing: also keep prefix entries that do not exist on disk, ``bool``
    '''
    # Every entry of CMAKE_PREFIX_PATH is a candidate workspace root.
    prefix_value = environ.get('CMAKE_PREFIX_PATH', '')
    workspaces = []
    for candidate in prefix_value.split(os.pathsep):
        if not candidate:
            continue
        has_marker = os.path.isfile(os.path.join(candidate, CATKIN_MARKER_FILE))
        is_fuerte = include_fuerte and candidate.startswith('/opt/ros/fuerte')
        is_missing = include_non_existing and not os.path.exists(candidate)
        if has_marker or is_fuerte or is_missing:
            workspaces.append(candidate)
    return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code to prepend environment variables
    for the all workspaces.

    :param workspaces: os.pathsep-joined string of workspace roots.
    '''
    lines = []
    lines.append(comment('prepend folders of workspaces to environment variables'))
    paths = [path for path in workspaces.split(os.pathsep) if path]
    # CMAKE_PREFIX_PATH is handled first and with an empty subfolder (the
    # workspace root itself is the entry).
    prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
    for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
        subfolder = env_var_subfolders[key]
        prefix = _prefix_env_variable(environ, key, paths, subfolder)
        lines.append(prepend(environ, key, prefix))
    return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
    """Return shell code assigning *value* to the variable *key*."""
    if IS_WINDOWS:
        return 'set %s=%s' % (key, value)
    return 'export %s="%s"' % (key, value)
def comment(msg):
    """Return *msg* formatted as a shell comment for the current platform."""
    template = 'REM %s' if IS_WINDOWS else '# %s'
    return template % msg
def prepend(environ, key, prefix):
    """Return shell code prepending *prefix* to the variable *key*.

    When the variable is unset or empty a plain assignment is emitted
    instead of a self-referencing expansion.
    """
    if key not in environ or not environ[key]:
        return assignment(key, prefix)
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for the all workspaces.

    Later workspaces in CMAKE_PREFIX_PATH win: the list is walked in reverse
    and a hook with the same filename replaces an earlier one, so generated
    variables reflect the highest-priority workspace for each hook name.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))
    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    # Maps hook filename -> full path, used to deduplicate by filename.
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    # Generic hooks are .sh (.bat on Windows); shell-specific hooks use the
    # extension of the current shell (e.g. .bash, .zsh) when known.
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    # Generic hooks run before shell-specific ones.
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)
        # environment at generation time
        # NOTE: this prefix path is baked in by the build that generated this
        # file and is machine-specific.
        CMAKE_PREFIX_PATH = '/home/team-5472/Desktop/Self-Driving-Car/ros/devel;/opt/ros/kinetic'.split(';')
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
        environ = dict(os.environ)
        lines = []
        # --extend keeps the inherited environment instead of rolling it back.
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        # The generated shell code is printed; the calling setup script evals it.
        print('\n'.join(lines))
        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise
    sys.exit(0)
| Aramist/Self-Driving-Car | ros/install/_setup_util.py | Python | mit | 12,461 | 0.002568 |
#!/usr/bin/python
"""
Small web application to retrieve genes from the tomato genome
annotation involved to a specified pathways.
"""
import flask
from flaskext.wtf import Form, TextField
import ConfigParser
import datetime
import json
import os
import rdflib
import urllib
# Configuration is read once, at import time, from path2gene.cfg located
# next to this module.
CONFIG = ConfigParser.ConfigParser()
CONFIG.readfp(open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
    'path2gene.cfg')))
# Address of the sparql server to query.
SERVER = CONFIG.get('path2gene', 'sparql_server')
# Create the application.
APP = flask.Flask(__name__)
APP.secret_key = CONFIG.get('path2gene', 'secret_key')
# Stores in which graphs are the different source of information
# (option name -> named-graph URI from the [graph] section).
GRAPHS = {option: CONFIG.get('graph', option) for option in CONFIG.options('graph')}
class PathwayForm(Form):
    """ Simple text field form to input the pathway of interest.
    """
    # Free-text field; its value is used as a case-insensitive substring
    # match against pathway descriptions in the SPARQL queries.
    pathway_name = TextField('Pathway name (or part of it)')
def search_pathway_in_db(name):
    """ Search the uniprot database for pathways having the given string
    in their name. It returns a list of these pathways.
    @param name, a string, name or part of the name of the pathway to
    search in uniprot.
    @return, a list of the pathway names found for having the given
    string, or None when the SPARQL query returned no parseable data.
    """
    # NOTE(review): ``name`` is interpolated into the SPARQL regex filter
    # without escaping — regex metacharacters in user input will alter the
    # query; confirm this is acceptable for this internal tool.
    query = '''
    PREFIX gene:<http://pbr.wur.nl/GENE#>
    PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
    PREFIX uniprot:<http://purl.uniprot.org/core/>

    SELECT DISTINCT ?pathdesc
    FROM <%(uniprot)s>
    WHERE{
        ?prot uniprot:annotation ?annot .
        ?annot rdfs:seeAlso ?url .
        ?annot rdfs:comment ?pathdesc .
        FILTER (
            regex(?pathdesc, "%(search)s", "i")
        )
    } ORDER BY ASC(?pathdesc)
    ''' % {'search': name, 'uniprot': GRAPHS['uniprot']}
    data_js = sparql_query(query, SERVER)
    if not data_js:
        # Implicitly returns None — callers must handle a falsy result.
        return
    pathways = []
    for entry in data_js['results']['bindings']:
        pathways.append(entry['pathdesc']['value'])
    return pathways
def get_gene_of_pathway(pathway):
    """ Retrieve all the gene associated with pathways containing the
    given string.
    @param pathway, a string, name (or substring) of the pathway for which
    to retrieve the genes in the tomato genome annotation.
    @return, a hash of the genes name and description found to be
    associated with the specified pathway, or None when the SPARQL query
    returned no parseable data.
    """
    query = '''
    PREFIX gene:<http://pbr.wur.nl/GENE#>
    PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
    PREFIX uniprot:<http://purl.uniprot.org/core/>

    SELECT DISTINCT ?gene ?desc ?pathdesc
    FROM <%(itag)s>
    FROM <%(uniprot)s>
    WHERE{
        ?geneobj gene:Protein ?prot .
        ?geneobj gene:Description ?desc .
        ?geneobj gene:FeatureName ?gene .
        ?prot uniprot:annotation ?annot .
        ?annot rdfs:seeAlso ?url .
        ?annot rdfs:comment ?pathdesc .
        FILTER (
            regex(?pathdesc, "%(search)s", "i")
        )
    } ORDER BY ASC(?gene)
    ''' % {'search': pathway, 'uniprot': GRAPHS['uniprot'],
        'itag': GRAPHS['itag']}
    data_js = sparql_query(query, SERVER)
    if not data_js:
        # Implicitly returns None — callers must handle a falsy result.
        return
    genes = {}
    for entry in data_js['results']['bindings']:
        # gene id -> [description, matching pathway description]
        genes[entry['gene']['value']] = [entry['desc']['value'],
            entry['pathdesc']['value']]
    return genes
def get_gene_of_pathway_strict(pathway):
    """ Retrieve all the gene associated with the given pathway.

    Unlike get_gene_of_pathway, the pathway description must match the
    given string exactly (no substring/regex matching).
    @param pathway, a string, exact name of the pathway for which to
    retrieve the genes in the tomato genome annotation.
    @return, a hash of the genes name and description found to be
    associated with the specified pathway, or None when the SPARQL query
    returned no parseable data.
    """
    query = '''
    PREFIX gene:<http://pbr.wur.nl/GENE#>
    PREFIX rdfs:<http://www.w3.org/2000/01/rdf-schema#>
    PREFIX uniprot:<http://purl.uniprot.org/core/>

    SELECT DISTINCT ?gene ?desc
    FROM <%(itag)s>
    FROM <%(uniprot)s>
    WHERE{
        ?geneobj gene:Protein ?prot .
        ?geneobj gene:Description ?desc .
        ?geneobj gene:FeatureName ?gene .
        ?prot uniprot:annotation ?annot .
        ?annot rdfs:seeAlso ?url .
        ?annot rdfs:comment "%(search)s" .
    } ORDER BY ASC(?gene)
    ''' % {'search': pathway, 'uniprot': GRAPHS['uniprot'],
        'itag': GRAPHS['itag']}
    data_js = sparql_query(query, SERVER)
    if not data_js:
        # Implicitly returns None — callers must handle a falsy result.
        return
    genes = {}
    for entry in data_js['results']['bindings']:
        # gene id -> [description, the exact pathway that was searched]
        genes[entry['gene']['value']] = [entry['desc']['value'],
            pathway]
    return genes
def sparql_query(query, server, output_format='application/json'):
    """ Runs the given SPARQL query against the desired sparql endpoint
    and return the output in the format asked (default being rdf/xml).
    @param query, the string of the sparql query that should be ran.
    @param server, a string, the url of the sparql endpoint that we want
    to run query against.
    @param output_format, specifies in which format we want to have the output.
    Defaults to `application/json` but can also be `application/rdf+xml`.
    @return, a JSON object, representing the output of the provided
    sparql query; an empty dict when the response is not valid JSON
    (e.g. when a non-JSON output_format was requested or the server
    returned an error page).
    """
    # Parameter set understood by a Virtuoso-style SPARQL endpoint.
    params = {
        'default-graph': '',
        'should-sponge': 'soft',
        'query': query,
        'debug': 'off',
        'timeout': '',
        'format': output_format,
        'save': 'display',
        'fname': ''
    }
    querypart = urllib.urlencode(params)
    # POST the query (passing data to urlopen makes it a POST request).
    response = urllib.urlopen(server, querypart).read()
    try:
        output = json.loads(response)
    except ValueError:
        output = {}
    return output
## Web-app
@APP.route('/', methods=['GET', 'POST'])
def index():
    """ Shows the front page.
    All the content of this page is in the index.html file under the
    templates directory. The file is full html and has no templating
    logic within.
    """
    # Poor-man's access log on stdout.
    print 'path2gene %s -- %s -- %s' % (datetime.datetime.now(),
        flask.request.remote_addr, flask.request.url)
    # CSRF disabled: the form only drives a read-only search redirect.
    form = PathwayForm(csrf_enabled=False)
    if form.validate_on_submit():
        return flask.redirect(flask.url_for('search_pathway',
            name=form.pathway_name.data))
    return flask.render_template('index.html', form=form)
@APP.route('/search/<name>')
def search_pathway(name):
    """ Search the database for pathways containing the given string.
    """
    # Poor-man's access log on stdout.
    print 'path2gene %s -- %s -- %s' % (datetime.datetime.now(),
        flask.request.remote_addr, flask.request.url)
    # NOTE(review): search_pathway_in_db returns None when the SPARQL
    # query fails; the loop below would then raise TypeError — confirm
    # whether an empty-result guard is needed here.
    pathways = search_pathway_in_db(name)
    # "Core" pathway families: the part before the first ';', suffixed
    # with '*' to mark them as prefix searches for the pathway() view.
    core = []
    for path in pathways:
        core.append('%s*' % path.split(';')[0].strip())
    core = list(set(core))
    return flask.render_template('search.html', data=pathways,
        search=name, core=core)
@APP.route('/path/<path:pathway>')
def pathway(pathway):
    """ Show for the given pathways all the genes found to be related.

    A trailing '*' on the pathway name selects substring matching;
    otherwise the pathway description must match exactly.
    """
    # Poor-man's access log on stdout.
    print 'path2gene %s -- %s -- %s' % (datetime.datetime.now(),
        flask.request.remote_addr, flask.request.url)
    if pathway.endswith('*'):
        genes = get_gene_of_pathway(pathway[:-1])
    else:
        genes = get_gene_of_pathway_strict(pathway)
    geneids = genes.keys()
    geneids.sort()
    return flask.render_template('output.html', pathway=pathway,
        genes=genes, geneids=geneids)
@APP.route('/csv/<path:pathway>.csv')
def generate_csv(pathway):
    """ Generate a comma separated value file containing all the
    information.
    """
    # Poor-man's access log on stdout.
    print 'path2gene %s -- %s -- %s' % (datetime.datetime.now(),
        flask.request.remote_addr, flask.request.url)
    # Regenerate the informations
    # (same '*' convention as the pathway() view: trailing '*' means
    # substring matching).
    if pathway.endswith('*'):
        genes = get_gene_of_pathway(pathway[:-1])
    else:
        genes = get_gene_of_pathway_strict(pathway)
    # NOTE(review): values are not quoted/escaped, so descriptions
    # containing commas will shift columns — confirm acceptable.
    string = 'Gene ID, Gene description, Pathway\n'
    for gene in genes:
        string = string + "%s, %s, %s\n" % (gene, genes[gene][0],
            genes[gene][1])
    return flask.Response(string, mimetype='application/excel')
if __name__ == '__main__':
    # Development entry point: run the built-in Flask server with debug on.
    APP.debug = True
    APP.run()
| PBR/path2gene | path2gene.py | Python | bsd-3-clause | 8,068 | 0.005949 |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from blinkpy.common.host_mock import MockHost
class MockBlinkTool(MockHost):
    """A MockHost that additionally reports a harmless tool path."""

    def __init__(self, *args, **kwargs):
        super(MockBlinkTool, self).__init__(*args, **kwargs)

    def path(self):
        # Pretend the tool executable is ``echo`` so spawned commands
        # are inert in tests.
        return 'echo'
| scheib/chromium | third_party/blink/tools/blinkpy/tool/mock_tool.py | Python | bsd-3-clause | 1,739 | 0 |
"""
URLs for the Enrollment API
"""
from django.conf import settings
from django.conf.urls import url
from .views import (
CourseEnrollmentsApiListView,
EnrollmentCourseDetailView,
EnrollmentListView,
EnrollmentUserRolesView,
EnrollmentView,
UnenrollmentView
)
urlpatterns = [
    # Enrollment detail for a specific user in a specific course.
    url(r'^enrollment/{username},{course_key}$'.format(
        username=settings.USERNAME_PATTERN,
        course_key=settings.COURSE_ID_PATTERN),
        EnrollmentView.as_view(), name='courseenrollment'),
    # Enrollment detail for the requesting user in a specific course.
    url(r'^enrollment/{course_key}$'.format(course_key=settings.COURSE_ID_PATTERN),
        EnrollmentView.as_view(), name='courseenrollment'),
    # List of the requesting user's enrollments.
    url(r'^enrollment$', EnrollmentListView.as_view(), name='courseenrollments'),
    # Paginated list of enrollments across users/courses (API view).
    url(r'^enrollments/?$', CourseEnrollmentsApiListView.as_view(), name='courseenrollmentsapilist'),
    # Enrollment details (modes, etc.) for a given course.
    url(r'^course/{course_key}$'.format(course_key=settings.COURSE_ID_PATTERN),
        EnrollmentCourseDetailView.as_view(), name='courseenrollmentdetails'),
    # Unenroll a user from a course.
    url(r'^unenroll/$', UnenrollmentView.as_view(), name='unenrollment'),
    # Roles held by the requesting user.
    url(r'^roles/$', EnrollmentUserRolesView.as_view(), name='roles'),
]
| stvstnfrd/edx-platform | openedx/core/djangoapps/enrollments/urls.py | Python | agpl-3.0 | 1,148 | 0.002613 |
def getitem(v, d):
    """Return the value of entry d in v (zero when d has no explicit entry)."""
    assert d in v.D
    return v.f.get(d, 0)
def setitem(v, d, val):
    """Set the entry of v labeled d to val (d must belong to the domain)."""
    assert d in v.D
    v.f[d] = val
def equal(u, v):
    """Return True iff u and v agree on every label (missing entries are 0)."""
    assert u.D == v.D
    labels = set(u.f) | set(v.f)
    return all(u.f.get(k, 0) == v.f.get(k, 0) for k in labels)
def add(u, v):
    """Return the vector sum of u and v as a new Vec."""
    assert u.D == v.D
    # Any label with an explicit entry in either vector gets the summed
    # value; absent entries contribute zero.
    f = {k: u.f.get(k, 0) + v.f.get(k, 0) for k in set(u.f) | set(v.f)}
    return Vec(u.D | v.D, f)
def dot(u, v):
    """Return the dot product of u and v.

    Only labels explicitly present in both vectors can contribute a
    non-zero term, so the sum runs over that intersection.
    """
    assert u.D == v.D
    shared = set(u.f) & set(v.f)
    return sum(u.f[k] * v.f[k] for k in shared)
def scalar_mul(v, alpha):
    """Return the scalar-vector product alpha times v as a new Vec."""
    return Vec(v.D, {label: alpha * value for label, value in v.f.items()})
def neg(v):
    """Return the additive inverse of v (every entry negated)."""
    return scalar_mul(v, -1)
def toStr(v):
"pretty-printing"
try:
D_list = sorted(v.D)
except TypeError:
D_list = sorted(v.D, key=hash)
numdec = 3
wd = dict([(k,(1+max(len(str(k)), len('{0:.{1}G}'.format(v[k], numdec))))) if isinstance(v[k], int) or isinstance(v[k], float) else (k,(1+max(len(str(k)), len(str(v[k]))))) for k in D_list])
# w = 1+max([len(str(k)) for k in D_list]+[len('{0:.{1}G}'.format(value,numdec)) for value in v.f.values()])
s1 = ''.join(['{0:>{1}}'.format(k,wd[k]) for k in D_list])
s2 = ''.join(['{0:>{1}.{2}G}'.format(v[k],wd[k],numdec) if isinstance(v[k], int) or isinstance(v[k], float) else '{0:>{1}}'.format(v[k], wd[k]) for k in D_list])
return "\n" + s1 + "\n" + '-'*sum(wd.values()) +"\n" + s2
##### NO NEED TO MODIFY BELOW HERE #####
class Vec:
    """
    A vector has two fields:
    D - the domain (a set)
    f - a dictionary mapping (some) domain elements to field elements
        elements of D not appearing in f are implicitly mapped to zero

    Most behavior is delegated to the module-level functions above by
    assigning them directly as special methods.
    """
    def __init__(self, labels, function):
        self.D = labels
        self.f = function
    __getitem__ = getitem
    __setitem__ = setitem
    __neg__ = neg
    __rmul__ = scalar_mul #if left arg of * is primitive, assume it's a scalar
    def __mul__(self,other):
        #If other is a vector, returns the dot product of self and other
        if isinstance(other, Vec):
            return dot(self,other)
        else:
            return NotImplemented # Will cause other.__rmul__(self) to be invoked
    def __truediv__(self,other): # Scalar division
        return (1/other)*self
    __add__ = add
    def __radd__(self, other):
        "Hack to allow sum(...) to work with vectors"
        # sum() starts from 0, so 0 + vec must yield the vector itself.
        # NOTE(review): for any other left operand this implicitly returns
        # None instead of raising — confirm whether that is intended.
        if other == 0:
            return self
    def __sub__(self, other):
        "Returns a vector which is the difference of a and b."
        return self+(-other)
    __eq__ = equal
    __str__ = toStr
    def __repr__(self):
        return "Vec(" + str(self.D) + "," + str(self.f) + ")"
    def copy(self):
        "Don't make a new copy of the domain D"
        return Vec(self.D, self.f.copy())
| tri2sing/LinearAlgebraPython | vec.py | Python | apache-2.0 | 3,573 | 0.012315 |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from catalyst.utils.argcheck import (
verify_callable_argspec,
Argument,
NoStarargs,
UnexpectedStarargs,
NoKwargs,
UnexpectedKwargs,
NotCallable,
NotEnoughArguments,
TooManyArguments,
MismatchedArguments,
)
class TestArgCheck(TestCase):
    """Unit tests for ``verify_callable_argspec``: callability, *args and
    **kwargs expectations, expected argument lists, and default handling."""
    def test_not_callable(self):
        """
        Check the results of a non-callable object.
        """
        not_callable = 'a'
        with self.assertRaises(NotCallable):
            verify_callable_argspec(not_callable)
    def test_no_starargs(self):
        """
        Tests when a function does not have *args and it was expected.
        """
        def f(a):
            pass
        with self.assertRaises(NoStarargs):
            verify_callable_argspec(f, expect_starargs=True)
    def test_starargs(self):
        """
        Tests when a function has *args and it was expected.
        """
        def f(*args):
            pass
        verify_callable_argspec(f, expect_starargs=True)
    def test_unexcpected_starargs(self):
        """
        Tests a function that unexpectedly accepts *args.
        """
        def f(*args):
            pass
        with self.assertRaises(UnexpectedStarargs):
            verify_callable_argspec(f, expect_starargs=False)
    def test_ignore_starargs(self):
        """
        Tests checking a function ignoring the presence of *args.
        """
        def f(*args):
            pass
        def g():
            pass
        verify_callable_argspec(f, expect_starargs=Argument.ignore)
        verify_callable_argspec(g, expect_starargs=Argument.ignore)
    def test_no_kwargs(self):
        """
        Tests when a function does not have **kwargs and it was expected.
        """
        def f():
            pass
        with self.assertRaises(NoKwargs):
            verify_callable_argspec(f, expect_kwargs=True)
    def test_kwargs(self):
        """
        Tests when a function has **kwargs and it was expected.
        """
        def f(**kwargs):
            pass
        verify_callable_argspec(f, expect_kwargs=True)
    def test_unexpected_kwargs(self):
        """
        Tests a function that unexpectedly accepts **kwargs.
        """
        def f(**kwargs):
            pass
        with self.assertRaises(UnexpectedKwargs):
            verify_callable_argspec(f, expect_kwargs=False)
    def test_ignore_kwargs(self):
        """
        Tests checking a function ignoring the presence of **kwargs.
        """
        def f(**kwargs):
            pass
        def g():
            pass
        verify_callable_argspec(f, expect_kwargs=Argument.ignore)
        verify_callable_argspec(g, expect_kwargs=Argument.ignore)
    def test_arg_subset(self):
        """
        Tests when the args are a subset of the expectations.
        """
        def f(a, b):
            pass
        with self.assertRaises(NotEnoughArguments):
            verify_callable_argspec(
                f, [Argument('a'), Argument('b'), Argument('c')]
            )
    def test_arg_superset(self):
        """
        Tests when the args are a superset of the expectations.
        """
        def f(a, b, c):
            pass
        with self.assertRaises(TooManyArguments):
            verify_callable_argspec(f, [Argument('a'), Argument('b')])
    def test_no_default(self):
        """
        Tests when an argument expects a default and it is not present.
        """
        def f(a):
            pass
        with self.assertRaises(MismatchedArguments):
            verify_callable_argspec(f, [Argument('a', 1)])
    def test_default(self):
        """
        Tests when an argument expects a default and it is present.
        """
        def f(a=1):
            pass
        verify_callable_argspec(f, [Argument('a', 1)])
    def test_ignore_default(self):
        """
        Tests that ignoring defaults works as intended.
        """
        def f(a=1):
            pass
        verify_callable_argspec(f, [Argument('a')])
    def test_mismatched_args(self):
        """
        Tests when the provided argument names do not match expectations.
        """
        def f(a, b):
            pass
        with self.assertRaises(MismatchedArguments):
            verify_callable_argspec(f, [Argument('c'), Argument('d')])
    def test_ignore_args(self):
        """
        Tests the ignore argument list feature.
        """
        def f(a):
            pass
        def g():
            pass
        h = 'not_callable'
        verify_callable_argspec(f)
        verify_callable_argspec(g)
        with self.assertRaises(NotCallable):
            verify_callable_argspec(h)
    def test_out_of_order(self):
        """
        Tests the case where arguments are not in the correct order.
        """
        def f(a, b):
            pass
        with self.assertRaises(MismatchedArguments):
            verify_callable_argspec(f, [Argument('b'), Argument('a')])
    def test_wrong_default(self):
        """
        Tests the case where a default is expected, but the default provided
        does not match the one expected.
        """
        def f(a=1):
            pass
        with self.assertRaises(MismatchedArguments):
            verify_callable_argspec(f, [Argument('a', 2)])
    def test_any_default(self):
        """
        Tests the any_default option.
        """
        def f(a=1):
            pass
        def g(a=2):
            pass
        def h(a):
            pass
        expected_args = [Argument('a', Argument.any_default)]
        verify_callable_argspec(f, expected_args)
        verify_callable_argspec(g, expected_args)
        with self.assertRaises(MismatchedArguments):
            verify_callable_argspec(h, expected_args)
    def test_ignore_name(self):
        """
        Tests ignoring a param name.
        """
        def f(a):
            pass
        def g(b):
            pass
        def h(c=1):
            pass
        expected_args = [Argument(Argument.ignore, Argument.no_default)]
        verify_callable_argspec(f, expected_args)
        # NOTE(review): this second call repeats ``f``; ``g`` is defined but
        # never exercised — probably a copy-paste slip, confirm and use ``g``.
        verify_callable_argspec(f, expected_args)
        with self.assertRaises(MismatchedArguments):
            verify_callable_argspec(h, expected_args)
    def test_bound_method(self):
        """
        Tests that ``self`` of a bound method is not counted as an argument.
        """
        class C(object):
            def f(self, a, b):
                pass
        method = C().f
        verify_callable_argspec(method, [Argument('a'), Argument('b')])
        with self.assertRaises(NotEnoughArguments):
            # Assert that we don't count self.
            verify_callable_argspec(
                method,
                [Argument('self'), Argument('a'), Argument('b')],
            )
| enigmampc/catalyst | tests/utils/test_argcheck.py | Python | apache-2.0 | 7,116 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the analysis mediator."""
from __future__ import unicode_literals
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.analysis import mediator
from plaso.containers import sessions
from plaso.storage.fake import writer as fake_writer
from tests.analysis import test_lib
class AnalysisMediatorTest(test_lib.AnalysisPluginTestCase):
"""Tests for the analysis mediator."""
def testGetDisplayNameForPathSpec(self):
"""Tests the GetDisplayNameForPathSpec function."""
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
knowledge_base = self._SetUpKnowledgeBase()
analysis_mediator = mediator.AnalysisMediator(
storage_writer, knowledge_base)
test_path = self._GetTestFilePath(['syslog.gz'])
os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
expected_display_name = 'OS:{0:s}'.format(test_path)
display_name = analysis_mediator.GetDisplayNameForPathSpec(os_path_spec)
self.assertEqual(display_name, expected_display_name)
# TODO: add test for GetUsernameForPath.
# TODO: add test for ProduceAnalysisReport.
# TODO: add test for ProduceEventTag.
def testSignalAbort(self):
"""Tests the SignalAbort function."""
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
knowledge_base = self._SetUpKnowledgeBase()
analysis_mediator = mediator.AnalysisMediator(
storage_writer, knowledge_base)
analysis_mediator.SignalAbort()
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
| rgayon/plaso | tests/analysis/mediator.py | Python | apache-2.0 | 1,743 | 0.004016 |
"""
This module describes the unlogged state of the default game.
The setting STATE_UNLOGGED should be set to the python path
of the state instance in this module.
"""
from evennia.commands.cmdset import CmdSet
from evennia.commands.default import unloggedin
class UnloggedinCmdSet(CmdSet):
    """
    Command set offered to sessions before they have logged in.
    """
    key = "DefaultUnloggedin"
    priority = 0

    def at_cmdset_creation(self):
        "Populate the cmdset"
        # All unconnected-state commands, registered in a single pass.
        command_classes = (
            unloggedin.CmdUnconnectedConnect,
            unloggedin.CmdUnconnectedCreate,
            unloggedin.CmdUnconnectedQuit,
            unloggedin.CmdUnconnectedLook,
            unloggedin.CmdUnconnectedHelp,
            unloggedin.CmdUnconnectedEncoding,
            unloggedin.CmdUnconnectedScreenreader,
        )
        for command_class in command_classes:
            self.add(command_class())
| MarsZone/DreamLand | evennia/evennia/commands/default/cmdset_unloggedin.py | Python | bsd-3-clause | 821 | 0 |
#!/usr/bin/env python3
#This extracts data from xml plists
#
#########################COPYRIGHT INFORMATION############################
#Copyright (C) 2013 dougkoster@hotmail.com #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
#
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
#
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see http://www.gnu.org/licenses/. #
#########################COPYRIGHT INFORMATION############################
from parted import *
from mount import *
from mount_ewf import *
from done import *
from unix2dos import *
from mmls import *
from check_for_folder import *
from calculate_md5 import *
import os
from os.path import join
import re
import io
import sys
import string
import subprocess
import datetime
import shutil
import plistlib
import xml.parsers.expat as expat
def get_system_version(plist_info, abs_file_path, md5, export_file, outfile, key_name):
	"""Write file path/MD5 headers and delegate dict-like plists to process_dict.

	plist_info: parsed plist object (plistlib); only dict-like values are handled.
	abs_file_path: path of the plist on the evidence image, written to the report.
	md5: hash of the source file, written to the report.
	export_file / outfile: open report file handles (outfile is only forwarded).
	key_name: plist key to extract, forwarded to process_dict.
	"""
	plist_type = type(plist_info)
	print("The plist type is: " + str(plist_type))
	if(type(plist_info) is dict):
		# NOTE(review): this branch writes a blank line after the MD5 while the
		# _InternalDict branch below does not — confirm the asymmetry is intended.
		export_file.write('File Path: ' + "\t" + abs_file_path + "\n")
		export_file.write('MD5: ' + "\t\t" + str(md5) + "\n\n")
		print(abs_file_path + " has a plist attribute that is a dict")
		process_dict(plist_info, outfile, export_file, key_name)
	elif(str(type(plist_info)) == "<class 'plistlib._InternalDict'>"):
		# Older plistlib returns an _InternalDict; matched by type name string.
		export_file.write('File Path: ' + "\t" + abs_file_path + "\n")
		export_file.write('MD5: ' + "\t\t" + str(md5) + "\n")
		print(abs_file_path + " has a plist attribute that is an internal dict")
		process_dict(plist_info, outfile, export_file, key_name)
def process_dict(dictionary_plist, outfile, export_file, key_name):
	"""Write "key=> value" for key_name and tag the OS X marketing name.

	Scans the sorted plist items for key_name; on a match, writes the value
	followed by the matching release codename in parentheses.
	"""
	#loop through dict plist
	for key,value in sorted(dictionary_plist.items()):
		if(key_name == key):
			print("The key is: " + key + " The key_name is: " + key_name)
			export_file.write(key + "=> " + value)
			#figure out cat type
			# NOTE(review): these patterns are unescaped substring regexes, so
			# '10.1' also matches '10.10'/'10.11' etc.; the descending order
			# only protects versions listed above — confirm handling of >=10.10.
			if(re.search('10.9', value)):
				export_file.write("(Mavericks)")
			elif(re.search('10.8', value)):
				export_file.write("(Mountain Lion)")
			elif(re.search('10.7', value)):
				export_file.write("(Lion)")
			elif(re.search('10.6', value)):
				export_file.write("(Snow Leopard)")
			elif(re.search('10.5', value)):
				export_file.write("(Leopard)")
			elif(re.search('10.4', value)):
				export_file.write("(Tiger)")
			elif(re.search('10.3', value)):
				export_file.write("(Panther)")
			elif(re.search('10.2', value)):
				export_file.write("(Jaguar)")
			elif(re.search('10.1', value)):
				export_file.write("(Puma)")
			elif(re.search('10.0', value)):
				export_file.write("(Kodiak)")
		# NOTE(review): this return executes at the end of the FIRST loop
		# iteration regardless of a match, so only the first sorted key is
		# ever examined — confirm whether it should be inside the if-block.
		return key
| mantarayforensics/mantaray | Tools/Python/get_system_version.py | Python | gpl-3.0 | 3,323 | 0.020163 |
# -*- coding: utf-8 -*-
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class HelloSignHookTests(WebhookTestCase):
    """End-to-end test of the HelloSign webhook integration."""
    STREAM_NAME = 'hellosign'
    URL_TEMPLATE = "/api/v1/external/hellosign?stream={stream}&api_key={api_key}"
    FIXTURE_DIR_NAME = 'hellosign'

    def test_signatures_message(self):
        # type: () -> None
        # Posting the 'signatures' fixture should produce this stream message.
        topic = "NDA with Acme Co."
        body = ("The NDA with Acme Co. is awaiting the signature of "
                "Jack and was just signed by Jill.")
        self.send_and_test_stream_message('signatures', topic, body,
                                          content_type="application/x-www-form-urlencoded")

    def get_body(self, fixture_name):
        # type: (Text) -> Text
        # Fixtures live under the 'hellosign' directory as JSON payloads.
        return self.fixture_data("hellosign", fixture_name, file_type="json")
| sonali0901/zulip | zerver/webhooks/hellosign/tests.py | Python | apache-2.0 | 884 | 0.005656 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
try:
import _tifffile
except ImportError:
warnings.warn(
"failed to import the optional _tifffile C extension module.\n"
"Loading of some compressed images will be slow.\n"
"Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
# Module metadata: release date-based version and docstring markup format.
__version__ = '2014.08.24'
__docformat__ = 'restructuredtext en'
# Public API exported by 'from tifffile import *'.
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
           'TiffSequence')
def imsave(filename, data, **kwargs):
    """Write image data to TIFF file.
    Refer to the TiffWriter class and member functions for documentation.
    Parameters
    ----------
    filename : str
        Name of file to write.
    data : array_like
        Input image. The last dimensions are assumed to be image depth,
        height, width, and samples.
    kwargs : dict
        Parameters 'byteorder', 'bigtiff', and 'software' are passed to
        the TiffWriter class.
        Parameters 'photometric', 'planarconfig', 'resolution',
        'description', 'compress', 'volume', 'writeshape', and 'extratags'
        are passed to the TiffWriter.save function.
    Examples
    --------
    >>> data = numpy.random.rand(2, 5, 3, 301, 219)
    >>> description = '{"shape": %s}' % str(list(data.shape))
    >>> imsave('temp.tif', data, compress=6,
    ...        extratags=[(270, 's', 0, description, True)])
    """
    # Split kwargs between the TiffWriter constructor and TiffWriter.save().
    # 'writeshape' belongs to save(); an earlier revision moved it into
    # tifargs, which made TiffWriter.__init__ raise TypeError whenever a
    # caller passed it explicitly (and then clobbered the caller's value).
    tifargs = {}
    for key in ('byteorder', 'bigtiff', 'software'):
        if key in kwargs:
            tifargs[key] = kwargs.pop(key)
    kwargs.setdefault('writeshape', True)
    # Data that cannot fit the classic 32-bit offsets is written as BigTIFF.
    if 'bigtiff' not in tifargs and data.size * \
            data.dtype.itemsize > 2000 * 2 ** 20:
        tifargs['bigtiff'] = True
    with TiffWriter(filename, **tifargs) as tif:
        tif.save(data, **kwargs)
class TiffWriter(object):
    """Write image data to TIFF file.
    TiffWriter instances must be closed using the close method, which is
    automatically called when using the 'with' statement.
    Examples
    --------
    >>> data = numpy.random.rand(2, 5, 3, 301, 219)
    >>> with TiffWriter('temp.tif', bigtiff=True) as tif:
    ...     for i in range(data.shape[0]):
    ...         tif.save(data[i], compress=6)
    """
    # Map of struct format characters to TIFF data type codes.
    TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
             'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
    # Map of supported tag names to TIFF tag codes.
    TAGS = {
        'new_subfile_type': 254, 'subfile_type': 255,
        'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
        'compression': 259, 'photometric': 262, 'fill_order': 266,
        'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
        'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
        'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
        'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
        'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
        'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
        'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
        'image_depth': 32997, 'tile_depth': 32998}
    def __init__(self, filename, bigtiff=False, byteorder=None,
                 software='tifffile.py'):
        """Create a new TIFF file for writing.
        Use bigtiff=True when creating files greater than 2 GB.
        Parameters
        ----------
        filename : str
            Name of file to write.
        bigtiff : bool
            If True, the BigTIFF format is used.
        byteorder : {'<', '>'}
            The endianness of the data in the file.
            By default this is the system's native byte order.
        software : str
            Name of the software used to create the image.
            Saved with the first page only.
        """
        if byteorder not in (None, '<', '>'):
            raise ValueError("invalid byteorder %s" % byteorder)
        if byteorder is None:
            byteorder = '<' if sys.byteorder == 'little' else '>'
        self._byteorder = byteorder
        self._software = software
        self._fh = open(filename, 'wb')
        # byte-order mark: 'II' little endian, 'MM' big endian
        self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
        if bigtiff:
            # BigTIFF: version 43 header, 8-byte offsets, 20-byte tags
            self._bigtiff = True
            self._offset_size = 8
            self._tag_size = 20
            self._numtag_format = 'Q'
            self._offset_format = 'Q'
            self._val_format = '8s'
            self._fh.write(struct.pack(byteorder + 'HHH', 43, 8, 0))
        else:
            # classic TIFF: version 42 header, 4-byte offsets, 12-byte tags
            self._bigtiff = False
            self._offset_size = 4
            self._tag_size = 12
            self._numtag_format = 'H'
            self._offset_format = 'I'
            self._val_format = '4s'
            self._fh.write(struct.pack(byteorder + 'H', 42))
        # remember where the offset to the first/next IFD must be patched in
        self._ifd_offset = self._fh.tell()
        self._fh.write(struct.pack(byteorder + self._offset_format, 0))
    def save(self, data, photometric=None, planarconfig=None, resolution=None,
             description=None, volume=False, writeshape=False, compress=0,
             extratags=()):
        """Write image data to TIFF file.
        Image data are written in one stripe per plane.
        Dimensions larger than 2 to 4 (depending on photometric mode, planar
        configuration, and SGI mode) are flattened and saved as separate pages.
        The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
        the data type.
        Parameters
        ----------
        data : array_like
            Input image. The last dimensions are assumed to be image depth,
            height, width, and samples.
        photometric : {'minisblack', 'miniswhite', 'rgb'}
            The color space of the image data.
            By default this setting is inferred from the data shape.
        planarconfig : {'contig', 'planar'}
            Specifies if samples are stored contiguous or in separate planes.
            By default this setting is inferred from the data shape.
            'contig': last dimension contains samples.
            'planar': third last dimension contains samples.
        resolution : (float, float) or ((int, int), (int, int))
            X and Y resolution in dots per inch as float or rational numbers.
        description : str
            The subject of the image. Saved with the first page only.
        compress : int
            Values from 0 to 9 controlling the level of zlib compression.
            If 0, data are written uncompressed (default).
        volume : bool
            If True, volume data are stored in one tile (if applicable) using
            the SGI image_depth and tile_depth tags.
            Image width and depth must be multiple of 16.
            Few software can read this format, e.g. MeVisLab.
        writeshape : bool
            If True, write the data shape to the image_description tag
            if necessary and no other description is given.
        extratags: sequence of tuples
            Additional tags as [(code, dtype, count, value, writeonce)].
            code : int
                The TIFF tag Id.
            dtype : str
                Data type of items in 'value' in Python struct format.
                One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
            count : int
                Number of data values. Not used for string values.
            value : sequence
                'Count' values compatible with 'dtype'.
            writeonce : bool
                If True, the tag is written to the first page only.
        """
        if photometric not in (None, 'minisblack', 'miniswhite', 'rgb'):
            raise ValueError("invalid photometric %s" % photometric)
        if planarconfig not in (None, 'contig', 'planar'):
            raise ValueError("invalid planarconfig %s" % planarconfig)
        if not 0 <= compress <= 9:
            raise ValueError("invalid compression level %s" % compress)
        # bind instance state to locals for the tight loops below
        fh = self._fh
        byteorder = self._byteorder
        numtag_format = self._numtag_format
        val_format = self._val_format
        offset_format = self._offset_format
        offset_size = self._offset_size
        tag_size = self._tag_size
        # copy data into a C-contiguous array in the file's byte order
        data = numpy.asarray(
            data,
            dtype=byteorder +
            data.dtype.char,
            order='C')
        data_shape = shape = data.shape
        data = numpy.atleast_2d(data)
        # normalize shape of data and infer photometric/planarconfig
        samplesperpixel = 1
        extrasamples = 0
        if volume and data.ndim < 3:
            volume = False
        if photometric is None:
            if planarconfig:
                photometric = 'rgb'
            elif data.ndim > 2 and shape[-1] in (3, 4):
                photometric = 'rgb'
            elif volume and data.ndim > 3 and shape[-4] in (3, 4):
                photometric = 'rgb'
            elif data.ndim > 2 and shape[-3] in (3, 4):
                photometric = 'rgb'
            else:
                photometric = 'minisblack'
        if planarconfig and len(shape) <= (3 if volume else 2):
            planarconfig = None
            photometric = 'minisblack'
        if photometric == 'rgb':
            if len(shape) < 3:
                raise ValueError("not a RGB(A) image")
            if len(shape) < 4:
                volume = False
            if planarconfig is None:
                if shape[-1] in (3, 4):
                    planarconfig = 'contig'
                elif shape[-4 if volume else -3] in (3, 4):
                    planarconfig = 'planar'
                elif shape[-1] > shape[-4 if volume else -3]:
                    planarconfig = 'planar'
                else:
                    planarconfig = 'contig'
            if planarconfig == 'contig':
                data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
                samplesperpixel = data.shape[-1]
            else:
                data = data.reshape(
                    (-1,) + shape[(-4 if volume else -3):] + (1,))
                samplesperpixel = data.shape[1]
            if samplesperpixel > 3:
                extrasamples = samplesperpixel - 3
        elif planarconfig and len(shape) > (3 if volume else 2):
            if planarconfig == 'contig':
                data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
                samplesperpixel = data.shape[-1]
            else:
                data = data.reshape(
                    (-1,) + shape[(-4 if volume else -3):] + (1,))
                samplesperpixel = data.shape[1]
            extrasamples = samplesperpixel - 1
        else:
            planarconfig = None
            # remove trailing 1s
            while len(shape) > 2 and shape[-1] == 1:
                shape = shape[:-1]
            if len(shape) < 3:
                volume = False
            if False and (
                    len(shape) > (3 if volume else 2) and shape[-1] < 5 and
                    all(shape[-1] < i
                        for i in shape[(-4 if volume else -3):-1])):
                # DISABLED: non-standard TIFF, e.g. (220, 320, 2)
                planarconfig = 'contig'
                samplesperpixel = shape[-1]
                data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
            else:
                data = data.reshape(
                    (-1, 1) + shape[(-3 if volume else -2):] + (1,))
        if samplesperpixel == 2:
            warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
        if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
            warnings.warn("volume width or length are not multiple of 16")
            volume = False
            data = numpy.swapaxes(data, 1, 2)
            data = data.reshape(
                (data.shape[0] * data.shape[1],) + data.shape[2:])
        # data.shape is now normalized 5D or 6D, depending on volume
        # (pages, planar_samples, (depth,) height, width, contig_samples)
        assert len(data.shape) in (5, 6)
        shape = data.shape
        # on Python 3, tag strings must be encoded to bytes before writing
        bytestr = bytes if sys.version[0] == '2' else (
            lambda x: bytes(x) if isinstance(x, str) else x)
        tags = []  # list of (code, ifdentry, ifdvalue, writeonce)
        if volume:
            # use tiles to save volume data
            tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
            tag_offsets = TiffWriter.TAGS['tile_offsets']
        else:
            # else use strips
            tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
            tag_offsets = TiffWriter.TAGS['strip_offsets']
        def pack(fmt, *val):
            # struct.pack in the file's byte order
            return struct.pack(byteorder + fmt, *val)
        def addtag(code, dtype, count, value, writeonce=False):
            # Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
            # Append (code, ifdentry, ifdvalue, writeonce) to tags list.
            code = int(TiffWriter.TAGS.get(code, code))
            try:
                tifftype = TiffWriter.TYPES[dtype]
            except KeyError:
                raise ValueError("unknown dtype %s" % dtype)
            rawcount = count
            if dtype == 's':
                # ASCII tags are NUL-terminated; count is the byte length
                value = bytestr(value) + b'\0'
                count = rawcount = len(value)
                value = (value, )
            if len(dtype) > 1:
                count *= int(dtype[:-1])
                dtype = dtype[-1]
            ifdentry = [pack('HH', code, tifftype),
                        pack(offset_format, rawcount)]
            ifdvalue = None
            if count == 1:
                if isinstance(value, (tuple, list)):
                    value = value[0]
                ifdentry.append(pack(val_format, pack(dtype, value)))
            elif struct.calcsize(dtype) * count <= offset_size:
                # value fits inline in the IFD entry's value field
                ifdentry.append(pack(val_format,
                                     pack(str(count) + dtype, *value)))
            else:
                # value is stored out of line; offset patched when written
                ifdentry.append(pack(offset_format, 0))
                ifdvalue = pack(str(count) + dtype, *value)
            tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
        def rational(arg, max_denominator=1000000):
            # return numerator and denominator from float or two integers
            try:
                f = Fraction.from_float(arg)
            except TypeError:
                f = Fraction(arg[0], arg[1])
            f = f.limit_denominator(max_denominator)
            return f.numerator, f.denominator
        if self._software:
            addtag('software', 's', 0, self._software, writeonce=True)
            self._software = None  # only save to first page
        if description:
            addtag('image_description', 's', 0, description, writeonce=True)
        elif writeshape and shape[0] > 1 and shape != data_shape:
            addtag('image_description', 's', 0,
                   "shape=(%s)" % (",".join('%i' % i for i in data_shape)),
                   writeonce=True)
        addtag('datetime', 's', 0,
               datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
               writeonce=True)
        # 32946 is the Adobe deflate (zlib) compression code
        addtag('compression', 'H', 1, 32946 if compress else 1)
        addtag('orientation', 'H', 1, 1)
        addtag('image_width', 'I', 1, shape[-2])
        addtag('image_length', 'I', 1, shape[-3])
        if volume:
            addtag('image_depth', 'I', 1, shape[-4])
            addtag('tile_depth', 'I', 1, shape[-4])
            addtag('tile_width', 'I', 1, shape[-2])
            addtag('tile_length', 'I', 1, shape[-3])
        addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
        addtag('sample_format', 'H', 1,
               {'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
        addtag('photometric', 'H', 1,
               {'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
        addtag('samples_per_pixel', 'H', 1, samplesperpixel)
        if planarconfig and samplesperpixel > 1:
            addtag('planar_configuration', 'H', 1, 1
                   if planarconfig == 'contig' else 2)
            addtag('bits_per_sample', 'H', samplesperpixel,
                   (data.dtype.itemsize * 8, ) * samplesperpixel)
        else:
            addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
        if extrasamples:
            if photometric == 'rgb' and extrasamples == 1:
                addtag('extra_samples', 'H', 1, 1)  # associated alpha channel
            else:
                addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
        if resolution:
            addtag('x_resolution', '2I', 1, rational(resolution[0]))
            addtag('y_resolution', '2I', 1, rational(resolution[1]))
            addtag('resolution_unit', 'H', 1, 2)
        addtag('rows_per_strip', 'I', 1,
               shape[-3] * (shape[-4] if volume else 1))
        # use one strip or tile per plane
        strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1]
        addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
        addtag(tag_offsets, offset_format, shape[1], (0, ) * shape[1])
        # add extra tags from users
        for t in extratags:
            addtag(*t)
        # the entries in an IFD must be sorted in ascending order by tag code
        tags = sorted(tags, key=lambda x: x[0])
        if not self._bigtiff and (fh.tell() + data.size * data.dtype.itemsize
                                  > 2 ** 31 - 1):
            raise ValueError("data too large for non-bigtiff file")
        # write one TIFF page (IFD + out-of-line values + image data) per
        # element of the normalized first dimension
        for pageindex in range(shape[0]):
            # update pointer at ifd_offset
            pos = fh.tell()
            fh.seek(self._ifd_offset)
            fh.write(pack(offset_format, pos))
            fh.seek(pos)
            # write ifdentries
            fh.write(pack(numtag_format, len(tags)))
            tag_offset = fh.tell()
            fh.write(b''.join(t[1] for t in tags))
            self._ifd_offset = fh.tell()
            fh.write(pack(offset_format, 0))  # offset to next IFD
            # write tag values and patch offsets in ifdentries, if necessary
            for tagindex, tag in enumerate(tags):
                if tag[2]:
                    pos = fh.tell()
                    fh.seek(tag_offset + tagindex * tag_size + offset_size + 4)
                    fh.write(pack(offset_format, pos))
                    fh.seek(pos)
                    if tag[0] == tag_offsets:
                        strip_offsets_offset = pos
                    elif tag[0] == tag_byte_counts:
                        strip_byte_counts_offset = pos
                    fh.write(tag[2])
            # write image data
            data_offset = fh.tell()
            if compress:
                strip_byte_counts = []
                for plane in data[pageindex]:
                    plane = zlib.compress(plane, compress)
                    strip_byte_counts.append(len(plane))
                    fh.write(plane)
            else:
                # if this fails try update Python/numpy
                data[pageindex].tofile(fh)
                fh.flush()
            # update strip and tile offsets and byte_counts if necessary
            pos = fh.tell()
            for tagindex, tag in enumerate(tags):
                if tag[0] == tag_offsets:  # strip or tile offsets
                    if tag[2]:
                        fh.seek(strip_offsets_offset)
                        strip_offset = data_offset
                        for size in strip_byte_counts:
                            fh.write(pack(offset_format, strip_offset))
                            strip_offset += size
                    else:
                        fh.seek(tag_offset + tagindex * tag_size +
                                offset_size + 4)
                        fh.write(pack(offset_format, data_offset))
                elif tag[0] == tag_byte_counts:  # strip or tile byte_counts
                    if compress:
                        if tag[2]:
                            fh.seek(strip_byte_counts_offset)
                            for size in strip_byte_counts:
                                fh.write(pack(offset_format, size))
                        else:
                            fh.seek(tag_offset + tagindex * tag_size +
                                    offset_size + 4)
                            fh.write(pack(offset_format, strip_byte_counts[0]))
                    break
            fh.seek(pos)
            fh.flush()
            # remove tags that should be written only once
            if pageindex == 0:
                tags = [t for t in tags if not t[-1]]
    def close(self):
        """Close the underlying file handle."""
        self._fh.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
def imread(files, **kwargs):
    """Return image data from TIFF file(s) as numpy array.
    The first image series is returned if no arguments are provided.
    Parameters
    ----------
    files : str or list
        File name, glob pattern, or list of file names.
    key : int, slice, or sequence of page indices
        Defines which pages to return as array.
    series : int
        Defines which series of pages in file to return as array.
    multifile : bool
        If True (default), OME-TIFF data may include pages from multiple files.
    pattern : str
        Regular expression pattern that matches axes names and indices in
        file names.
    kwargs : dict
        Additional parameters passed to the TiffFile or TiffSequence asarray
        function.
    Examples
    --------
    >>> im = imread('test.tif', key=0)
    >>> im.shape
    (256, 256, 4)
    >>> ims = imread(['test.tif', 'test.tif'])
    >>> ims.shape
    (2, 256, 256, 4)
    """
    # Route keyword arguments to their consumers: 'multifile' belongs to the
    # TiffFile constructor, 'pattern' to TiffSequence; everything else is
    # forwarded to the respective asarray() call.
    kwargs_file = {'multifile': kwargs.pop('multifile', True)}
    kwargs_seq = {}
    if 'pattern' in kwargs:
        kwargs_seq['pattern'] = kwargs.pop('pattern')
    # Expand glob patterns; a single match is treated like a plain file name.
    if isinstance(files, str) and any(i in files for i in '?*'):
        files = glob.glob(files)
        if not files:
            raise ValueError('no files found')
        if len(files) == 1:
            files = files[0]
    if isinstance(files, str):
        with TiffFile(files, **kwargs_file) as tif:
            return tif.asarray(**kwargs)
    else:
        with TiffSequence(files, **kwargs_seq) as imseq:
            return imseq.asarray(**kwargs)
class lazyattr(object):
    """Non-data descriptor that caches the wrapped method's result.

    On first access the wrapped function is evaluated once and its result
    is stored on the instance under the same name, so later lookups bypass
    the descriptor entirely.  A result of NotImplemented is not cached;
    instead the attribute defined on the superclass is returned.
    """
    __slots__ = ('func', )

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        # class-level access returns the descriptor itself
        if instance is None:
            return self
        name = self.func.__name__
        result = self.func(instance)
        if result is NotImplemented:
            # defer to the attribute provided by the parent class
            return getattr(super(owner, instance), name)
        # shadow the descriptor: instance __dict__ wins over a
        # non-data descriptor on subsequent lookups
        setattr(instance, name, result)
        return result
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
Extra MicroManager non-TIFF metadata in the file, if exists.
All attributes are read-only.
Examples
--------
>>> with TiffFile('test.tif') as tif:
... data = tif.asarray()
... data.shape
(256, 256, 4)
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
"""
self._fh = FileHandle(arg, name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile()
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
    def _fromfile(self):
        """Read TIFF header and all page records from file."""
        self._fh.seek(0)
        try:
            # byte-order mark: 'II' little endian, 'MM' big endian
            self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
        except KeyError:
            raise ValueError("not a valid TIFF file")
        version = struct.unpack(self.byteorder + 'H', self._fh.read(2))[0]
        if version == 43:  # BigTIFF: 8-byte offsets, constant 8 then 0
            self.offset_size, zero = struct.unpack(self.byteorder + 'HH',
                                                   self._fh.read(4))
            if zero or self.offset_size != 8:
                raise ValueError("not a valid BigTIFF file")
        elif version == 42:  # classic TIFF: 4-byte offsets
            self.offset_size = 4
        else:
            raise ValueError("not a TIFF file")
        self.pages = []
        # TiffPage raises StopIteration at the end of the IFD chain
        while True:
            try:
                page = TiffPage(self)
                self.pages.append(page)
            except StopIteration:
                break
        if not self.pages:
            raise ValueError("empty TIFF file")
        if self.is_micromanager:
            # MicroManager files contain metadata not stored in TIFF tags.
            self.micromanager_metadata = read_micromanager_metadata(self._fh)
        if self.is_lsm:
            self._fix_lsm_strip_offsets()
            self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
for series in self.series:
wrap = 0
previous_offset = 0
for page in series.pages:
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2 ** 32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
    def _fix_lsm_strip_byte_counts(self):
        """Set strip_byte_counts to size of compressed data.
        The strip_byte_counts tag in LSM files contains the number of bytes
        for the uncompressed data.
        """
        if not self.pages:
            return
        # map every strip offset in the file to its declared byte count
        strips = {}
        for page in self.pages:
            assert len(page.strip_offsets) == len(page.strip_byte_counts)
            for offset, bytecount in zip(page.strip_offsets,
                                         page.strip_byte_counts):
                strips[offset] = bytecount
        offsets = sorted(strips.keys())
        # sentinel: end of the last strip, clamped to the file size
        offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
        # a strip cannot extend beyond the start of the next strip
        for i, offset in enumerate(offsets[:-1]):
            strips[offset] = min(strips[offset], offsets[i + 1] - offset)
        for page in self.pages:
            if page.compression:
                page.strip_byte_counts = tuple(
                    strips[offset] for offset in page.strip_offsets)
    @lazyattr
    def series(self):
        """Return series of TiffPage with compatible shape and properties."""
        if not self.pages:
            return []
        series = []
        page0 = self.pages[0]
        if self.is_ome:
            series = self._omeseries()
        elif self.is_fluoview:
            # FluoView: derive axes and shape from the MM header dimensions
            dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
                    b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
                    b'EVENT': 'V', b'EXPOSURE': 'L'}
            mmhd = list(reversed(page0.mm_header.dimensions))
            series = [Record(
                axes=''.join(dims.get(i[0].strip().upper(), 'Q')
                             for i in mmhd if i[1] > 1),
                shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
                pages=self.pages, dtype=numpy.dtype(page0.dtype))]
        elif self.is_lsm:
            # LSM: axes order and sizes come from the CZ LSM info record
            lsmi = page0.cz_lsm_info
            axes = CZ_SCAN_TYPES[lsmi.scan_type]
            if page0.is_rgb:
                axes = axes.replace('C', '').replace('XY', 'XYC')
            axes = axes[::-1]
            shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
            pages = [p for p in self.pages if not p.is_reduced]
            series = [Record(axes=axes, shape=shape, pages=pages,
                             dtype=numpy.dtype(pages[0].dtype))]
            if len(pages) != len(self.pages):  # reduced RGB pages
                # build a second series from the reduced thumbnail pages
                pages = [p for p in self.pages if p.is_reduced]
                cp = 1
                i = 0
                while cp < len(pages) and i < len(shape) - 2:
                    cp *= shape[i]
                    i += 1
                shape = shape[:i] + pages[0].shape
                axes = axes[:i] + 'CYX'
                series.append(Record(axes=axes, shape=shape, pages=pages,
                                     dtype=numpy.dtype(pages[0].dtype)))
        elif self.is_imagej:
            # ImageJ: frames/slices/channels counts come from imagej_tags
            shape = []
            axes = []
            ij = page0.imagej_tags
            if 'frames' in ij:
                shape.append(ij['frames'])
                axes.append('T')
            if 'slices' in ij:
                shape.append(ij['slices'])
                axes.append('Z')
            if 'channels' in ij and not self.is_rgb:
                shape.append(ij['channels'])
                axes.append('C')
            # pages not accounted for by the declared counts get an 'I' axis
            remain = len(self.pages) // (product(shape) if shape else 1)
            if remain > 1:
                shape.append(remain)
                axes.append('I')
            shape.extend(page0.shape)
            axes.extend(page0.axes)
            axes = ''.join(axes)
            series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
                             dtype=numpy.dtype(page0.dtype))]
        elif self.is_nih:
            if len(self.pages) == 1:
                shape = page0.shape
                axes = page0.axes
            else:
                shape = (len(self.pages),) + page0.shape
                axes = 'I' + page0.axes
            series = [Record(pages=self.pages, shape=shape, axes=axes,
                             dtype=numpy.dtype(page0.dtype))]
        elif page0.is_shaped:
            # TODO: shaped files can contain multiple series
            # parse the "shape=(...)" text written by TiffWriter.save
            shape = page0.tags['image_description'].value[7:-1]
            shape = tuple(int(i) for i in shape.split(b','))
            series = [Record(pages=self.pages, shape=shape,
                             axes='Q' * len(shape),
                             dtype=numpy.dtype(page0.dtype))]
        # generic detection of series
        if not series:
            # group pages by (shape, axes, decodable-compression) signature
            shapes = []
            pages = {}
            for page in self.pages:
                if not page.shape:
                    continue
                shape = page.shape + (page.axes,
                                      page.compression in TIFF_DECOMPESSORS)
                if shape not in pages:
                    shapes.append(shape)
                    pages[shape] = [page]
                else:
                    pages[shape].append(page)
            series = [Record(pages=pages[s],
                             axes=(('I' + s[-2])
                                   if len(pages[s]) > 1 else s[-2]),
                             dtype=numpy.dtype(pages[s][0].dtype),
                             shape=((len(pages[s]), ) + s[:-2]
                                    if len(pages[s]) > 1 else s[:-2]))
                      for s in shapes]
        # remove empty series, e.g. in MD Gel files
        series = [s for s in series if sum(s.shape) > 0]
        return series
    def asarray(self, key=None, series=None, memmap=False):
        """Return image data from multiple TIFF pages as numpy array.
        By default the first image series is returned.
        Parameters
        ----------
        key : int, slice, or sequence of page indices
            Defines which pages to return as array.
        series : int
            Defines which series of pages to return as array.
        memmap : bool
            If True, return an array stored in a binary file on disk
            if possible.
        """
        if key is None and series is None:
            series = 0
        if series is not None:
            pages = self.series[series].pages
        else:
            pages = self.pages
        # narrow the page list according to 'key'
        if key is None:
            pass
        elif isinstance(key, int):
            pages = [pages[key]]
        elif isinstance(key, slice):
            pages = pages[key]
        elif isinstance(key, collections.Iterable):
            pages = [pages[k] for k in key]
        else:
            raise TypeError("key must be an int, slice, or sequence")
        if not len(pages):
            raise ValueError("no pages selected")
        if self.is_nih:
            # NIH: apply the color map to palette images
            if pages[0].is_palette:
                result = stack_pages(pages, colormapped=False, squeeze=False)
                result = numpy.take(pages[0].color_map, result, axis=1)
                result = numpy.swapaxes(result, 0, 1)
            else:
                result = stack_pages(pages, memmap=memmap,
                                     colormapped=False, squeeze=False)
        elif len(pages) == 1:
            return pages[0].asarray(memmap=memmap)
        elif self.is_ome:
            assert not self.is_palette, "color mapping disabled for ome-tiff"
            if any(p is None for p in pages):
                # zero out missing pages
                firstpage = next(p for p in pages if p)
                nopage = numpy.zeros_like(
                    firstpage.asarray(memmap=False))
            s = self.series[series]
            if memmap:
                with tempfile.NamedTemporaryFile() as fh:
                    result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
                    result = result.reshape(-1)
            else:
                result = numpy.empty(s.shape, s.dtype).reshape(-1)
            index = 0
            class KeepOpen:
                # keep Tiff files open between consecutive pages
                def __init__(self, parent, close):
                    self.master = parent
                    self.parent = parent
                    self._close = close
                def open(self, page):
                    # switch the open handle to the file owning 'page'
                    if self._close and page and page.parent != self.parent:
                        if self.parent != self.master:
                            self.parent.filehandle.close()
                        self.parent = page.parent
                        self.parent.filehandle.open()
                def close(self):
                    if self._close and self.parent != self.master:
                        self.parent.filehandle.close()
            keep = KeepOpen(self, self._multifile_close)
            # copy each page (or the zero page) into the flat result buffer
            for page in pages:
                keep.open(page)
                if page:
                    a = page.asarray(memmap=False, colormapped=False,
                                     reopen=False)
                else:
                    a = nopage
                try:
                    result[index:index + a.size] = a.reshape(-1)
                except ValueError as e:
                    warnings.warn("ome-tiff: %s" % e)
                    break
                index += a.size
            keep.close()
        else:
            result = stack_pages(pages, memmap=memmap)
        if key is None:
            try:
                result.shape = self.series[series].shape
            except ValueError:
                try:
                    warnings.warn("failed to reshape %s to %s" % (
                        result.shape, self.series[series].shape))
                    # try series of expected shapes
                    result.shape = (-1,) + self.series[series].shape
                except ValueError:
                    # revert to generic shape
                    result.shape = (-1,) + pages[0].shape
        else:
            result.shape = (-1,) + pages[0].shape
        return result
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = etree.fromstring(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size' + ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First' + ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
except (IOError, ValueError):
tif.close()
warnings.warn(
"ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i + 1, size)
record.axes = record.axes.replace(axis, axis + newaxis, 1)
record.shape = tuple(record.shape)
# squeeze dimensions
for record in result:
record.shape, record.axes = squeeze_axes(record.shape, record.axes)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
    def __enter__(self):
        # context manager entry: file is already open, nothing to set up
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # always close the file handle, even when an exception occurred
        self.close()
    @lazyattr
    def fstat(self):
        """Return os.fstat result for the underlying file, or None.
        None is returned when the handle has no usable file descriptor.
        """
        try:
            return os.fstat(self._fh.fileno())
        except Exception:  # io.UnsupportedOperation
            return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
return self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
class TiffPage(object):
    """A TIFF image file directory (IFD).
    Attributes
    ----------
    index : int
        Index of page in file.
    dtype : str {TIFF_SAMPLE_DTYPES}
        Data type of image, colormapped if applicable.
    shape : tuple
        Dimensions of the image array in TIFF page,
        colormapped and with one alpha channel if applicable.
    axes : str
        Axes label codes:
        'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
        'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
        'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
        'L' exposure, 'V' event, 'Q' unknown, '_' missing
    tags : TiffTags
        Dictionary of tags in page.
        Tag values are also directly accessible as attributes.
    color_map : numpy array
        Color look up table, if exists.
    cz_lsm_scan_info: Record(dict)
        LSM scan info attributes, if exists.
    imagej_tags: Record(dict)
        Consolidated ImageJ description and metadata tags, if exists.
    uic_tags: Record(dict)
        Consolidated MetaMorph STK/UIC tags, if exists.
    All attributes are read-only.
    Notes
    -----
    The internal, normalized '_shape' attribute is 6 dimensional:
    0. number planes (stk)
    1. planar samples_per_pixel
    2. image_depth Z (sgi)
    3. image_length Y
    4. image_width X
    5. contig samples_per_pixel
    """
    def __init__(self, parent):
        """Initialize instance from file."""
        self.parent = parent
        # page index equals the number of pages already read from the file
        self.index = len(parent.pages)
        self.shape = self._shape = ()
        self.dtype = self._dtype = None
        self.axes = ""
        self.tags = TiffTags()
        # read the IFD from file, then normalize tags into attributes
        self._fromfile()
        self._process_tags()
    def _fromfile(self):
        """Read TIFF IFD structure and its tags from file.
        File cursor must be at storage position of IFD offset and is left at
        offset to next IFD.
        Raises StopIteration if offset (first bytes read) is 0.
        """
        fh = self.parent.filehandle
        byteorder = self.parent.byteorder
        offset_size = self.parent.offset_size
        # classic TIFF uses 4-byte offsets ('I'), BigTIFF 8-byte ('Q')
        fmt = {4: 'I', 8: 'Q'}[offset_size]
        offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
        if not offset:
            raise StopIteration()
        # read standard tags
        tags = self.tags
        fh.seek(offset)
        fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
        try:
            numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
        except Exception:
            warnings.warn("corrupted page list")
            raise StopIteration()
        tagcode = 0
        for _ in range(numtags):
            try:
                tag = TiffTag(self.parent)
                # print(tag)
            except TiffTag.Error as e:
                warnings.warn(str(e))
                continue
            # the TIFF specification requires tags sorted by code
            if tagcode > tag.code:
                # expected for early LSM and tifffile versions
                warnings.warn("tags are not ordered by code")
            tagcode = tag.code
            if tag.name not in tags:
                tags[tag.name] = tag
            else:
                # some files contain multiple IFD with same code
                # e.g. MicroManager files contain two image_description
                i = 1
                while True:
                    name = "%s_%i" % (tag.name, i)
                    if name not in tags:
                        tags[name] = tag
                        break
        pos = fh.tell()
        if self.is_lsm or (self.index and self.parent.is_lsm):
            # correct non standard LSM bitspersample tags
            self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
        if self.is_lsm:
            # read LSM info subrecords
            for name, reader in CZ_LSM_INFO_READERS.items():
                try:
                    offset = self.cz_lsm_info['offset_' + name]
                except KeyError:
                    continue
                if offset < 8:
                    # older LSM revision
                    continue
                fh.seek(offset)
                try:
                    setattr(self, 'cz_lsm_' + name, reader(fh))
                except ValueError:
                    pass
        elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
            # read uic1tag now that plane count is known
            uic1tag = tags['uic1tag']
            fh.seek(uic1tag.value_offset)
            tags['uic1tag'].value = Record(
                read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
                             tags['uic2tag'].count))
        # restore cursor so the caller can read the next IFD offset
        fh.seek(pos)
    def _process_tags(self):
        """Validate standard tags and initialize attributes.
        Raise ValueError if tag values are not supported.
        """
        tags = self.tags
        for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
            # inject defaults for missing standard tags, then map coded
            # values (e.g. compression) to symbolic names via 'validate'
            if not (name in tags or default is None):
                tags[name] = TiffTag(code, dtype=dtype, count=count,
                                     value=default, name=name)
            if name in tags and validate:
                try:
                    if tags[name].count == 1:
                        setattr(self, name, validate[tags[name].value])
                    else:
                        setattr(self, name, tuple(
                            validate[value] for value in tags[name].value))
                except KeyError:
                    raise ValueError("%s.value (%s) not supported" %
                                     (name, tags[name].value))
        tag = tags['bits_per_sample']
        if tag.count == 1:
            self.bits_per_sample = tag.value
        else:
            # LSM might list more items than samples_per_pixel
            value = tag.value[:self.samples_per_pixel]
            # collapse to a scalar when all samples have the same bit depth
            if any((v - value[0] for v in value)):
                self.bits_per_sample = value
            else:
                self.bits_per_sample = value[0]
        tag = tags['sample_format']
        if tag.count == 1:
            self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
        else:
            value = tag.value[:self.samples_per_pixel]
            if any((v - value[0] for v in value)):
                self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
            else:
                self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
        if 'photometric' not in tags:
            self.photometric = None
        if 'image_depth' not in tags:
            self.image_depth = 1
        if 'image_length' in tags:
            self.strips_per_image = int(math.floor(
                float(self.image_length + self.rows_per_strip - 1) /
                self.rows_per_strip))
        else:
            self.strips_per_image = 0
        key = (self.sample_format, self.bits_per_sample)
        self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
        if 'image_length' not in self.tags or 'image_width' not in self.tags:
            # some GEL file pages are missing image data
            self.image_length = 0
            self.image_width = 0
            self.image_depth = 0
            self.strip_offsets = 0
            self._shape = ()
            self.shape = ()
            self.axes = ''
        if self.is_palette:
            self.dtype = self.tags['color_map'].dtype[1]
            self.color_map = numpy.array(self.color_map, self.dtype)
            dmax = self.color_map.max()
            if dmax < 256:
                self.dtype = numpy.uint8
                self.color_map = self.color_map.astype(self.dtype)
            # else:
            #    self.dtype = numpy.uint8
            #    self.color_map >>= 8
            #    self.color_map = self.color_map.astype(self.dtype)
            self.color_map.shape = (3, -1)
        # determine shape of data
        image_length = self.image_length
        image_width = self.image_width
        image_depth = self.image_depth
        samples_per_pixel = self.samples_per_pixel
        if self.is_stk:
            # MetaMorph STK: multiple planes per page, axes depend on
            # uic2tag metadata (z distance / creation time)
            assert self.image_depth == 1
            planes = self.tags['uic2tag'].count
            if self.is_contig:
                self._shape = (planes, 1, 1, image_length, image_width,
                               samples_per_pixel)
                if samples_per_pixel == 1:
                    self.shape = (planes, image_length, image_width)
                    self.axes = 'YX'
                else:
                    self.shape = (planes, image_length, image_width,
                                  samples_per_pixel)
                    self.axes = 'YXS'
            else:
                self._shape = (planes, samples_per_pixel, 1, image_length,
                               image_width, 1)
                if samples_per_pixel == 1:
                    self.shape = (planes, image_length, image_width)
                    self.axes = 'YX'
                else:
                    self.shape = (planes, samples_per_pixel, image_length,
                                  image_width)
                    self.axes = 'SYX'
            # detect type of series
            if planes == 1:
                self.shape = self.shape[1:]
            elif numpy.all(self.uic2tag.z_distance != 0):
                self.axes = 'Z' + self.axes
            elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
                self.axes = 'T' + self.axes
            else:
                self.axes = 'I' + self.axes
            # DISABLED
            if self.is_palette:
                assert False, "color mapping disabled for stk"
        elif self.is_palette:
            # palette-indexed: prepend a color axis of length 3
            samples = 1
            if 'extra_samples' in self.tags:
                samples += len(self.extra_samples)
            if self.is_contig:
                self._shape = (1, 1, image_depth, image_length, image_width,
                               samples)
            else:
                self._shape = (1, samples, image_depth, image_length,
                               image_width, 1)
            if self.color_map.shape[1] >= 2 ** self.bits_per_sample:
                if image_depth == 1:
                    self.shape = (3, image_length, image_width)
                    self.axes = 'CYX'
                else:
                    self.shape = (3, image_depth, image_length, image_width)
                    self.axes = 'CZYX'
            else:
                warnings.warn("palette cannot be applied")
                self.is_palette = False
                if image_depth == 1:
                    self.shape = (image_length, image_width)
                    self.axes = 'YX'
                else:
                    self.shape = (image_depth, image_length, image_width)
                    self.axes = 'ZYX'
        elif self.is_rgb or samples_per_pixel > 1:
            # multi-sample image: sample axis position depends on
            # planar_configuration (contig vs planar)
            if self.is_contig:
                self._shape = (1, 1, image_depth, image_length, image_width,
                               samples_per_pixel)
                if image_depth == 1:
                    self.shape = (image_length, image_width, samples_per_pixel)
                    self.axes = 'YXS'
                else:
                    self.shape = (image_depth, image_length, image_width,
                                  samples_per_pixel)
                    self.axes = 'ZYXS'
            else:
                self._shape = (1, samples_per_pixel, image_depth,
                               image_length, image_width, 1)
                if image_depth == 1:
                    self.shape = (samples_per_pixel, image_length, image_width)
                    self.axes = 'SYX'
                else:
                    self.shape = (samples_per_pixel, image_depth,
                                  image_length, image_width)
                    self.axes = 'SZYX'
            if False and self.is_rgb and 'extra_samples' in self.tags:
                # DISABLED: only use RGB and first alpha channel if exists
                extra_samples = self.extra_samples
                if self.tags['extra_samples'].count == 1:
                    extra_samples = (extra_samples, )
                for exs in extra_samples:
                    if exs in ('unassalpha', 'assocalpha', 'unspecified'):
                        if self.is_contig:
                            self.shape = self.shape[:-1] + (4,)
                        else:
                            self.shape = (4,) + self.shape[1:]
                        break
        else:
            # plain grayscale image
            self._shape = (1, 1, image_depth, image_length, image_width, 1)
            if image_depth == 1:
                self.shape = (image_length, image_width)
                self.axes = 'YX'
            else:
                self.shape = (image_depth, image_length, image_width)
                self.axes = 'ZYX'
        if not self.compression and 'strip_byte_counts' not in tags:
            # synthesize byte counts for uncompressed data
            self.strip_byte_counts = (
                product(self.shape) * (self.bits_per_sample // 8), )
        assert len(self.shape) == len(self.axes)
    def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
                scale_mdgel=False, memmap=False, reopen=True):
        """Read image data from file and return as numpy array.
        Raise ValueError if format is unsupported.
        If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
        the shape of the returned array might be different from the page shape.
        Parameters
        ----------
        squeeze : bool
            If True, all length-1 dimensions (except X and Y) are
            squeezed out from result.
        colormapped : bool
            If True, color mapping is applied for palette-indexed images.
        rgbonly : bool
            If True, return RGB(A) image without additional extra samples.
        memmap : bool
            If True, use numpy.memmap to read arrays from file if possible.
            For use on 64 bit systems and files with few huge contiguous data.
        reopen : bool
            If True and the parent file handle is closed, the file is
            temporarily re-opened (and closed if no exception occurs).
        scale_mdgel : bool
            If True, MD Gel data will be scaled according to the private
            metadata in the second TIFF page. The dtype will be float32.
        """
        if not self._shape:
            return
        if self.dtype is None:
            raise ValueError("data type not supported: %s%i" % (
                self.sample_format, self.bits_per_sample))
        if self.compression not in TIFF_DECOMPESSORS:
            raise ValueError("cannot decompress %s" % self.compression)
        tag = self.tags['sample_format']
        if tag.count != 1 and any((i - tag.value[0] for i in tag.value)):
            raise ValueError("sample formats don't match %s" % str(tag.value))
        fh = self.parent.filehandle
        closed = fh.closed
        if closed:
            if reopen:
                fh.open()
            else:
                raise IOError("file handle is closed")
        dtype = self._dtype
        shape = self._shape
        image_width = self.image_width
        image_length = self.image_length
        image_depth = self.image_depth
        typecode = self.parent.byteorder + dtype
        bits_per_sample = self.bits_per_sample
        if self.is_tiled:
            # round image dimensions up to whole tiles
            if 'tile_offsets' in self.tags:
                byte_counts = self.tile_byte_counts
                offsets = self.tile_offsets
            else:
                byte_counts = self.strip_byte_counts
                offsets = self.strip_offsets
            tile_width = self.tile_width
            tile_length = self.tile_length
            tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
            tw = (image_width + tile_width - 1) // tile_width
            tl = (image_length + tile_length - 1) // tile_length
            td = (image_depth + tile_depth - 1) // tile_depth
            shape = (shape[0], shape[1],
                     td * tile_depth, tl * tile_length, tw * tile_width, shape[-1])
            tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
            runlen = tile_width
        else:
            byte_counts = self.strip_byte_counts
            offsets = self.strip_offsets
            runlen = image_width
        if any(o < 2 for o in offsets):
            raise ValueError("corrupted page")
        if memmap and self._is_memmappable(rgbonly, colormapped):
            result = fh.memmap_array(typecode, shape, offset=offsets[0])
        elif self.is_contiguous:
            fh.seek(offsets[0])
            result = fh.read_array(typecode, product(shape))
            result = result.astype('=' + dtype)
        else:
            if self.is_contig:
                runlen *= self.samples_per_pixel
            # choose an unpack function matching the sample bit depth
            if bits_per_sample in (8, 16, 32, 64, 128):
                if (bits_per_sample * runlen) % 8:
                    raise ValueError("data and sample size mismatch")
                def unpack(x):
                    # NOTE(review): numpy.fromstring is deprecated in newer
                    # numpy releases; frombuffer is the modern equivalent
                    try:
                        return numpy.fromstring(x, typecode)
                    except ValueError as e:
                        # strips may be missing EOI
                        warnings.warn("unpack: %s" % e)
                        xlen = ((len(x) // (bits_per_sample // 8))
                                * (bits_per_sample // 8))
                        return numpy.fromstring(x[:xlen], typecode)
            elif isinstance(bits_per_sample, tuple):
                def unpack(x):
                    return unpackrgb(x, typecode, bits_per_sample)
            else:
                def unpack(x):
                    return unpackints(x, typecode, bits_per_sample, runlen)
            decompress = TIFF_DECOMPESSORS[self.compression]
            if self.compression == 'jpeg':
                table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
                decompress = lambda x: decodejpg(x, table, self.photometric)
            if self.is_tiled:
                # assemble tiles into the padded result array
                result = numpy.empty(shape, dtype)
                tw, tl, td, pl = 0, 0, 0, 0
                for offset, bytecount in zip(offsets, byte_counts):
                    fh.seek(offset)
                    tile = unpack(decompress(fh.read(bytecount)))
                    tile.shape = tile_shape
                    if self.predictor == 'horizontal':
                        numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
                    result[0, pl, td:td + tile_depth,
                           tl:tl + tile_length, tw:tw + tile_width, :] = tile
                    del tile
                    tw += tile_width
                    if tw >= shape[4]:
                        tw, tl = 0, tl + tile_length
                    if tl >= shape[3]:
                        tl, td = 0, td + tile_depth
                    if td >= shape[2]:
                        td, pl = 0, pl + 1
                # crop the tile padding back to the true image size
                result = result[...,
                                :image_depth, :image_length, :image_width, :]
            else:
                # concatenate decompressed strips into a flat array
                strip_size = (self.rows_per_strip * self.image_width *
                              self.samples_per_pixel)
                result = numpy.empty(shape, dtype).reshape(-1)
                index = 0
                for offset, bytecount in zip(offsets, byte_counts):
                    fh.seek(offset)
                    strip = fh.read(bytecount)
                    strip = decompress(strip)
                    strip = unpack(strip)
                    size = min(result.size, strip.size, strip_size,
                               result.size - index)
                    result[index:index + size] = strip[:size]
                    del strip
                    index += size
        result.shape = self._shape
        if self.predictor == 'horizontal' and not (self.is_tiled and not
                                                   self.is_contiguous):
            # work around bug in LSM510 software
            if not (self.parent.is_lsm and not self.compression):
                numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
        if colormapped and self.is_palette:
            if self.color_map.shape[1] >= 2 ** bits_per_sample:
                # FluoView and LSM might fail here
                result = numpy.take(self.color_map,
                                    result[:, 0, :, :, :, 0], axis=1)
        elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
            # return only RGB and first alpha channel if exists
            extra_samples = self.extra_samples
            if self.tags['extra_samples'].count == 1:
                extra_samples = (extra_samples, )
            for i, exs in enumerate(extra_samples):
                if exs in ('unassalpha', 'assocalpha', 'unspecified'):
                    if self.is_contig:
                        result = result[..., [0, 1, 2, 3 + i]]
                    else:
                        result = result[:, [0, 1, 2, 3 + i]]
                    break
            else:
                if self.is_contig:
                    result = result[..., :3]
                else:
                    result = result[:, :3]
        if squeeze:
            try:
                result.shape = self.shape
            except ValueError:
                warnings.warn("failed to reshape from %s to %s" % (
                    str(result.shape), str(self.shape)))
        if scale_mdgel and self.parent.is_mdgel:
            # MD Gel stores private metadata in the second page
            tags = self.parent.pages[1]
            if tags.md_file_tag in (2, 128):
                scale = tags.md_scale_pixel
                scale = scale[0] / scale[1]  # rational
                result = result.astype('float32')
                if tags.md_file_tag == 2:
                    result **= 2  # squary root data format
                result *= scale
        if closed:
            # TODO: file remains open if an exception occurred above
            fh.close()
        return result
    def _is_memmappable(self, rgbonly, colormapped):
        """Return if image data in file can be memory mapped."""
        if not self.parent.filehandle.is_file or not self.is_contiguous:
            return False
        # memmap requires raw, unpredicted, native byte order data
        return not (self.predictor or
                    (rgbonly and 'extra_samples' in self.tags) or
                    (colormapped and self.is_palette) or
                    ({'big': '>', 'little': '<'}[sys.byteorder] !=
                     self.parent.byteorder))
    @lazyattr
    def is_contiguous(self):
        """Return offset and size of contiguous data, else None.
        Excludes prediction and colormapping.
        """
        if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
            return
        if self.is_tiled:
            if (self.image_width != self.tile_width or
                    self.image_length % self.tile_length or
                    self.tile_width % 16 or self.tile_length % 16):
                return
            if ('image_depth' in self.tags and 'tile_depth' in self.tags and
                    (self.image_length != self.tile_length or
                     self.image_depth % self.tile_depth)):
                return
            offsets = self.tile_offsets
            byte_counts = self.tile_byte_counts
        else:
            offsets = self.strip_offsets
            byte_counts = self.strip_byte_counts
        if len(offsets) == 1:
            return offsets[0], byte_counts[0]
        # contiguous when every strip/tile directly follows the previous one
        if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i + 1]
                              # no data/ignore offset
                              or byte_counts[i + 1] == 0
                              for i in range(len(offsets) - 1)):
            return offsets[0], sum(byte_counts)
    def __str__(self):
        """Return string containing information about page."""
        s = ', '.join(s for s in (
            ' x '.join(str(i) for i in self.shape),
            str(numpy.dtype(self.dtype)),
            '%s bit' % str(self.bits_per_sample),
            self.photometric if 'photometric' in self.tags else '',
            self.compression if self.compression else 'raw',
            '|'.join(t[3:] for t in (
                'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
                'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
                'is_sgi', 'is_reduced', 'is_tiled',
                'is_contiguous') if getattr(self, t))) if s)
        return "Page %i: %s" % (self.index, s)
    def __getattr__(self, name):
        """Return tag value."""
        if name in self.tags:
            value = self.tags[name].value
            # cache the value as an instance attribute for next access
            setattr(self, name, value)
            return value
        raise AttributeError(name)
    @lazyattr
    def uic_tags(self):
        """Consolidate UIC tags."""
        if not self.is_stk:
            raise AttributeError("uic_tags")
        tags = self.tags
        result = Record()
        result.number_planes = tags['uic2tag'].count
        if 'image_description' in tags:
            result.plane_descriptions = self.image_description.split(b'\x00')
        if 'uic1tag' in tags:
            result.update(tags['uic1tag'].value)
        if 'uic3tag' in tags:
            result.update(tags['uic3tag'].value)  # wavelengths
        if 'uic4tag' in tags:
            result.update(tags['uic4tag'].value)  # override uic1 tags
        uic2tag = tags['uic2tag'].value
        result.z_distance = uic2tag.z_distance
        result.time_created = uic2tag.time_created
        result.time_modified = uic2tag.time_modified
        try:
            result.datetime_created = [
                julian_datetime(*dt) for dt in
                zip(uic2tag.date_created, uic2tag.time_created)]
            result.datetime_modified = [
                julian_datetime(*dt) for dt in
                zip(uic2tag.date_modified, uic2tag.time_modified)]
        except ValueError as e:
            warnings.warn("uic_tags: %s" % e)
        return result
    @lazyattr
    def imagej_tags(self):
        """Consolidate ImageJ metadata."""
        if not self.is_imagej:
            raise AttributeError("imagej_tags")
        tags = self.tags
        if 'image_description_1' in tags:
            # MicroManager
            result = imagej_description(tags['image_description_1'].value)
        else:
            result = imagej_description(tags['image_description'].value)
        if 'imagej_metadata' in tags:
            try:
                result.update(imagej_metadata(
                    tags['imagej_metadata'].value,
                    tags['imagej_byte_counts'].value,
                    self.parent.byteorder))
            except Exception as e:
                warnings.warn(str(e))
        return Record(result)
    @lazyattr
    def is_rgb(self):
        """True if page contains a RGB image."""
        return ('photometric' in self.tags and
                self.tags['photometric'].value == 2)
    @lazyattr
    def is_contig(self):
        """True if page contains a contiguous image."""
        return ('planar_configuration' in self.tags and
                self.tags['planar_configuration'].value == 1)
    @lazyattr
    def is_palette(self):
        """True if page contains a palette-colored image and not OME or STK."""
        try:
            # turn off color mapping for OME-TIFF and STK
            if self.is_stk or self.is_ome or self.parent.is_ome:
                return False
        except IndexError:
            pass  # OME-XML not found in first page
        return ('photometric' in self.tags and
                self.tags['photometric'].value == 3)
    @lazyattr
    def is_tiled(self):
        """True if page contains tiled image."""
        return 'tile_width' in self.tags
    @lazyattr
    def is_reduced(self):
        """True if page is a reduced image of another image."""
        return bool(self.tags['new_subfile_type'].value & 1)
    @lazyattr
    def is_mdgel(self):
        """True if page contains md_file_tag tag."""
        return 'md_file_tag' in self.tags
    @lazyattr
    def is_mediacy(self):
        """True if page contains Media Cybernetics Id tag."""
        return ('mc_id' in self.tags and
                self.tags['mc_id'].value.startswith(b'MC TIFF'))
    @lazyattr
    def is_stk(self):
        """True if page contains UIC2Tag tag."""
        return 'uic2tag' in self.tags
    @lazyattr
    def is_lsm(self):
        """True if page contains LSM CZ_LSM_INFO tag."""
        return 'cz_lsm_info' in self.tags
    @lazyattr
    def is_fluoview(self):
        """True if page contains FluoView MM_STAMP tag."""
        return 'mm_stamp' in self.tags
    @lazyattr
    def is_nih(self):
        """True if page contains NIH image header."""
        return 'nih_image_header' in self.tags
    @lazyattr
    def is_sgi(self):
        """True if page contains SGI image and tile depth tags."""
        return 'image_depth' in self.tags and 'tile_depth' in self.tags
    @lazyattr
    def is_ome(self):
        """True if page contains OME-XML in image_description tag."""
        return ('image_description' in self.tags and self.tags[
            'image_description'].value.startswith(b'<?xml version='))
    @lazyattr
    def is_shaped(self):
        """True if page contains shape in image_description tag."""
        return ('image_description' in self.tags and self.tags[
            'image_description'].value.startswith(b'shape=('))
    @lazyattr
    def is_imagej(self):
        """True if page contains ImageJ description."""
        return (
            ('image_description' in self.tags and
             self.tags['image_description'].value.startswith(b'ImageJ=')) or
            ('image_description_1' in self.tags and  # Micromanager
             self.tags['image_description_1'].value.startswith(b'ImageJ=')))
    @lazyattr
    def is_micromanager(self):
        """True if page contains Micro-Manager metadata."""
        return 'micromanager_metadata' in self.tags
class TiffTag(object):
    """A TIFF tag structure.
    Attributes
    ----------
    name : string
        Attribute name of tag.
    code : int
        Decimal code of tag.
    dtype : str
        Datatype of tag data. One of TIFF_DATA_TYPES.
    count : int
        Number of values.
    value : various types
        Tag data as Python object.
    value_offset : int
        Location of value in file, if any.
    All attributes are read-only.
    """
    __slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
                 '_offset', '_value', '_type')
    class Error(Exception):
        """Raised when a tag structure cannot be parsed from file."""
        pass
    def __init__(self, arg, **kwargs):
        """Initialize instance from file or arguments."""
        self._offset = None
        # a TiffFile-like object (has '_fh') means: parse from file
        if hasattr(arg, '_fh'):
            self._fromfile(arg)
        else:
            self._fromdata(arg, **kwargs)
    def _fromdata(self, code, dtype, count, value, name=None):
        """Initialize instance from arguments."""
        self.code = int(code)
        self.name = name if name else str(code)
        self.dtype = TIFF_DATA_TYPES[dtype]
        self.count = int(count)
        self.value = value
        self._value = value
        self._type = dtype
    def _fromfile(self, parent):
        """Read tag structure from open file. Advance file cursor."""
        fh = parent.filehandle
        byteorder = parent.byteorder
        self._offset = fh.tell()
        # inline value field starts after code (2), dtype (2) and count
        self.value_offset = self._offset + parent.offset_size + 4
        fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
        data = fh.read(size)
        code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
        count, value = struct.unpack(byteorder + fmt[2:], data[4:])
        self._value = value
        self._type = dtype
        if code in TIFF_TAGS:
            name = TIFF_TAGS[code][0]
        elif code in CUSTOM_TAGS:
            name = CUSTOM_TAGS[code][0]
        else:
            name = str(code)
        try:
            dtype = TIFF_DATA_TYPES[self._type]
        except KeyError:
            raise TiffTag.Error("unknown tag data type %i" % self._type)
        fmt = '%s%i%s' % (byteorder, count * int(dtype[0]), dtype[1])
        size = struct.calcsize(fmt)
        if size > parent.offset_size or code in CUSTOM_TAGS:
            # value does not fit inline: the inline field holds an offset
            pos = fh.tell()
            tof = {4: 'I', 8: 'Q'}[parent.offset_size]
            self.value_offset = offset = struct.unpack(
                byteorder +
                tof,
                value)[0]
            if offset < 0 or offset > parent.filehandle.size:
                raise TiffTag.Error("corrupt file - invalid tag value offset")
            elif offset < 4:
                raise TiffTag.Error("corrupt value offset for tag %i" % code)
            fh.seek(offset)
            if code in CUSTOM_TAGS:
                readfunc = CUSTOM_TAGS[code][1]
                value = readfunc(fh, byteorder, dtype, count)
                if isinstance(value, dict):  # numpy.core.records.record
                    value = Record(value)
            elif code in TIFF_TAGS or dtype[-1] == 's':
                value = struct.unpack(fmt, fh.read(size))
            else:
                value = read_numpy(fh, byteorder, dtype, count)
            # restore cursor to the position after the tag structure
            fh.seek(pos)
        else:
            value = struct.unpack(fmt, value[:size])
        if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
            # scalar value if not strip/tile offsets/byte_counts
            if len(value) == 1:
                value = value[0]
        if (dtype.endswith('s') and isinstance(value, bytes)
                and self._type != 7):
            # TIFF ASCII fields can contain multiple strings,
            # each terminated with a NUL
            value = stripascii(value)
        self.code = code
        self.name = name
        self.dtype = dtype
        self.count = count
        self.value = value
    def _correct_lsm_bitspersample(self, parent):
        """Correct LSM bitspersample tag.
        Old LSM writers may use a separate region for two 16-bit values,
        although they fit into the tag value element of the tag.
        """
        if self.code == 258 and self.count == 2:
            # TODO: test this. Need example file.
            warnings.warn("correcting LSM bitspersample tag")
            fh = parent.filehandle
            tof = {4: '<I', 8: '<Q'}[parent.offset_size]
            self.value_offset = struct.unpack(tof, self._value)[0]
            fh.seek(self.value_offset)
            self.value = struct.unpack("<HH", fh.read(4))
    def as_str(self):
        """Return value as human readable string."""
        # type 7 is 'undefined' raw bytes; do not try to render those
        return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
                else '<undefined>')
    def __str__(self):
        """Return string containing information about tag."""
        return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
    """Sequence of image files.
    The data shape and dtype of all files must match.
    Properties
    ----------
    files : list
        List of file names.
    shape : tuple
        Shape of image sequence.
    axes : str
        Labels of axes in shape.
    Examples
    --------
    >>> tifs = TiffSequence("test.oif.files/*.tif")
    >>> tifs.shape, tifs.axes
    ((2, 100), 'CT')
    >>> data = tifs.asarray()
    >>> data.shape
    (2, 100, 256, 256)
    """
    # named regular expressions that extract axis letters and numeric
    # indices from file names; selected via the 'pattern' argument
    _patterns = {
        'axes': r"""
            # matches Olympus OIF and Leica TIFF series
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            """}
    class ParseError(Exception):
        # raised internally when file names do not match the axes pattern
        pass
    def __init__(self, files, imread=TiffFile, pattern='axes',
                 *args, **kwargs):
        """Initialize instance from multiple files.
        Parameters
        ----------
        files : str, or sequence of str
            Glob pattern or sequence of file names.
        imread : function or class
            Image read function or class with asarray function returning numpy
            array from single file.
        pattern : str
            Regular expression pattern that matches axes names and sequence
            indices in file names.
            By default this matches Olympus OIF and Leica TIFF series.
        """
        if isinstance(files, str):
            files = natural_sorted(glob.glob(files))
        files = list(files)
        if not files:
            raise ValueError("no files found")
        # if not os.path.isfile(files[0]):
        # raise ValueError("file not found")
        self.files = files
        if hasattr(imread, 'asarray'):
            # redefine imread: wrap a file-reader class into a function
            # that opens the file, reads the array, and closes it again
            _imread = imread
            def imread(fname, *args, **kwargs):
                with _imread(fname) as im:
                    return im.asarray(*args, **kwargs)
        self.imread = imread
        # a known pattern name resolves to its regex; otherwise 'pattern'
        # itself is used as the regex
        self.pattern = self._patterns.get(pattern, pattern)
        try:
            self._parse()
            if not self.axes:
                self.axes = 'I'
        except self.ParseError:
            # fall back to a flat, one-dimensional sequence of files
            self.axes = 'I'
            self.shape = (len(files),)
            self._start_index = (0,)
            self._indices = tuple((i,) for i in range(len(files)))
    def __str__(self):
        """Return string with information about image sequence."""
        return "\n".join([
            self.files[0],
            '* files: %i' % len(self.files),
            '* axes: %s' % self.axes,
            '* shape: %s' % str(self.shape)])
    def __len__(self):
        return len(self.files)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def close(self):
        pass
    def asarray(self, memmap=False, *args, **kwargs):
        """Read image data from all files and return as single numpy array.
        If memmap is True, return an array stored in a binary file on disk.
        The args and kwargs parameters are passed to the imread function.
        Raise IndexError or ValueError if image shapes don't match.
        """
        # read the first file to learn per-image shape and dtype
        im = self.imread(self.files[0], *args, **kwargs)
        shape = self.shape + im.shape
        if memmap:
            # NOTE(review): the temporary file is closed when the with
            # block exits while the memmap is still used -- confirm this
            # is safe on all platforms
            with tempfile.NamedTemporaryFile() as fh:
                result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
        else:
            result = numpy.zeros(shape, dtype=im.dtype)
        # flatten the sequence dimensions so files can be assigned by
        # their raveled index
        result = result.reshape(-1, *im.shape)
        for index, fname in zip(self._indices, self.files):
            index = [i - j for i, j in zip(index, self._start_index)]
            index = numpy.ravel_multi_index(index, self.shape)
            im = self.imread(fname, *args, **kwargs)
            result[index] = im
        result.shape = shape
        return result
    def _parse(self):
        """Get axes and shape from file names."""
        if not self.pattern:
            raise self.ParseError("invalid pattern")
        pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
        matches = pattern.findall(self.files[0])
        if not matches:
            raise self.ParseError("pattern doesn't match file names")
        # use the last match in the name; groups alternate (axis, index)
        matches = matches[-1]
        if len(matches) % 2:
            raise self.ParseError("pattern doesn't match axis name and index")
        axes = ''.join(m for m in matches[::2] if m)
        if not axes:
            raise self.ParseError("pattern doesn't match file names")
        indices = []
        for fname in self.files:
            matches = pattern.findall(fname)[-1]
            if axes != ''.join(m for m in matches[::2] if m):
                raise ValueError("axes don't match within the image sequence")
            indices.append([int(m) for m in matches[1::2] if m])
        # per-axis extent is max-min+1 of the observed indices
        shape = tuple(numpy.max(indices, axis=0))
        start_index = tuple(numpy.min(indices, axis=0))
        shape = tuple(i - j + 1 for i, j in zip(shape, start_index))
        if product(shape) != len(self.files):
            warnings.warn("files are missing. Missing data are zeroed")
        self.axes = axes.upper()
        self.shape = shape
        self._indices = indices
        self._start_index = start_index
class Record(dict):
    """Dictionary with attribute access.
    Can also be initialized with numpy.core.records.record.
    """
    __slots__ = ()
    def __init__(self, arg=None, **kwargs):
        # accept keyword arguments, any mapping, or a numpy record
        if kwargs:
            arg = kwargs
        elif arg is None:
            arg = {}
        try:
            dict.__init__(self, arg)
        except (TypeError, ValueError):
            # numpy record: copy fields, stripping NULs from byte strings
            for i, name in enumerate(arg.dtype.names):
                v = arg[i]
                self[name] = v if v.dtype.char != 'S' else stripnull(v)
    def __getattr__(self, name):
        # attribute access falls back to item access; note this raises
        # KeyError (not AttributeError) for unknown names
        return self[name]
    def __setattr__(self, name, value):
        self.__setitem__(name, value)
    def __str__(self):
        """Pretty print Record."""
        s = []
        lists = []
        for k in sorted(self):
            try:
                if k.startswith('_'): # does not work with byte
                    continue
            except AttributeError:
                pass
            v = self[k]
            if isinstance(v, (list, tuple)) and len(v):
                if isinstance(v[0], Record):
                    # nested records are printed after scalar entries
                    lists.append((k, v))
                    continue
                elif isinstance(v[0], TiffPage):
                    # summarize page lists by their indices only
                    v = [i.index for i in v if i]
            # first line only, truncated to the module's print width
            s.append(
                ("* %s: %s" % (k, str(v))).split("\n", 1)[0]
                [:PRINT_LINE_LEN].rstrip())
        for k, v in lists:
            l = []
            for i, w in enumerate(v):
                l.append("* %s[%i]\n %s" % (k, i,
                                            str(w).replace("\n", "\n ")))
            s.append('\n'.join(l))
        return '\n'.join(s)
class TiffTags(Record):
    """Dictionary of TiffTag with attribute access."""
    def __str__(self):
        """Return string with information about all tags."""
        lines = []
        # list tags in ascending code order
        for tag in sorted(self.values(), key=lambda t: t.code):
            typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
            info = "* %i %s (%s) %s" % (
                tag.code, tag.name, typecode, tag.as_str())
            # truncate to the module's print width
            lines.append(info[:PRINT_LINE_LEN].lstrip())
        return '\n'.join(lines)
class FileHandle(object):
    """Binary file handle.
    * Handle embedded files (for CZI within CZI files).
    * Allow to re-open closed files (for multi file formats such as OME-TIFF).
    * Read numpy arrays and records from file like objects.
    Only binary read, seek, tell, and close are supported on embedded files.
    When initialized from another file handle, do not use it unless this
    FileHandle is closed.
    Attributes
    ----------
    name : str
        Name of the file.
    path : str
        Absolute path to file.
    size : int
        Size of file in bytes.
    is_file : bool
        If True, file has a filno and can be memory mapped.
    All attributes are read-only.
    """
    __slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
                 '_offset', '_size', '_close', 'is_file')
    def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
        """Initialize file handle from file name or another file handle.
        Parameters
        ----------
        arg : str, File, or FileHandle
            File name or open file handle.
        mode : str
            File open mode in case 'arg' is a file name.
        name : str
            Optional name of file in case 'arg' is a file handle.
        offset : int
            Optional start position of embedded file. By default this is
            the current file position.
        size : int
            Optional size of embedded file. By default this is the number
            of bytes from the 'offset' to the end of the file.
        """
        self._fh = None
        self._arg = arg
        self._mode = mode
        self._name = name
        self._dir = ''
        self._offset = offset
        self._size = size
        # whether close() should actually close the underlying file
        self._close = True
        self.is_file = False
        self.open()
    def open(self):
        """Open or re-open file."""
        if self._fh:
            return # file is open
        if isinstance(self._arg, str):
            # file name: this handle owns the file and will close it
            self._arg = os.path.abspath(self._arg)
            self._dir, self._name = os.path.split(self._arg)
            self._fh = open(self._arg, self._mode)
            self._close = True
            if self._offset is None:
                self._offset = 0
        elif isinstance(self._arg, FileHandle):
            # FileHandle: embedded file; offsets are relative to the
            # parent handle's own offset
            self._fh = self._arg._fh
            if self._offset is None:
                self._offset = 0
            self._offset += self._arg._offset
            self._close = False
            if not self._name:
                if self._offset:
                    name, ext = os.path.splitext(self._arg._name)
                    self._name = "%s@%i%s" % (name, self._offset, ext)
                else:
                    self._name = self._arg._name
            self._dir = self._arg._dir
        else:
            # open file object: borrowed, never closed by this handle
            self._fh = self._arg
            if self._offset is None:
                self._offset = self._arg.tell()
            self._close = False
            if not self._name:
                try:
                    self._dir, self._name = os.path.split(self._fh.name)
                except AttributeError:
                    self._name = "Unnamed stream"
        if self._offset:
            self._fh.seek(self._offset)
        if self._size is None:
            # determine size by seeking to end, then restore position
            pos = self._fh.tell()
            self._fh.seek(self._offset, 2)
            self._size = self._fh.tell()
            self._fh.seek(pos)
        try:
            # a real OS-level file (has fileno) can be memory mapped
            self._fh.fileno()
            self.is_file = True
        except Exception:
            self.is_file = False
    def read(self, size=-1):
        """Read 'size' bytes from file, or until EOF is reached."""
        # for embedded files, "read all" must stop at the embedded size
        if size < 0 and self._offset:
            size = self._size
        return self._fh.read(size)
    def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
        """Return numpy.memmap of data stored in file."""
        if not self.is_file:
            raise ValueError("Can not memory map file without fileno.")
        return numpy.memmap(self._fh, dtype=dtype, mode=mode,
                            offset=self._offset + offset,
                            shape=shape, order=order)
    def read_array(self, dtype, count=-1, sep=""):
        """Return numpy array from file.
        Work around numpy issue #2230, "numpy.fromfile does not accept
        StringIO object" https://github.com/numpy/numpy/issues/2230.
        """
        try:
            return numpy.fromfile(self._fh, dtype, count, sep)
        except IOError:
            # file-like object without a real file: read bytes manually
            if count < 0:
                size = self._size
            else:
                size = count * numpy.dtype(dtype).itemsize
            data = self._fh.read(size)
            return numpy.fromstring(data, dtype, count, sep)
    def read_record(self, dtype, shape=1, byteorder=None):
        """Return numpy record from file."""
        try:
            rec = numpy.rec.fromfile(self._fh, dtype, shape,
                                     byteorder=byteorder)
        except Exception:
            # fallback for file-like objects: read bytes then parse
            dtype = numpy.dtype(dtype)
            if shape is None:
                shape = self._size // dtype.itemsize
            size = product(sequence(shape)) * dtype.itemsize
            data = self._fh.read(size)
            return numpy.rec.fromstring(data, dtype, shape,
                                        byteorder=byteorder)
        return rec[0] if shape == 1 else rec
    def tell(self):
        """Return file's current position."""
        # positions reported to callers are relative to the embedded offset
        return self._fh.tell() - self._offset
    def seek(self, offset, whence=0):
        """Set file's current position."""
        if self._offset:
            if whence == 0:
                # absolute seek: shift by the embedded file's start
                self._fh.seek(self._offset + offset, whence)
                return
            elif whence == 2:
                # seek from end of the embedded file, not the real file
                self._fh.seek(self._offset + self._size + offset, 0)
                return
        self._fh.seek(offset, whence)
    def close(self):
        """Close file."""
        if self._close and self._fh:
            self._fh.close()
        self._fh = None
        self.is_file = False
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def __getattr__(self, name):
        """Return attribute from underlying file object."""
        if self._offset:
            # delegated methods are unaware of the embedded offset
            warnings.warn(
                "FileHandle: '%s' not implemented for embedded files" % name)
        return getattr(self._fh, name)
    @property
    def name(self):
        return self._name
    @property
    def dirname(self):
        return self._dir
    @property
    def path(self):
        return os.path.join(self._dir, self._name)
    @property
    def size(self):
        return self._size
    @property
    def closed(self):
        return self._fh is None
def read_bytes(fh, byteorder, dtype, count):
    """Read tag data from file and return as byte string.
    `fh` is a FileHandle, `byteorder` is '<' or '>', `dtype` is the tag's
    TIFF type string, and `count` is the number of values to read.
    """
    # string ('s') data is read as raw signed bytes; otherwise prefix the
    # numpy type character with the byte order
    dtype = 'b' if dtype[-1] == 's' else byteorder + dtype[-1]
    # tobytes() replaces tostring(), which was deprecated and removed in
    # NumPy 2.0; the returned bytes are identical
    return fh.read_array(dtype, count).tobytes()
def read_numpy(fh, byteorder, dtype, count):
    """Read tag data from file and return as numpy array."""
    if dtype[-1] == 's':
        # string data is read as raw signed bytes
        dtype = 'b'
    else:
        dtype = byteorder + dtype[-1]
    return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
    """Read JSON tag data from file and return as object."""
    raw = fh.read(count)
    try:
        # UnicodeDecodeError is a ValueError subclass, so both invalid
        # UTF-8 and malformed JSON fall through to the warning below
        return json.loads(str(stripnull(raw), 'utf-8'))
    except ValueError:
        # tolerate malformed metadata; caller receives None
        warnings.warn("invalid JSON `%s`" % raw)
def read_mm_header(fh, byteorder, dtype, count):
    """Read MM_HEADER tag from file and return as numpy.rec.array."""
    # record layout is defined by the module-level MM_HEADER structure
    return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
    """Read MM_STAMP tag from file and return as numpy.array."""
    # eight float64 values in the file's byte order
    return fh.read_array(byteorder + 'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
    """Read MetaMorph STK UIC1Tag from file and return as dictionary.
    Return empty dictionary if plane_count is unknown.
    """
    # UIC tags are always little endian
    assert dtype in ('2I', '1I') and byteorder == '<'
    result = {}
    if dtype == '2I':
        # pre MetaMorph 2.5 (not tested): count rational z-distances
        values = fh.read_array('<u4', 2 * count).reshape(count, 2)
        result = {'z_distance': values[:, 0] / values[:, 1]}
    elif plane_count:
        # sequence of (tagid, offset-or-value) pairs
        for i in range(count):
            tagid = struct.unpack('<I', fh.read(4))[0]
            if tagid in (28, 29, 37, 40, 41):
                # silently skip unexpected tags
                fh.read(4)
                continue
            name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
            result[name] = value
    return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
    """Read MetaMorph STK UIC2Tag from file and return as dictionary."""
    assert dtype == '2I' and byteorder == '<'
    # six uint32 values per plane: z-distance rational, creation and
    # modification timestamps
    planes = fh.read_array('<u4', 6 * plane_count).reshape(plane_count, 6)
    return {
        'z_distance': planes[:, 0] / planes[:, 1],
        'date_created': planes[:, 2], # julian days
        'time_created': planes[:, 3], # milliseconds
        'date_modified': planes[:, 4], # julian days
        'time_modified': planes[:, 5], # milliseconds
    }
def read_uic3tag(fh, byteorder, dtype, plane_count):
    """Read MetaMorph STK UIC3Tag from file and return as dictionary."""
    assert dtype == '2I' and byteorder == '<'
    # each plane stores its wavelength as a numerator/denominator pair
    pairs = fh.read_array('<u4', 2 * plane_count).reshape(plane_count, 2)
    return {'wavelengths': pairs[:, 0] / pairs[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
    """Read MetaMorph STK UIC4Tag from file and return as dictionary."""
    assert dtype == '1I' and byteorder == '<'
    result = {}
    # sequence of 16-bit tag ids terminated by a zero id
    tagid = struct.unpack('<H', fh.read(2))[0]
    while tagid != 0:
        name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
        result[name] = value
        tagid = struct.unpack('<H', fh.read(2))[0]
    return result
def read_uic_tag(fh, tagid, plane_count, offset):
    """Read a single UIC tag value from file and return tag name and value.
    UIC1Tags use an offset.
    """
    def read_int(count=1):
        # read count little-endian uint32; scalar when count == 1
        value = struct.unpack('<%iI' % count, fh.read(4 * count))
        return value[0] if count == 1 else value
    try:
        name, dtype = UIC_TAGS[tagid]
    except KeyError:
        # unknown tag
        return '_tagid_%i' % tagid, read_int()
    if offset:
        # UIC1 stores an offset to the value instead of the value itself
        pos = fh.tell()
        if dtype not in (int, None):
            off = read_int()
            if off < 8:
                # offset into the header is invalid; return it as-is
                warnings.warn("invalid offset for uic tag '%s': %i"
                              % (name, off))
                return name, off
            fh.seek(off)
    if dtype is None:
        # skip: consume the value but mark the name as private
        name = '_' + name
        value = read_int()
    elif dtype is int:
        # int
        value = read_int()
    elif dtype is Fraction:
        # fraction stored as numerator/denominator pair
        value = read_int(2)
        value = value[0] / value[1]
    elif dtype is julian_datetime:
        # datetime stored as julian day and milliseconds since midnight
        value = julian_datetime(*read_int(2))
    elif dtype is read_uic_image_property:
        # ImagePropertyEx
        value = read_uic_image_property(fh)
    elif dtype is str:
        # pascal string: uint32 length, characters, trailing byte dropped
        size = read_int()
        if 0 <= size < 2 ** 10:
            value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
            value = stripnull(value)
        elif offset:
            value = ''
            warnings.warn("corrupt string in uic tag '%s'" % name)
        else:
            raise ValueError("invalid string size %i" % size)
    elif dtype == '%ip':
        # sequence of pascal strings, one per plane
        value = []
        for i in range(plane_count):
            size = read_int()
            if 0 <= size < 2 ** 10:
                string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
                string = stripnull(string)
                value.append(string)
            elif offset:
                warnings.warn("corrupt string in uic tag '%s'" % name)
            else:
                raise ValueError("invalid string size %i" % size)
    else:
        # struct or numpy type; '%i' placeholders expand to plane_count
        dtype = '<' + dtype
        if '%i' in dtype:
            dtype = dtype % plane_count
        if '(' in dtype:
            # numpy type
            value = fh.read_array(dtype, 1)[0]
            if value.shape[-1] == 2:
                # assume fractions
                value = value[..., 0] / value[..., 1]
        else:
            # struct format
            value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
            if len(value) == 1:
                value = value[0]
    if offset:
        # restore position just past the 4-byte offset field
        fh.seek(pos + 4)
    return name, value
def read_uic_image_property(fh):
    """Read UIC ImagePropertyEx tag from file and return as dict."""
    # TODO: test this
    # pascal string: length byte, then characters; trailing byte dropped
    namesize = struct.unpack('B', fh.read(1))[0]
    name = struct.unpack('%is' % namesize, fh.read(namesize))[0][:-1]
    flags, prop = struct.unpack('<IB', fh.read(5))
    if prop == 1:
        # property is a rational stored as two native uint32
        numerator, denominator = struct.unpack('II', fh.read(8))
        value = numerator / denominator
    else:
        # property is another pascal-style string (kept in full)
        valuesize = struct.unpack('B', fh.read(1))[0]
        value = struct.unpack('%is' % valuesize, fh.read(valuesize))[0]
    return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
    """Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
    assert byteorder == '<'
    magic_number, structure_size = struct.unpack('<II', fh.read(8))
    if magic_number not in (50350412, 67127628):
        raise ValueError("not a valid CS_LSM_INFO structure")
    # rewind so the record read below includes the header fields again
    fh.seek(-8, 1)
    if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
        # adjust structure according to structure_size: older files only
        # contain a prefix of the full CZ_LSM_INFO field list
        cz_lsm_info = []
        size = 0
        for name, dtype in CZ_LSM_INFO:
            size += numpy.dtype(dtype).itemsize
            if size > structure_size:
                break
            cz_lsm_info.append((name, dtype))
    else:
        cz_lsm_info = CZ_LSM_INFO
    return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
    """Read LSM sequence of float pairs from file and return as list."""
    # int32 count followed by count pairs of float64 values
    count = struct.unpack('<i', fh.read(4))[0]
    return fh.read_array('<2f8', count=count)
def read_cz_lsm_positions(fh):
    """Read LSM positions from file and return as list."""
    # uint32 count followed by count pairs of float64 coordinates
    count = struct.unpack('<I', fh.read(4))[0]
    return fh.read_array('<2f8', count=count)
def read_cz_lsm_time_stamps(fh):
    """Read LSM time stamps from file and return as list."""
    size, count = struct.unpack('<ii', fh.read(8))
    # block is 8 header bytes plus one float64 per stamp
    expected_size = 8 + 8 * count
    if size != expected_size:
        raise ValueError("lsm_time_stamps block is too short")
    # return struct.unpack('<%dd' % count, fh.read(8*count))
    return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
    """Read LSM events from file and return as list of (time, type, text)."""
    # header is two uint32: total size (ignored) and event count
    count = struct.unpack('<II', fh.read(8))[1]
    events = []
    for _ in range(count):
        # each entry: entry size, float64 time, uint32 type, then text
        esize, etime, etype = struct.unpack('<IdI', fh.read(16))
        etext = stripnull(fh.read(esize - 16))
        events.append((etime, etype, etext))
    return events
def read_cz_lsm_scan_info(fh):
    """Read LSM scan information from file and return as Record."""
    block = Record()
    # stack of enclosing blocks; sub-blocks push, end markers pop
    blocks = [block]
    unpack = struct.unpack
    if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
        # not a Recording sub block
        raise ValueError("not a lsm_scan_info structure")
    fh.read(8)
    while True:
        # each entry: uint32 id, uint32 data type, uint32 payload size
        entry, dtype, size = unpack('<III', fh.read(12))
        if dtype == 2:
            # ascii
            value = stripnull(fh.read(size))
        elif dtype == 4:
            # long
            value = unpack('<i', fh.read(4))[0]
        elif dtype == 5:
            # rational
            value = unpack('<d', fh.read(8))[0]
        else:
            value = 0
        if entry in CZ_LSM_SCAN_INFO_ARRAYS:
            # start of a list-valued sub block
            blocks.append(block)
            name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
            newobj = []
            setattr(block, name, newobj)
            block = newobj
        elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
            # start of a record-valued sub block, appended to parent list
            blocks.append(block)
            newobj = Record()
            block.append(newobj)
            block = newobj
        elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
            name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
            setattr(block, name, value)
        elif entry == 0xffffffff:
            # end sub block
            block = blocks.pop()
        else:
            # unknown entry
            setattr(block, "entry_0x%x" % entry, value)
        if not blocks:
            # popped past the root block: structure is complete
            break
    return block
def read_nih_image_header(fh, byteorder, dtype, count):
    """Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
    header = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
    header = header.newbyteorder(byteorder)
    # truncate fixed-size string fields to their recorded lengths
    header.xunit = header.xunit[:header._xunit_len]
    header.um = header.um[:header._um_len]
    return header
def read_micromanager_metadata(fh):
    """Read MicroManager non-TIFF settings from open file and return as dict.
    The settings can be used to read image data without parsing the TIFF file.
    Raise ValueError if file does not contain valid MicroManager metadata.
    """
    fh.seek(0)
    try:
        byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except KeyError:
        # a truncated or non-TIFF file yields a byte string that is not a
        # key of the mapping; the previous `except IndexError` could never
        # catch this, letting KeyError escape instead of ValueError
        raise ValueError("not a MicroManager TIFF file")
    results = {}
    fh.seek(8)
    # fixed header: magic numbers and offsets of the index map, display
    # settings, comments, and summary metadata blocks
    (index_header, index_offset, display_header, display_offset,
     comments_header, comments_offset, summary_header, summary_length
     ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
    if summary_header != 2355492:
        raise ValueError("invalid MicroManager summary_header")
    results['summary'] = read_json(fh, byteorder, None, summary_length)
    if index_header != 54773648:
        raise ValueError("invalid MicroManager index_header")
    fh.seek(index_offset)
    header, count = struct.unpack(byteorder + "II", fh.read(8))
    if header != 3453623:
        raise ValueError("invalid MicroManager index_header")
    # five interleaved uint32 per frame
    data = struct.unpack(byteorder + "IIIII" * count, fh.read(20 * count))
    results['index_map'] = {
        'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
        'position': data[3::5], 'offset': data[4::5]}
    if display_header != 483765892:
        raise ValueError("invalid MicroManager display_header")
    fh.seek(display_offset)
    header, count = struct.unpack(byteorder + "II", fh.read(8))
    if header != 347834724:
        raise ValueError("invalid MicroManager display_header")
    results['display_settings'] = read_json(fh, byteorder, None, count)
    if comments_header != 99384722:
        raise ValueError("invalid MicroManager comments_header")
    fh.seek(comments_offset)
    header, count = struct.unpack(byteorder + "II", fh.read(8))
    if header != 84720485:
        raise ValueError("invalid MicroManager comments_header")
    results['comments'] = read_json(fh, byteorder, None, count)
    return results
def imagej_metadata(data, bytecounts, byteorder):
    """Return dict from ImageJ metadata tag value."""
    # on py3, decode metadata strings from cp1252
    _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
    def read_string(data, byteorder):
        # strings are UTF-16; take every other byte per the byte order
        return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
    def read_double(data, byteorder):
        return struct.unpack(byteorder + ('d' * (len(data) // 8)), data)
    def read_bytes(data, byteorder):
        # return struct.unpack('b' * len(data), data)
        return numpy.fromstring(data, 'uint8')
    metadata_types = { # big endian
        b'info': ('info', read_string),
        b'labl': ('labels', read_string),
        b'rang': ('ranges', read_double),
        b'luts': ('luts', read_bytes),
        b'roi ': ('roi', read_bytes),
        b'over': ('overlays', read_bytes)}
    metadata_types.update( # little endian
        dict((k[::-1], v) for k, v in metadata_types.items()))
    if not bytecounts:
        raise ValueError("no ImageJ metadata")
    if not data[:4] in (b'IJIJ', b'JIJI'):
        raise ValueError("invalid ImageJ metadata")
    # header: magic, then (4-byte type, uint32 count) pairs
    header_size = bytecounts[0]
    if header_size < 12 or header_size > 804:
        raise ValueError("invalid ImageJ metadata header size")
    ntypes = (header_size - 4) // 8
    header = struct.unpack(byteorder + '4sI' * ntypes, data[4:4 + ntypes * 8])
    pos = 4 + ntypes * 8
    counter = 0
    result = {}
    for mtype, count in zip(header[::2], header[1::2]):
        values = []
        # unknown types fall back to raw bytes under their literal name
        name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
        for _ in range(count):
            counter += 1
            # bytecounts[counter] gives the size of the next value
            pos1 = pos + bytecounts[counter]
            values.append(func(data[pos:pos1], byteorder))
            pos = pos1
        result[name.strip()] = values[0] if count == 1 else values
    return result
def imagej_description(description):
    """Return dict from ImageJ image_description tag.
    Lines of the form ``key=value`` are parsed; values are coerced to
    int, float, or bool when possible, otherwise kept as strings.
    """
    def _bool(val):
        return {b'true': True, b'false': False}[val.lower()]
    _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
    result = {}
    for line in description.splitlines():
        try:
            key, val = line.split(b'=')
        except Exception:
            # skip lines that are not exactly `key=value`
            continue
        key = key.strip()
        val = val.strip()
        # try the narrowest type first; fall back to string
        for cast in (int, float, _bool, _str):
            try:
                val = cast(val)
            except Exception:
                continue
            break
        result[_str(key)] = val
    return result
def _replace_by(module_function, package=None, warn=False):
    """Try replace decorated function by module.function."""
    # used to swap pure-Python codecs for faster implementations from the
    # optional _tifffile extension module, when it can be imported
    try:
        from importlib import import_module
    except ImportError:
        warnings.warn('could not import module importlib')
        return lambda func: func
    def decorate(func, module_function=module_function, warn=warn):
        try:
            module, function = module_function.split('.')
            if not package:
                module = import_module(module)
            else:
                module = import_module('.' + module, package=package)
            # keep the pure-Python implementation reachable for testing
            # under the '__old_<name>' module-level name
            func, oldfunc = getattr(module, function), func
            globals()['__old_' + func.__name__] = oldfunc
        except Exception:
            if warn:
                warnings.warn("failed to import %s" % module_function)
        return func
    return decorate
def decodejpg(encoded, tables=b'', photometric=None,
              ycbcr_subsampling=None, ycbcr_positioning=None):
    """Decode JPEG encoded byte string (using _czifile extension module)."""
    # requires the optional _czifile C extension; raises ImportError if
    # it is not installed
    import _czifile
    image = _czifile.decodejpg(encoded, tables)
    if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
        # TODO: convert YCbCr to RGB
        pass
    return image.tostring()
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
    """Decompress PackBits encoded byte string.
    PackBits is a simple byte-oriented run-length compression scheme.
    """
    # indexing a py2 str yields a 1-char string; a py3 bytes yields an int
    func = ord if sys.version[0] == '2' else lambda x: x
    result = []
    result_extend = result.extend
    i = 0
    try:
        while True:
            n = func(encoded[i]) + 1
            i += 1
            if n < 129:
                # literal run of n bytes
                result_extend(encoded[i:i + n])
                i += n
            elif n > 129:
                # replicate the next byte 258 - n times; n == 129
                # (raw byte 128) is a no-op per the PackBits scheme
                result_extend(encoded[i:i + 1] * (258 - n))
                i += 1
    except IndexError:
        # end of input reached; loop terminates via IndexError by design
        pass
    return b''.join(result) if sys.version[0] == '2' else bytes(result)
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
    """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
    The strip must begin with a CLEAR code and end with an EOI code.
    This is an implementation of the LZW decoding algorithm described in (1).
    It is not compatible with old style LZW compressed files like quad-lzw.tif.
    """
    len_encoded = len(encoded)
    bitcount_max = len_encoded * 8
    unpack = struct.unpack
    # initial code table: codes 0-255 map to single bytes; entries for the
    # CLEAR (256) and EOI (257) codes are placeholders
    if sys.version[0] == '2':
        newtable = [chr(i) for i in range(256)]
    else:
        newtable = [bytes([i]) for i in range(256)]
    newtable.extend((0, 0))
    def next_code():
        """Return integer of `bitw` bits at `bitcount` position in encoded."""
        start = bitcount // 8
        s = encoded[start:start + 4]
        try:
            code = unpack('>I', s)[0]
        except Exception:
            # near end of strip: pad to four bytes before unpacking
            code = unpack('>I', s + b'\x00' * (4 - len(s)))[0]
        code <<= bitcount % 8
        code &= mask
        return code >> shr
    switchbitch = { # code: bit-width, shr-bits, bit-mask
        255: (9, 23, int(9 * '1' + '0' * 23, 2)),
        511: (10, 22, int(10 * '1' + '0' * 22, 2)),
        1023: (11, 21, int(11 * '1' + '0' * 21, 2)),
        2047: (12, 20, int(12 * '1' + '0' * 20, 2)), }
    bitw, shr, mask = switchbitch[255]
    bitcount = 0
    if len_encoded < 4:
        raise ValueError("strip must be at least 4 characters long")
    if next_code() != 256:
        raise ValueError("strip must begin with CLEAR code")
    code = 0
    oldcode = 0
    result = []
    result_append = result.append
    while True:
        code = next_code() # ~5% faster when inlining this function
        bitcount += bitw
        if code == 257 or bitcount >= bitcount_max: # EOI
            break
        if code == 256: # CLEAR
            # reset the code table and return to 9-bit codes
            table = newtable[:]
            table_append = table.append
            lentable = 258
            bitw, shr, mask = switchbitch[255]
            code = next_code()
            bitcount += bitw
            if code == 257: # EOI
                break
            result_append(table[code])
        else:
            if code < lentable:
                decoded = table[code]
                newcode = table[oldcode] + decoded[:1]
            else:
                # code not yet in table: the KwKwK special case
                newcode = table[oldcode]
                newcode += newcode[:1]
                decoded = newcode
            result_append(decoded)
            table_append(newcode)
            lentable += 1
        oldcode = code
        if lentable in switchbitch:
            # table reached 511/1023/2047 entries: widen the code size
            bitw, shr, mask = switchbitch[lentable]
    if code != 257:
        warnings.warn("unexpected end of lzw stream (code %i)" % code)
    return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
    """Decompress byte string to array of integers of any bit size <= 32.
    Parameters
    ----------
    data : byte str
        Data to decompress.
    dtype : numpy.dtype or str
        A numpy boolean or integer type.
    itemsize : int
        Number of bits per integer.
    runlen : int
        Number of consecutive integers, after which to start at next byte.
    """
    if itemsize == 1: # bitarray
        data = numpy.fromstring(data, '|B')
        data = numpy.unpackbits(data)
        if runlen % 8:
            # drop the per-row padding bits added to reach a byte boundary
            data = data.reshape(-1, runlen + (8 - runlen % 8))
            data = data[:, :runlen].reshape(-1)
        return data.astype(dtype)
    dtype = numpy.dtype(dtype)
    if itemsize in (8, 16, 32, 64):
        # byte-aligned sizes need no bit manipulation
        return numpy.fromstring(data, dtype)
    if itemsize < 1 or itemsize > 32:
        raise ValueError("itemsize out of range: %i" % itemsize)
    if dtype.kind not in "biu":
        raise ValueError("invalid dtype")
    # smallest power-of-two byte size that can hold one item
    itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
    if itembytes != dtype.itemsize:
        raise ValueError("dtype.itemsize too small")
    if runlen == 0:
        runlen = len(data) // itembytes
    # bits skipped at the end of each run to realign to a byte boundary
    skipbits = runlen * itemsize % 8
    if skipbits:
        skipbits = 8 - skipbits
    shrbits = itembytes * 8 - itemsize
    bitmask = int(itemsize * '1' + '0' * shrbits, 2)
    dtypestr = '>' + dtype.char # dtype always big endian?
    unpack = struct.unpack
    l = runlen * (len(data) * 8 // (runlen * itemsize + skipbits))
    result = numpy.empty((l, ), dtype)
    bitcount = 0
    for i in range(len(result)):
        start = bitcount // 8
        s = data[start:start + itembytes]
        try:
            code = unpack(dtypestr, s)[0]
        except Exception:
            # near end of data: pad with zero bytes before unpacking
            code = unpack(dtypestr, s + b'\x00' * (itembytes - len(s)))[0]
        code <<= bitcount % 8
        code &= bitmask
        result[i] = code >> shrbits
        bitcount += itemsize
        if (i + 1) % runlen == 0:
            bitcount += skipbits
    return result
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
    """Return array from byte string containing packed samples.
    Use to unpack RGB565 or RGB555 to RGB888 format.
    Parameters
    ----------
    data : byte str
        The data to be decoded. Samples in each pixel are stored consecutively.
        Pixels are aligned to 8, 16, or 32 bit boundaries.
    dtype : numpy.dtype
        The sample data type. The byteorder applies also to the data stream.
    bitspersample : tuple
        Number of bits for each sample in a pixel.
    rescale : bool
        Upscale samples to the number of bits in dtype.
    Returns
    -------
    result : ndarray
        Flattened array of unpacked samples of native dtype.
    Examples
    --------
    >>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
    >>> print(unpackrgb(data, '<B', (5, 6, 5), False))
    [ 1 1 1 31 63 31]
    >>> print(unpackrgb(data, '<B', (5, 6, 5)))
    [ 8 4 8 255 255 255]
    >>> print(unpackrgb(data, '<B', (5, 5, 5)))
    [ 16 8 8 255 255 255]
    """
    dtype = numpy.dtype(dtype)
    bits = int(numpy.sum(bitspersample))
    if not (bits <= 32 and all(
            i <= dtype.itemsize * 8 for i in bitspersample)):
        raise ValueError("sample size not supported %s" % str(bitspersample))
    # smallest unsigned integer type that can hold one whole pixel
    dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize * 8 >= bits)
    # frombuffer replaces fromstring (deprecated, removed in NumPy 2.0);
    # the read-only view is fine since `data` is never written below
    data = numpy.frombuffer(data, dtype.byteorder + dt)
    result = numpy.empty((data.size, len(bitspersample)), dtype.char)
    for i, bps in enumerate(bitspersample):
        # shift the i-th sample down to bit 0 and mask off the others
        t = data >> int(numpy.sum(bitspersample[i + 1:]))
        t &= int('0b' + '1' * bps, 2)
        if rescale:
            # replicate the sample bits to fill the output width
            o = ((dtype.itemsize * 8) // bps + 1) * bps
            if o > data.dtype.itemsize * 8:
                t = t.astype('I')
            t *= (2 ** o - 1) // (2 ** bps - 1)
            t //= 2 ** (o - (dtype.itemsize * 8))
        result[:, i] = t
    return result.reshape(-1)
def reorient(image, orientation):
    """Return reoriented view of image array.
    Parameters
    ----------
    image : numpy array
        Non-squeezed output of asarray() functions.
        Axes -3 and -2 must be image length and width respectively.
    orientation : int or str
        One of TIFF_ORIENTATIONS keys or values.
    """
    # map each orientation name to the flip/transpose that undoes it
    transforms = {
        'top_left': lambda im: im,
        'top_right': lambda im: im[..., ::-1, :],
        'bottom_left': lambda im: im[..., ::-1, :, :],
        'bottom_right': lambda im: im[..., ::-1, ::-1, :],
        'left_top': lambda im: numpy.swapaxes(im, -3, -2),
        'right_top': lambda im: numpy.swapaxes(im, -3, -2)[..., ::-1, :],
        'left_bottom': lambda im: numpy.swapaxes(
            im, -3, -2)[..., ::-1, :, :],
        'right_bottom': lambda im: numpy.swapaxes(
            im, -3, -2)[..., ::-1, ::-1, :],
    }
    # integer codes resolve to names; unknown values pass through
    key = TIFF_ORIENTATIONS.get(orientation, orientation)
    transform = transforms.get(key)
    # unknown orientations yield None, matching the original fall-through
    return transform(image) if transform is not None else None
def squeeze_axes(shape, axes, skip='XY'):
    """Return shape and axes with single-dimensional entries removed.
    Remove unused dimensions unless their axes are listed in 'skip'.
    >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
    ((5, 2, 1), 'TYX')
    """
    if len(shape) != len(axes):
        raise ValueError("dimensions of axes and shape don't match")
    # keep dimensions larger than one, plus any axis named in `skip`
    kept = [(size, axis) for size, axis in zip(shape, axes)
            if size > 1 or axis in skip]
    shape, axes = zip(*kept)
    return shape, ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
    """Return data with its axes permuted to match specified axes.
    A view is returned if possible.
    >>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
    (5, 2, 1, 3, 4)
    """
    for ax in axes:
        if ax not in asaxes:
            raise ValueError("unknown axis %s" % ax)
    # prepend any axes of `asaxes` missing from `axes` as length-1
    # dimensions, in `asaxes` order
    missing = [ax for ax in asaxes if ax not in axes]
    if missing:
        axes = ''.join(missing) + axes
        data = data.reshape((1,) * len(missing) + data.shape)
    # permute so the dimensions appear in `asaxes` order
    return data.transpose([axes.index(ax) for ax in asaxes])
def stack_pages(pages, memmap=False, *args, **kwargs):
    """Read data from sequence of TiffPage and stack them vertically.
    If memmap is True, return an array stored in a binary file on disk.
    Additional parameters are passed to the page asarray function.
    """
    if len(pages) == 0:
        raise ValueError("no pages")
    if len(pages) == 1:
        return pages[0].asarray(memmap=memmap, *args, **kwargs)
    # read the first page to learn the per-page shape and dtype
    result = pages[0].asarray(*args, **kwargs)
    shape = (len(pages),) + result.shape
    if memmap:
        # NOTE(review): the temporary file is closed when the with block
        # exits while the memmap is still in use; this appears to rely on
        # the OS keeping the mapping alive -- confirm on Windows
        with tempfile.NamedTemporaryFile() as fh:
            result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
    else:
        result = numpy.empty(shape, dtype=result.dtype)
    for i, page in enumerate(pages):
        # note: the first page is read twice (once above for shape)
        result[i] = page.asarray(*args, **kwargs)
    return result
def stripnull(string):
    """Return string truncated at first null character.

    Clean NULL terminated C strings.

    >>> stripnull(b'string\\x00')
    b'string'

    """
    pos = string.find(b'\x00')
    if pos < 0:
        # no NULL byte present; return input unchanged
        return string
    return string[:pos]
def stripascii(string):
    """Return string truncated at last byte that is 7bit ASCII.

    Clean NULL separated and terminated TIFF strings.

    >>> stripascii(b'string\\x00string\\n\\x01\\x00')
    b'string\\x00string\\n'
    >>> stripascii(b'\\x00')
    b''

    """
    # Python 2 yields one-byte strings when indexing bytes; Python 3 ints.
    ord_ = ord if sys.version_info[0] < 3 else lambda x: x
    end = len(string)
    # scan backwards past trailing bytes that are not printable 7-bit ASCII
    while end > 0 and not (8 < ord_(string[end - 1]) < 127):
        end -= 1
    return string[:end]
def format_size(size):
    """Return file size as human readable string from byte size.

    >>> format_size(1234)
    '1234 B'

    """
    for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
        if size < 2048:
            return "%.f %s" % (size, unit)
        size /= 1024.0
    # size >= 2048 TB: report in petabytes instead of implicitly
    # returning None (the original fell off the end of the loop)
    return "%.f PB" % size
def sequence(value):
    """Return tuple containing value if value is not a sequence.

    >>> sequence(1)
    (1,)
    >>> sequence([1])
    [1]

    """
    # EAFP: anything without a usable len() is wrapped in a tuple
    try:
        len(value)
    except TypeError:
        return (value,)
    return value
def product(iterable):
    """Return product of sequence of numbers.

    Equivalent of functools.reduce(operator.mul, iterable, 1).

    >>> product([2**8, 2**30])
    274877906944
    >>> product([])
    1

    """
    result = 1
    for factor in iterable:
        result = result * factor
    return result
def natural_sorted(iterable):
    """Return human sorted list of strings.

    E.g. for sorting file names.

    >>> natural_sorted(['f1', 'f2', 'f10'])
    ['f1', 'f2', 'f10']

    """
    digit_runs = re.compile(r'(\d+)')

    def natural_key(value):
        # split into digit and non-digit runs; compare digits numerically
        return [int(part) if part.isdigit() else part
                for part in digit_runs.split(value)]

    return sorted(iterable, key=natural_key)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
    """Return datetime object from timestamp in Excel serial format.

    Convert LSM time stamps.

    >>> excel_datetime(40237.029999999795)
    datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)

    """
    # the epoch default is 1899-12-30, the Excel serial date origin
    return epoch + datetime.timedelta(days=timestamp)
def julian_datetime(julianday, milisecond=0):
    """Return datetime from days since 1/1/4713 BC and ms since midnight.

    Convert Julian dates according to MetaMorph.

    >>> julian_datetime(2451576, 54362783)
    datetime.datetime(2000, 2, 2, 15, 6, 2, 783)

    """
    if julianday <= 1721423:
        # no datetime representation exists for years before 1
        return None

    jd = julianday + 1
    if jd > 2299160:
        # Gregorian calendar correction
        century = math.trunc((jd - 1867216.25) / 36524.25)
        jd += 1 + century - century // 4
    jd += 1524 if jd > 1721423 else 1158
    year_cycles = math.trunc((jd - 122.1) / 365.25)
    cycle_days = math.trunc(365.25 * year_cycles)
    month_index = math.trunc((jd - cycle_days) / 30.6001)

    day = jd - cycle_days - math.trunc(30.6001 * month_index)
    month = month_index - (1 if month_index < 13.5 else 13)
    year = year_cycles - (4716 if month > 2.5 else 4715)

    # split milliseconds-since-midnight into time-of-day components
    hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
    minute, milisecond = divmod(milisecond, 1000 * 60)
    second, milisecond = divmod(milisecond, 1000)

    return datetime.datetime(year, month, day,
                             hour, minute, second, milisecond)
def test_tifffile(directory='testimages', verbose=True):
    """Read all images in directory.

    Print error message on failure.

    >>> test_tifffile(verbose=False)

    """
    successful = 0
    failed = 0
    start = time.time()
    # try to open and decode every file in the directory
    for f in glob.glob(os.path.join(directory, '*.*')):
        if verbose:
            print("\n%s>\n" % f.lower(), end='')
        t0 = time.time()
        try:
            tif = TiffFile(f, multifile=True)
        except Exception as e:
            if not verbose:
                print(f, end=' ')
            print("ERROR:", e)
            failed += 1
            continue
        try:
            img = tif.asarray()
        except ValueError:
            try:
                # fall back to decoding only the first page
                img = tif[0].asarray()
            except Exception as e:
                if not verbose:
                    print(f, end=' ')
                print("ERROR:", e)
                failed += 1
                continue
        finally:
            # always release the file handle, even on failure
            tif.close()
        successful += 1
        if verbose:
            print("%s, %s %s, %s, %.0f ms" % (
                str(tif), str(img.shape), img.dtype, tif[0].compression,
                (time.time() - t0) * 1e3))
    if verbose:
        print("\nSuccessfully read %i of %i files in %.3f s\n" % (
            successful, successful + failed, time.time() - start))
class TIFF_SUBFILE_TYPES(object):
    """Decode the NewSubfileType (tag 254) bitmask into a tuple of names."""
    def __getitem__(self, key):
        # the three documented bit flags, in ascending bit order
        flags = ((1, 'reduced_image'), (2, 'page'), (4, 'mask'))
        return tuple(name for bit, name in flags if key & bit)
# Map TIFF PhotometricInterpretation (tag 262) values to names
TIFF_PHOTOMETRICS = {
    0: 'miniswhite',
    1: 'minisblack',
    2: 'rgb',
    3: 'palette',
    4: 'mask',
    5: 'separated',  # CMYK
    6: 'ycbcr',
    8: 'cielab',
    9: 'icclab',
    10: 'itulab',
    32803: 'cfa',  # Color Filter Array
    32844: 'logl',
    32845: 'logluv',
    34892: 'linear_raw'
}
# Map TIFF Compression (tag 259) values to scheme names.
# NOTE: the misspelled name "COMPESSIONS" is kept as-is; it is referenced
# elsewhere in this module.
TIFF_COMPESSIONS = {
    1: None,
    2: 'ccittrle',
    3: 'ccittfax3',
    4: 'ccittfax4',
    5: 'lzw',
    6: 'ojpeg',
    7: 'jpeg',
    8: 'adobe_deflate',
    9: 't85',
    10: 't43',
    32766: 'next',
    32771: 'ccittrlew',
    32773: 'packbits',
    32809: 'thunderscan',
    32895: 'it8ctpad',
    32896: 'it8lw',
    32897: 'it8mp',
    32898: 'it8bl',
    32908: 'pixarfilm',
    32909: 'pixarlog',
    32946: 'deflate',
    32947: 'dcs',
    34661: 'jbig',
    34676: 'sgilog',
    34677: 'sgilog24',
    34712: 'jp2000',
    34713: 'nef',
}
# Map compression scheme name to decompression callable; schemes not
# listed here cannot be decoded by this module
TIFF_DECOMPESSORS = {
    None: lambda x: x,
    'adobe_deflate': zlib.decompress,
    'deflate': zlib.decompress,
    'packbits': decodepackbits,
    'lzw': decodelzw,
    # 'jpeg': decodejpg
}
# Map TIFF field type codes to struct format strings (count + char)
TIFF_DATA_TYPES = {
    1: '1B',   # BYTE 8-bit unsigned integer.
    2: '1s',   # ASCII 8-bit byte that contains a 7-bit ASCII code;
               #   the last byte must be NULL (binary zero).
    3: '1H',   # SHORT 16-bit (2-byte) unsigned integer
    4: '1I',   # LONG 32-bit (4-byte) unsigned integer.
    5: '2I',   # RATIONAL Two LONGs: the first represents the numerator of
               #   a fraction; the second, the denominator.
    6: '1b',   # SBYTE An 8-bit signed (twos-complement) integer.
    7: '1s',   # UNDEFINED An 8-bit byte that may contain anything,
               #   depending on the definition of the field.
    8: '1h',   # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
    9: '1i',   # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
    10: '2i',  # SRATIONAL Two SLONGs: the first represents the numerator
               #   of a fraction, the second the denominator.
    11: '1f',  # FLOAT Single precision (4-byte) IEEE format.
    12: '1d',  # DOUBLE Double precision (8-byte) IEEE format.
    13: '1I',  # IFD unsigned 4 byte IFD offset.
    # 14: '',  # UNICODE
    # 15: '',  # COMPLEX
    16: '1Q',  # LONG8 unsigned 8 byte integer (BigTiff)
    17: '1q',  # SLONG8 signed 8 byte integer (BigTiff)
    18: '1Q',  # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
# Map SampleFormat (tag 339) values to names
TIFF_SAMPLE_FORMATS = {
    1: 'uint',
    2: 'int',
    3: 'float',
    # 4: 'void',
    # 5: 'complex_int',
    6: 'complex',
}
# Map (sample format name, bits per sample) to numpy dtype character
TIFF_SAMPLE_DTYPES = {
    ('uint', 1): '?',  # bitmap
    ('uint', 2): 'B',
    ('uint', 3): 'B',
    ('uint', 4): 'B',
    ('uint', 5): 'B',
    ('uint', 6): 'B',
    ('uint', 7): 'B',
    ('uint', 8): 'B',
    ('uint', 9): 'H',
    ('uint', 10): 'H',
    ('uint', 11): 'H',
    ('uint', 12): 'H',
    ('uint', 13): 'H',
    ('uint', 14): 'H',
    ('uint', 15): 'H',
    ('uint', 16): 'H',
    ('uint', 17): 'I',
    ('uint', 18): 'I',
    ('uint', 19): 'I',
    ('uint', 20): 'I',
    ('uint', 21): 'I',
    ('uint', 22): 'I',
    ('uint', 23): 'I',
    ('uint', 24): 'I',
    ('uint', 25): 'I',
    ('uint', 26): 'I',
    ('uint', 27): 'I',
    ('uint', 28): 'I',
    ('uint', 29): 'I',
    ('uint', 30): 'I',
    ('uint', 31): 'I',
    ('uint', 32): 'I',
    ('uint', 64): 'Q',
    ('int', 8): 'b',
    ('int', 16): 'h',
    ('int', 32): 'i',
    ('int', 64): 'q',
    ('float', 16): 'e',
    ('float', 32): 'f',
    ('float', 64): 'd',
    ('complex', 64): 'F',
    ('complex', 128): 'D',
    ('uint', (5, 6, 5)): 'B',  # 16-bit RGB 5-6-5, unpacked to bytes
}
# Map Orientation (tag 274) values to names
TIFF_ORIENTATIONS = {
    1: 'top_left',
    2: 'top_right',
    3: 'bottom_right',
    4: 'bottom_left',
    5: 'left_top',
    6: 'right_top',
    7: 'right_bottom',
    8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
    'X': 'width',
    'Y': 'height',
    'Z': 'depth',
    'S': 'sample',  # rgb(a)
    'I': 'series',  # general sequence, plane, page, IFD
    'T': 'time',
    'C': 'channel',  # color, emission wavelength
    'A': 'angle',
    'P': 'phase',  # formerly F    # P is Position in LSM!
    'R': 'tile',  # region, point, mosaic
    'H': 'lifetime',  # histogram
    'E': 'lambda',  # excitation wavelength
    'L': 'exposure',  # lux
    'V': 'event',
    'Q': 'other',
    # 'M': 'mosaic',  # LSM 6
}
# make the mapping bidirectional: also map label -> axis character
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# Map OME-XML PixelType attribute values to numpy dtype strings
OME_PIXEL_TYPES = {
    'int8': 'i1',
    'int16': 'i2',
    'int32': 'i4',
    'uint8': 'u1',
    'uint16': 'u2',
    'uint32': 'u4',
    'float': 'f4',
    # 'bit': 'bit',
    'double': 'f8',
    'complex': 'c8',
    'double-complex': 'c16',
}
# NIH Image PicHeader v1.63: numpy structured-array field definitions
NIH_IMAGE_HEADER = [
    ('fileid', 'a8'),
    ('nlines', 'i2'),
    ('pixelsperline', 'i2'),
    ('version', 'i2'),
    ('oldlutmode', 'i2'),
    ('oldncolors', 'i2'),
    ('colors', 'u1', (3, 32)),
    ('oldcolorstart', 'i2'),
    ('colorwidth', 'i2'),
    ('extracolors', 'u2', (6, 3)),
    ('nextracolors', 'i2'),
    ('foregroundindex', 'i2'),
    ('backgroundindex', 'i2'),
    ('xscale', 'f8'),
    ('_x0', 'i2'),
    ('_x1', 'i2'),
    ('units_t', 'i2'),  # NIH_UNITS_TYPE
    ('p1', [('x', 'i2'), ('y', 'i2')]),
    ('p2', [('x', 'i2'), ('y', 'i2')]),
    ('curvefit_t', 'i2'),  # NIH_CURVEFIT_TYPE
    ('ncoefficients', 'i2'),
    ('coeff', 'f8', 6),
    ('_um_len', 'u1'),
    ('um', 'a15'),
    ('_x2', 'u1'),
    ('binarypic', 'b1'),
    ('slicestart', 'i2'),
    ('sliceend', 'i2'),
    ('scalemagnification', 'f4'),
    ('nslices', 'i2'),
    ('slicespacing', 'f4'),
    ('currentslice', 'i2'),
    ('frameinterval', 'f4'),
    ('pixelaspectratio', 'f4'),
    ('colorstart', 'i2'),
    ('colorend', 'i2'),
    ('ncolors', 'i2'),
    ('fill1', '3u2'),
    ('fill2', '3u2'),
    ('colortable_t', 'u1'),  # NIH_COLORTABLE_TYPE
    ('lutmode_t', 'u1'),  # NIH_LUTMODE_TYPE
    ('invertedtable', 'b1'),
    ('zeroclip', 'b1'),
    ('_xunit_len', 'u1'),
    ('xunit', 'a11'),
    ('stacktype_t', 'i2'),  # NIH_STACKTYPE_TYPE
]
# Enumerations referenced by the *_t fields above; index = stored value
NIH_COLORTABLE_TYPE = (
    'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
    'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
    'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
    'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
    'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
    'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
    'UncalibratedOD')
NIH_UNITS_TYPE = (
    'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
    'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
    'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and
# type; the type is either a Python/conversion callable, a numpy dtype
# template ('%i' is replaced by the plane count), or None for unsupported
UIC_TAGS = {
    0: ('auto_scale', int),
    1: ('min_scale', int),
    2: ('max_scale', int),
    3: ('spatial_calibration', int),
    4: ('x_calibration', Fraction),
    5: ('y_calibration', Fraction),
    6: ('calibration_units', str),
    7: ('name', str),
    8: ('thresh_state', int),
    9: ('thresh_state_red', int),
    10: ('tagid_10', None),  # undefined
    11: ('thresh_state_green', int),
    12: ('thresh_state_blue', int),
    13: ('thresh_state_lo', int),
    14: ('thresh_state_hi', int),
    15: ('zoom', int),
    16: ('create_time', julian_datetime),
    17: ('last_saved_time', julian_datetime),
    18: ('current_buffer', int),
    19: ('gray_fit', None),
    20: ('gray_point_count', None),
    21: ('gray_x', Fraction),
    22: ('gray_y', Fraction),
    23: ('gray_min', Fraction),
    24: ('gray_max', Fraction),
    25: ('gray_unit_name', str),
    26: ('standard_lut', int),
    27: ('wavelength', int),
    28: ('stage_position', '(%i,2,2)u4'),  # N xy positions as fractions
    29: ('camera_chip_offset', '(%i,2,2)u4'),  # N xy offsets as fractions
    30: ('overlay_mask', None),
    31: ('overlay_compress', None),
    32: ('overlay', None),
    33: ('special_overlay_mask', None),
    34: ('special_overlay_compress', None),
    35: ('special_overlay', None),
    36: ('image_property', read_uic_image_property),
    37: ('stage_label', '%ip'),  # N str
    38: ('autoscale_lo_info', Fraction),
    39: ('autoscale_hi_info', Fraction),
    40: ('absolute_z', '(%i,2)u4'),  # N fractions
    41: ('absolute_z_valid', '(%i,)u4'),  # N long
    42: ('gamma', int),
    43: ('gamma_red', int),
    44: ('gamma_green', int),
    45: ('gamma_blue', int),
    46: ('camera_bin', int),
    47: ('new_lut', int),
    48: ('image_property_ex', None),
    49: ('plane_property', int),
    50: ('user_lut_table', '(256,3)u1'),
    51: ('red_autoscale_info', int),
    52: ('red_autoscale_lo_info', Fraction),
    53: ('red_autoscale_hi_info', Fraction),
    54: ('red_minscale_info', int),
    55: ('red_maxscale_info', int),
    56: ('green_autoscale_info', int),
    57: ('green_autoscale_lo_info', Fraction),
    58: ('green_autoscale_hi_info', Fraction),
    59: ('green_minscale_info', int),
    60: ('green_maxscale_info', int),
    61: ('blue_autoscale_info', int),
    62: ('blue_autoscale_lo_info', Fraction),
    63: ('blue_autoscale_hi_info', Fraction),
    64: ('blue_min_scale_info', int),
    65: ('blue_max_scale_info', int),
    # 66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView: numpy structured-array field definitions
MM_DIMENSION = [
    ('name', 'a16'),
    ('size', 'i4'),
    ('origin', 'f8'),
    ('resolution', 'f8'),
    ('unit', 'a64'),
]
MM_HEADER = [
    ('header_flag', 'i2'),
    ('image_type', 'u1'),
    ('image_name', 'a257'),
    ('offset_data', 'u4'),
    ('palette_size', 'i4'),
    ('offset_palette0', 'u4'),
    ('offset_palette1', 'u4'),
    ('comment_size', 'i4'),
    ('offset_comment', 'u4'),
    ('dimensions', MM_DIMENSION, 10),
    ('offset_position', 'u4'),
    ('map_type', 'i2'),
    ('map_min', 'f8'),
    ('map_max', 'f8'),
    ('min_value', 'f8'),
    ('max_value', 'f8'),
    ('offset_map', 'u4'),
    ('gamma', 'f8'),
    ('offset', 'f8'),
    ('gray_channel', MM_DIMENSION),
    ('offset_thumbnail', 'u4'),
    ('voice_field', 'i4'),
    ('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM: numpy structured-array fields of the CZ_LSM_INFO record
CZ_LSM_INFO = [
    ('magic_number', 'u4'),
    ('structure_size', 'i4'),
    ('dimension_x', 'i4'),
    ('dimension_y', 'i4'),
    ('dimension_z', 'i4'),
    ('dimension_channels', 'i4'),
    ('dimension_time', 'i4'),
    ('data_type', 'i4'),  # CZ_DATA_TYPES
    ('thumbnail_x', 'i4'),
    ('thumbnail_y', 'i4'),
    ('voxel_size_x', 'f8'),
    ('voxel_size_y', 'f8'),
    ('voxel_size_z', 'f8'),
    ('origin_x', 'f8'),
    ('origin_y', 'f8'),
    ('origin_z', 'f8'),
    ('scan_type', 'u2'),
    ('spectral_scan', 'u2'),
    ('type_of_data', 'u4'),  # CZ_TYPE_OF_DATA
    ('offset_vector_overlay', 'u4'),
    ('offset_input_lut', 'u4'),
    ('offset_output_lut', 'u4'),
    ('offset_channel_colors', 'u4'),
    ('time_interval', 'f8'),
    ('offset_channel_data_types', 'u4'),
    ('offset_scan_info', 'u4'),  # CZ_LSM_SCAN_INFO
    ('offset_ks_data', 'u4'),
    ('offset_time_stamps', 'u4'),
    ('offset_event_list', 'u4'),
    ('offset_roi', 'u4'),
    ('offset_bleach_roi', 'u4'),
    ('offset_next_recording', 'u4'),
    # LSM 2.0 ends here
    ('display_aspect_x', 'f8'),
    ('display_aspect_y', 'f8'),
    ('display_aspect_z', 'f8'),
    ('display_aspect_time', 'f8'),
    ('offset_mean_of_roi_overlay', 'u4'),
    ('offset_topo_isoline_overlay', 'u4'),
    ('offset_topo_profile_overlay', 'u4'),
    ('offset_linescan_overlay', 'u4'),
    ('offset_toolbar_flags', 'u4'),
    ('offset_channel_wavelength', 'u4'),
    ('offset_channel_factors', 'u4'),
    ('objective_sphere_correction', 'f8'),
    ('offset_unmix_parameters', 'u4'),
    # LSM 3.2, 4.0 end here
    ('offset_acquisition_parameters', 'u4'),
    ('offset_characteristics', 'u4'),
    ('offset_palette', 'u4'),
    ('time_difference_x', 'f8'),
    ('time_difference_y', 'f8'),
    ('time_difference_z', 'f8'),
    ('internal_use_1', 'u4'),
    ('dimension_p', 'i4'),
    ('dimension_m', 'i4'),
    ('dimensions_reserved', '16i4'),
    ('offset_tile_positions', 'u4'),
    ('reserved_1', '9u4'),
    ('offset_positions', 'u4'),
    ('reserved_2', '21u4'),  # must be 0
]
# Import functions for LSM_INFO sub-records, keyed by the 'offset_*' name
# (without the 'offset_' prefix)
CZ_LSM_INFO_READERS = {
    'scan_info': read_cz_lsm_scan_info,
    'time_stamps': read_cz_lsm_time_stamps,
    'event_list': read_cz_lsm_event_list,
    'channel_colors': read_cz_lsm_floatpairs,
    'positions': read_cz_lsm_floatpairs,
    'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
    0: 'XYZCT',  # x-y-z scan
    1: 'XYZCT',  # z scan (x-z plane)
    2: 'XYZCT',  # line scan
    3: 'XYTCZ',  # time series x-y
    4: 'XYZTC',  # time series x-z
    5: 'XYTCZ',  # time series 'Mean of ROIs'
    6: 'XYZTC',  # time series x-y-z
    7: 'XYCTZ',  # spline scan
    8: 'XYCZT',  # spline scan x-z
    9: 'XYTCZ',  # time series spline plane x-z
    10: 'XYZCT',  # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
    'X': 'dimension_x',
    'Y': 'dimension_y',
    'Z': 'dimension_z',
    'C': 'dimension_channels',
    'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
    0: 'varying data types',
    1: '8 bit unsigned integer',
    2: '12 bit unsigned integer',
    5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
    0: 'Original scan data',
    1: 'Calculated data',
    2: '3D reconstruction',
    3: 'Topography height map',
}
# LSM scan-info block codes: arrays contain repeated structs; structs
# contain attributes; attributes are scalar values
CZ_LSM_SCAN_INFO_ARRAYS = {
    0x20000000: "tracks",
    0x30000000: "lasers",
    0x60000000: "detection_channels",
    0x80000000: "illumination_channels",
    0xa0000000: "beam_splitters",
    0xc0000000: "data_channels",
    0x11000000: "timers",
    0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
    # 0x10000000: "recording",
    0x40000000: "track",
    0x50000000: "laser",
    0x70000000: "detection_channel",
    0x90000000: "illumination_channel",
    0xb0000000: "beam_splitter",
    0xd0000000: "data_channel",
    0x12000000: "timer",
    0x14000000: "marker",
}
# Map attribute entry codes to names; the high byte selects the parent
# struct (see CZ_LSM_SCAN_INFO_STRUCTS)
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
    # recording
    0x10000001: "name",
    0x10000002: "description",
    0x10000003: "notes",
    0x10000004: "objective",
    0x10000005: "processing_summary",
    0x10000006: "special_scan_mode",
    0x10000007: "scan_type",
    0x10000008: "scan_mode",
    0x10000009: "number_of_stacks",
    0x1000000a: "lines_per_plane",
    0x1000000b: "samples_per_line",
    0x1000000c: "planes_per_volume",
    0x1000000d: "images_width",
    0x1000000e: "images_height",
    0x1000000f: "images_number_planes",
    0x10000010: "images_number_stacks",
    0x10000011: "images_number_channels",
    0x10000012: "linscan_xy_size",
    0x10000013: "scan_direction",
    0x10000014: "time_series",
    0x10000015: "original_scan_data",
    0x10000016: "zoom_x",
    0x10000017: "zoom_y",
    0x10000018: "zoom_z",
    0x10000019: "sample_0x",
    0x1000001a: "sample_0y",
    0x1000001b: "sample_0z",
    0x1000001c: "sample_spacing",
    0x1000001d: "line_spacing",
    0x1000001e: "plane_spacing",
    0x1000001f: "plane_width",
    0x10000020: "plane_height",
    0x10000021: "volume_depth",
    0x10000023: "nutation",
    0x10000034: "rotation",
    0x10000035: "precession",
    0x10000036: "sample_0time",
    0x10000037: "start_scan_trigger_in",
    0x10000038: "start_scan_trigger_out",
    0x10000039: "start_scan_event",
    0x10000040: "start_scan_time",
    0x10000041: "stop_scan_trigger_in",
    0x10000042: "stop_scan_trigger_out",
    0x10000043: "stop_scan_event",
    0x10000044: "stop_scan_time",
    0x10000045: "use_rois",
    0x10000046: "use_reduced_memory_rois",
    0x10000047: "user",
    0x10000048: "use_bc_correction",
    0x10000049: "position_bc_correction1",
    0x10000050: "position_bc_correction2",
    0x10000051: "interpolation_y",
    0x10000052: "camera_binning",
    0x10000053: "camera_supersampling",
    0x10000054: "camera_frame_width",
    0x10000055: "camera_frame_height",
    0x10000056: "camera_offset_x",
    0x10000057: "camera_offset_y",
    0x10000059: "rt_binning",
    0x1000005a: "rt_frame_width",
    0x1000005b: "rt_frame_height",
    0x1000005c: "rt_region_width",
    0x1000005d: "rt_region_height",
    0x1000005e: "rt_offset_x",
    0x1000005f: "rt_offset_y",
    0x10000060: "rt_zoom",
    0x10000061: "rt_line_period",
    0x10000062: "prescan",
    0x10000063: "scan_direction_z",
    # track
    0x40000001: "multiplex_type",  # 0 after line; 1 after frame
    0x40000002: "multiplex_order",
    0x40000003: "sampling_mode",  # 0 sample; 1 line average; 2 frame average
    0x40000004: "sampling_method",  # 1 mean; 2 sum
    0x40000005: "sampling_number",
    0x40000006: "acquire",
    0x40000007: "sample_observation_time",
    0x4000000b: "time_between_stacks",
    0x4000000c: "name",
    0x4000000d: "collimator1_name",
    0x4000000e: "collimator1_position",
    0x4000000f: "collimator2_name",
    0x40000010: "collimator2_position",
    0x40000011: "is_bleach_track",
    0x40000012: "is_bleach_after_scan_number",
    0x40000013: "bleach_scan_number",
    0x40000014: "trigger_in",
    0x40000015: "trigger_out",
    0x40000016: "is_ratio_track",
    0x40000017: "bleach_count",
    0x40000018: "spi_center_wavelength",
    0x40000019: "pixel_time",
    0x40000021: "condensor_frontlens",
    0x40000023: "field_stop_value",
    0x40000024: "id_condensor_aperture",
    0x40000025: "condensor_aperture",
    0x40000026: "id_condensor_revolver",
    0x40000027: "condensor_filter",
    0x40000028: "id_transmission_filter1",
    0x40000029: "id_transmission1",
    0x40000030: "id_transmission_filter2",
    0x40000031: "id_transmission2",
    0x40000032: "repeat_bleach",
    0x40000033: "enable_spot_bleach_pos",
    0x40000034: "spot_bleach_posx",
    0x40000035: "spot_bleach_posy",
    0x40000036: "spot_bleach_posz",
    0x40000037: "id_tubelens",
    0x40000038: "id_tubelens_position",
    0x40000039: "transmitted_light",
    0x4000003a: "reflected_light",
    0x4000003b: "simultan_grab_and_bleach",
    0x4000003c: "bleach_pixel_time",
    # laser
    0x50000001: "name",
    0x50000002: "acquire",
    0x50000003: "power",
    # detection_channel
    0x70000001: "integration_mode",
    0x70000002: "special_mode",
    0x70000003: "detector_gain_first",
    0x70000004: "detector_gain_last",
    0x70000005: "amplifier_gain_first",
    0x70000006: "amplifier_gain_last",
    0x70000007: "amplifier_offs_first",
    0x70000008: "amplifier_offs_last",
    0x70000009: "pinhole_diameter",
    0x7000000a: "counting_trigger",
    0x7000000b: "acquire",
    0x7000000c: "point_detector_name",
    0x7000000d: "amplifier_name",
    0x7000000e: "pinhole_name",
    0x7000000f: "filter_set_name",
    0x70000010: "filter_name",
    0x70000013: "integrator_name",
    0x70000014: "channel_name",
    0x70000015: "detector_gain_bc1",
    0x70000016: "detector_gain_bc2",
    0x70000017: "amplifier_gain_bc1",
    0x70000018: "amplifier_gain_bc2",
    0x70000019: "amplifier_offset_bc1",
    0x70000020: "amplifier_offset_bc2",
    0x70000021: "spectral_scan_channels",
    0x70000022: "spi_wavelength_start",
    0x70000023: "spi_wavelength_stop",
    0x70000026: "dye_name",
    0x70000027: "dye_folder",
    # illumination_channel
    0x90000001: "name",
    0x90000002: "power",
    0x90000003: "wavelength",
    0x90000004: "aquire",
    0x90000005: "detchannel_name",
    0x90000006: "power_bc1",
    0x90000007: "power_bc2",
    # beam_splitter
    0xb0000001: "filter_set",
    0xb0000002: "filter",
    0xb0000003: "name",
    # data_channel
    0xd0000001: "name",
    0xd0000003: "acquire",
    0xd0000004: "color",
    0xd0000005: "sample_type",
    0xd0000006: "bits_per_sample",
    0xd0000007: "ratio_type",
    0xd0000008: "ratio_track1",
    0xd0000009: "ratio_track2",
    0xd000000a: "ratio_channel1",
    0xd000000b: "ratio_channel2",
    0xd000000c: "ratio_const1",
    0xd000000d: "ratio_const2",
    0xd000000e: "ratio_const3",
    0xd000000f: "ratio_const4",
    0xd0000010: "ratio_const5",
    0xd0000011: "ratio_const6",
    0xd0000012: "ratio_first_images1",
    0xd0000013: "ratio_first_images2",
    0xd0000014: "dye_name",
    0xd0000015: "dye_folder",
    0xd0000016: "spectrum",
    0xd0000017: "acquire",
    # timer
    0x12000001: "name",
    0x12000002: "description",
    0x12000003: "interval",
    0x12000004: "trigger_in",
    0x12000005: "trigger_out",
    0x12000006: "activation_time",
    0x12000007: "activation_number",
    # marker
    0x14000001: "name",
    0x14000002: "description",
    0x14000003: "trigger_in",
    0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
    254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
    255: ('subfile_type', None, 3, 1,
          {0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
    256: ('image_width', None, 4, 1, None),
    257: ('image_length', None, 4, 1, None),
    258: ('bits_per_sample', 1, 3, 1, None),
    259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
    262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
    266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
    269: ('document_name', None, 2, None, None),
    270: ('image_description', None, 2, None, None),
    271: ('make', None, 2, None, None),
    272: ('model', None, 2, None, None),
    273: ('strip_offsets', None, 4, None, None),
    274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
    277: ('samples_per_pixel', 1, 3, 1, None),
    278: ('rows_per_strip', 2 ** 32 - 1, 4, 1, None),
    279: ('strip_byte_counts', None, 4, None, None),
    280: ('min_sample_value', None, 3, None, None),
    281: ('max_sample_value', None, 3, None, None),  # 2**bits_per_sample
    282: ('x_resolution', None, 5, 1, None),
    283: ('y_resolution', None, 5, 1, None),
    284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
    285: ('page_name', None, 2, None, None),
    286: ('x_position', None, 5, 1, None),
    287: ('y_position', None, 5, 1, None),
    296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
    297: ('page_number', None, 3, 2, None),
    305: ('software', None, 2, None, None),
    306: ('datetime', None, 2, None, None),
    315: ('artist', None, 2, None, None),
    316: ('host_computer', None, 2, None, None),
    317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
    318: ('white_point', None, 5, 2, None),
    319: ('primary_chromaticities', None, 5, 6, None),
    320: ('color_map', None, 3, None, None),
    322: ('tile_width', None, 4, 1, None),
    323: ('tile_length', None, 4, 1, None),
    324: ('tile_offsets', None, 4, None, None),
    325: ('tile_byte_counts', None, 4, None, None),
    338: ('extra_samples', None, 3, None,
          {0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
    339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
    340: ('smin_sample_value', None, None, None, None),
    341: ('smax_sample_value', None, None, None, None),
    347: ('jpeg_tables', None, 7, None, None),
    530: ('ycbcr_subsampling', 1, 3, 2, None),
    531: ('ycbcr_positioning', 1, 3, 1, None),
    32996: ('sgi_datatype', None, None, 1, None),  # use sample_format
    32997: ('image_depth', None, 4, 1, None),
    32998: ('tile_depth', None, 4, 1, None),
    33432: ('copyright', None, 1, None, None),
    33445: ('md_file_tag', None, 4, 1, None),
    33446: ('md_scale_pixel', None, 5, 1, None),
    33447: ('md_color_table', None, 3, None, None),
    33448: ('md_lab_name', None, 2, None, None),
    33449: ('md_sample_info', None, 2, None, None),
    33450: ('md_prep_date', None, 2, None, None),
    33451: ('md_prep_time', None, 2, None, None),
    33452: ('md_file_units', None, 2, None, None),
    33550: ('model_pixel_scale', None, 12, 3, None),
    33922: ('model_tie_point', None, 12, None, None),
    34665: ('exif_ifd', None, None, 1, None),
    34735: ('geo_key_directory', None, 3, None, None),
    34736: ('geo_double_params', None, 12, None, None),
    34737: ('geo_ascii_params', None, 2, None, None),
    34853: ('gps_ifd', None, None, 1, None),
    37510: ('user_comment', None, None, None, None),
    42112: ('gdal_metadata', None, 2, None, None),
    42113: ('gdal_nodata', None, 2, None, None),
    50289: ('mc_xy_position', None, 12, 2, None),
    50290: ('mc_z_position', None, 12, 1, None),
    50291: ('mc_xy_calibration', None, 12, 3, None),
    50292: ('mc_lens_lem_na_n', None, 12, 3, None),
    50293: ('mc_channel_name', None, 1, None, None),
    50294: ('mc_ex_wavelength', None, 12, 1, None),
    50295: ('mc_time_stamp', None, 12, 1, None),
    50838: ('imagej_byte_counts', None, None, None, None),
    65200: ('flex_xml', None, 2, None, None),
    # code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
    700: ('xmp', read_bytes),
    34377: ('photoshop', read_numpy),
    33723: ('iptc', read_bytes),
    34675: ('icc_profile', read_bytes),
    33628: ('uic1tag', read_uic1tag),  # Universal Imaging Corporation STK
    33629: ('uic2tag', read_uic2tag),
    33630: ('uic3tag', read_uic3tag),
    33631: ('uic4tag', read_uic4tag),
    34361: ('mm_header', read_mm_header),  # Olympus FluoView
    34362: ('mm_stamp', read_mm_stamp),
    34386: ('mm_user_block', read_bytes),
    34412: ('cz_lsm_info', read_cz_lsm_info),  # Carl Zeiss LSM
    43314: ('nih_image_header', read_nih_image_header),
    # 40001: ('mc_ipwinscal', read_bytes),
    40100: ('mc_id_old', read_bytes),
    50288: ('mc_id', read_bytes),
    50296: ('mc_frame_properties', read_bytes),
    50839: ('imagej_metadata', read_bytes),
    51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
           bitspersample=None, photometric='rgb', interpolation='nearest',
           dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
    """Plot n-dimensional images using matplotlib.pyplot.

    Return figure, subplot and plot axis.
    Requires pyplot already imported ``from matplotlib import pyplot``.

    Parameters
    ----------
    bitspersample : int or None
        Number of bits per channel in integer RGB images.
    photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
        The color space of the image data.
    title : str
        Window and subplot title.
    figure : matplotlib.figure.Figure (optional).
        Matplotlib to use for plotting.
    subplot : int
        A matplotlib.pyplot.subplot axis.
    maxdim : int
        maximum image size in any dimension.
    kwargs : optional
        Arguments for matplotlib.pyplot.imshow.

    """
    # if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
    #    raise ValueError("Can't handle %s photometrics" % photometric)
    # TODO: handle photometric == 'separated' (CMYK)
    isrgb = photometric in ('rgb', 'palette')
    # clip every dimension to maxdim to keep rendering manageable
    data = numpy.atleast_2d(data.squeeze())
    data = data[(slice(0, maxdim), ) * len(data.shape)]
    dims = data.ndim
    if dims < 2:
        raise ValueError("not an image")
    elif dims == 2:
        dims = 0
        isrgb = False
    else:
        # move a leading color axis of size 3 or 4 to the last position
        if isrgb and data.shape[-3] in (3, 4):
            data = numpy.swapaxes(data, -3, -2)
            data = numpy.swapaxes(data, -2, -1)
        elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
                            data.shape[-1] < data.shape[-3] // 16 and
                            data.shape[-1] < 5):
            data = numpy.swapaxes(data, -3, -1)
            data = numpy.swapaxes(data, -2, -1)
        isrgb = isrgb and data.shape[-1] in (3, 4)
        # remaining leading dimensions become interactive slider axes
        dims -= 3 if isrgb else 2
    # normalize the pixel values to an 8-bit or [0, 1] display range
    if photometric == 'palette' and isrgb:
        datamax = data.max()
        if datamax > 255:
            data >>= 8  # possible precision loss
        data = data.astype('B')
    elif data.dtype.kind in 'ui':
        if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
            try:
                bitspersample = int(math.ceil(math.log(data.max(), 2)))
            except Exception:
                bitspersample = data.dtype.itemsize * 8
        elif not isinstance(bitspersample, int):
            # bitspersample can be tuple, e.g. (5, 6, 5)
            bitspersample = data.dtype.itemsize * 8
        datamax = 2 ** bitspersample
        if isrgb:
            if bitspersample < 8:
                data <<= 8 - bitspersample
            elif bitspersample > 8:
                data >>= bitspersample - 8  # precision loss
            data = data.astype('B')
    elif data.dtype.kind == 'f':
        datamax = data.max()
        if isrgb and datamax > 1.0:
            if data.dtype.char == 'd':
                data = data.astype('f')
            data /= datamax
    elif data.dtype.kind == 'b':
        datamax = 1
    elif data.dtype.kind == 'c':
        raise NotImplementedError("complex type")  # TODO: handle complex types
    if not isrgb:
        if vmax is None:
            vmax = datamax
        if vmin is None:
            if data.dtype.kind == 'i':
                dtmin = numpy.iinfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    # NOTE(review): numpy.min(data > dtmin) takes the
                    # minimum of a boolean mask; this probably meant
                    # numpy.min(data[data > dtmin]) — confirm upstream
                    vmin = numpy.min(data > dtmin)
            if data.dtype.kind == 'f':
                dtmin = numpy.finfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    # NOTE(review): same boolean-mask concern as above
                    vmin = numpy.min(data > dtmin)
            else:
                vmin = 0
    # pyplot must already be imported by the caller
    pyplot = sys.modules['matplotlib.pyplot']
    if figure is None:
        pyplot.rc('font', family='sans-serif', weight='normal', size=8)
        figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
                               facecolor='1.0', edgecolor='w')
        try:
            figure.canvas.manager.window.title(title)
        except Exception:
            pass
        # reserve space at the bottom for one slider per extra dimension
        pyplot.subplots_adjust(bottom=0.03 * (dims + 2), top=0.9,
                               left=0.1, right=0.95, hspace=0.05, wspace=0.0)
    subplot = pyplot.subplot(subplot)
    if title:
        try:
            title = str(title, 'Windows-1252')
        except TypeError:
            pass
        pyplot.title(title, size=11)
    if cmap is None:
        if data.dtype.kind in 'ubf' or vmin == 0:
            cmap = 'cubehelix'
        else:
            cmap = 'coolwarm'
        if photometric == 'miniswhite':
            cmap += '_r'
    image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
                          cmap=cmap, interpolation=interpolation, **kwargs)
    if not isrgb:
        pyplot.colorbar()  # panchor=(0.55, 0.5), fraction=0.05

    def format_coord(x, y):
        # callback function to format coordinate display in toolbar
        x = int(x + 0.5)
        y = int(y + 0.5)
        try:
            if dims:
                return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
                                               current, x, y)
            else:
                return "%s @ [%4i, %4i]" % (data[y, x], x, y)
        except IndexError:
            return ""

    pyplot.gca().format_coord = format_coord
    if dims:
        # one slider per extra (non-image) dimension
        current = list((0, ) * dims)
        cur_ax_dat = [0, data[tuple(current)].squeeze()]
        sliders = [pyplot.Slider(
            pyplot.axes([0.125, 0.03 * (axis + 1), 0.725, 0.025]),
            'Dimension %i' % axis, 0, data.shape[axis] - 1, 0, facecolor='0.5',
            valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
        for slider in sliders:
            slider.drawon = False

        def set_image(current, sliders=sliders, data=data):
            # change image and redraw canvas
            cur_ax_dat[1] = data[tuple(current)].squeeze()
            image.set_data(cur_ax_dat[1])
            for ctrl, index in zip(sliders, current):
                ctrl.eventson = False
                ctrl.set_val(index)
                ctrl.eventson = True
            figure.canvas.draw()

        def on_changed(index, axis, data=data, current=current):
            # callback function for slider change event
            index = int(round(index))
            cur_ax_dat[0] = axis
            if index == current[axis]:
                return
            # wrap around at either end of the axis
            if index >= data.shape[axis]:
                index = 0
            elif index < 0:
                index = data.shape[axis] - 1
            current[axis] = index
            set_image(current)

        def on_keypressed(event, data=data, current=current):
            # callback function for key press event
            key = event.key
            axis = cur_ax_dat[0]
            if str(key) in '0123456789':
                on_changed(key, axis)
            elif key == 'right':
                on_changed(current[axis] + 1, axis)
            elif key == 'left':
                on_changed(current[axis] - 1, axis)
            elif key == 'up':
                cur_ax_dat[0] = 0 if axis == len(data.shape) - 1 else axis + 1
            elif key == 'down':
                cur_ax_dat[0] = len(data.shape) - 1 if axis == 0 else axis - 1
            elif key == 'end':
                on_changed(data.shape[axis] - 1, axis)
            elif key == 'home':
                on_changed(0, axis)

        figure.canvas.mpl_connect('key_press_event', on_keypressed)
        for axis, ctrl in enumerate(sliders):
            ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
    return figure, subplot, image
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
    """Command line usage main function.

    Parses command line options, reads the TIFF file(s) named in
    ``argv``, prints their structure and per-page metadata, and (unless
    --noplot is given) displays the image data with matplotlib.
    """
    # NOTE(review): this check is fragile for two-digit minor versions
    # ("3.10"[:3] -> "3.1"), but still passes for every version >= 2.6.
    if float(sys.version[0:3]) < 2.6:
        print("This script requires Python version 2.6 or better.")
        print("This is Python version %s" % sys.version)
        return 0
    if argv is None:
        argv = sys.argv
    # --- Command line parsing -------------------------------------------
    import optparse
    parser = optparse.OptionParser(
        usage="usage: %prog [options] path",
        description="Display image data in TIFF files.",
        version="%%prog %s" % __version__)
    opt = parser.add_option
    opt('-p', '--page', dest='page', type='int', default=-1,
        help="display single page")
    opt('-s', '--series', dest='series', type='int', default=-1,
        help="display series of pages of same shape")
    opt('--nomultifile', dest='nomultifile', action='store_true',
        default=False, help="don't read OME series from multiple files")
    opt('--noplot', dest='noplot', action='store_true', default=False,
        help="don't display images")
    opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
        help="image interpolation method")
    opt('--dpi', dest='dpi', type='int', default=96,
        help="set plot resolution")
    opt('--debug', dest='debug', action='store_true', default=False,
        help="raise exception on failures")
    opt('--test', dest='test', action='store_true', default=False,
        help="try read all images in path")
    opt('--doctest', dest='doctest', action='store_true', default=False,
        help="runs the docstring examples")
    opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
    opt('-q', '--quiet', dest='verbose', action='store_false')
    settings, path = parser.parse_args()
    # Join positional arguments so paths containing spaces work without
    # shell quoting.
    path = ' '.join(path)
    # --- Alternate run modes: doctest / self-test -----------------------
    if settings.doctest:
        import doctest
        doctest.testmod()
        return 0
    if not path:
        parser.error("No file specified")
    if settings.test:
        test_tifffile(path, settings.verbose)
        return 0
    # Expand glob patterns; only the first match is used.
    if any(i in path for i in '?*'):
        path = glob.glob(path)
        if not path:
            print('no files match the pattern')
            return 0
        # TODO: handle image sequences
        # if len(path) == 1:
        path = path[0]
    # --- Read file structure --------------------------------------------
    print("Reading file structure...", end=' ')
    start = time.time()
    try:
        tif = TiffFile(path, multifile=not settings.nomultifile)
    except Exception as e:
        if settings.debug:
            raise
        else:
            print("\n", e)
            sys.exit(0)
    print("%.3f ms" % ((time.time() - start) * 1e3))
    # NOTE(review): 'norgb' is set but never read in this function;
    # presumably consumed elsewhere -- confirm before removing.
    if tif.is_ome:
        settings.norgb = True
    # Default: no image data, just the requested page object.
    images = [(None, tif[0 if settings.page < 0 else settings.page])]
    if not settings.noplot:
        # --- Read image data --------------------------------------------
        print("Reading image data... ", end=' ')
        def notnone(x):
            # First item of x that is not None.
            return next(i for i in x if i is not None)
        start = time.time()
        try:
            if settings.page >= 0:
                images = [(tif.asarray(key=settings.page),
                           tif[settings.page])]
            elif settings.series >= 0:
                images = [(tif.asarray(series=settings.series),
                           notnone(tif.series[settings.series].pages))]
            else:
                images = []
                for i, s in enumerate(tif.series):
                    try:
                        images.append(
                            (tif.asarray(series=i), notnone(s.pages)))
                    except ValueError as e:
                        # Keep the page entry even when the data for the
                        # series could not be read.
                        images.append((None, notnone(s.pages)))
                        if settings.debug:
                            raise
                        else:
                            print("\n* series %i failed: %s... " % (i, e),
                                  end='')
            print("%.3f ms" % ((time.time() - start) * 1e3))
        except Exception as e:
            if settings.debug:
                raise
            else:
                print(e)
    tif.close()
    # --- Print file structure and per-page metadata ---------------------
    print("\nTIFF file:", tif)
    print()
    for i, s in enumerate(tif.series):
        print("Series %i" % i)
        print(s)
        print()
    # NOTE(review): in this loop `i` is the image array (or None), not an
    # index -- the name is misleading but the value is unused here.
    for i, page in images:
        print(page)
        print(page.tags)
        if page.is_palette:
            print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
        for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
                     'mm_header', 'imagej_tags', 'micromanager_metadata',
                     'nih_image_header'):
            if hasattr(page, attr):
                print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
        print()
    # `page` is the last page printed by the loop above.
    if page.is_micromanager:
        print('MICROMANAGER_FILE_METADATA')
        print(Record(tif.micromanager_metadata))
    # --- Display images with matplotlib ---------------------------------
    if images and not settings.noplot:
        try:
            import matplotlib
            matplotlib.use('TkAgg')
            from matplotlib import pyplot
        except ImportError as e:
            warnings.warn("failed to import matplotlib.\n%s" % e)
        else:
            for img, page in images:
                if img is None:
                    continue
                vmin, vmax = None, None
                if 'gdal_nodata' in page.tags:
                    # Exclude the GDAL nodata value from display scaling.
                    try:
                        vmin = numpy.min(img[img > float(page.gdal_nodata)])
                    except ValueError:
                        pass
                if page.is_stk:
                    # MetaMorph STK files carry explicit display scaling.
                    try:
                        vmin = page.uic_tags['min_scale']
                        vmax = page.uic_tags['max_scale']
                    except KeyError:
                        pass
                    else:
                        if vmax <= vmin:
                            vmin, vmax = None, None
                title = "%s\n %s" % (str(tif), str(page))
                imshow(img, title=title, vmin=vmin, vmax=vmax,
                       bitspersample=page.bits_per_sample,
                       photometric=page.photometric,
                       interpolation=settings.interpol,
                       dpi=settings.dpi)
            pyplot.show()
TIFFfile = TiffFile  # backwards compatibility
# On Python 3, rebind the removed Python 2 builtins; they are
# presumably referenced elsewhere in this module -- keeps the same
# code base running on both major versions.
if sys.version_info[0] > 2:
    basestring = str, bytes
    unicode = str
# Allow running the module directly as a TIFF inspection script.
if __name__ == "__main__":
    sys.exit(main())
| to266/hyperspy | hyperspy/external/tifffile.py | Python | gpl-3.0 | 172,696 | 0.000029 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from segwit import send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework import blocktools
from test_framework.mininode import CTransaction
from test_framework.util import *
from test_framework.util import *
import io
import time
# Sequence number that is BIP 125 opt-in and BIP 68-compliant
BIP125_SEQUENCE_NUMBER = 0xfffffffd

# Passphrase used to encrypt node 1's wallet, and how long (in seconds)
# the wallet is kept unlocked for the duration of the test run.
WALLET_PASSPHRASE = "test"
WALLET_PASSPHRASE_TIMEOUT = 3600
class BumpFeeTest(BitcoinTestFramework):
    """Functional test for the bumpfee RPC (BIP 125 fee bumping)."""

    def __init__(self):
        super().__init__()
        # Two nodes on a fresh chain: node 0 with RBF disabled (peer)
        # and node 1 with RBF enabled (the wallet under test).
        self.num_nodes = 2
        self.setup_clean_chain = True

    def setup_network(self, split=False):
        # Node i starts with -walletrbf=i, so node 0 creates final
        # transactions while node 1 creates opt-in RBF transactions.
        extra_args = [["-debug", "-prematurewitness", "-walletprematurewitness", "-walletrbf={}".format(i)]
                      for i in range(self.num_nodes)]
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
        # Encrypt wallet for test_locked_wallet_fails test.  encryptwallet
        # shuts the node down, so wait for the process to exit, restart
        # the node, then unlock the wallet for the whole test run.
        self.nodes[1].encryptwallet(WALLET_PASSPHRASE)
        bitcoind_processes[1].wait()
        self.nodes[1] = start_node(1, self.options.tmpdir, extra_args[1])
        self.nodes[1].walletpassphrase(WALLET_PASSPHRASE, WALLET_PASSPHRASE_TIMEOUT)
        connect_nodes_bi(self.nodes, 0, 1)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # peer_node has RBF disabled; rbf_node is the encrypted wallet
        # whose transactions the tests bump.
        peer_node, rbf_node = self.nodes
        rbf_node_address = rbf_node.getnewaddress()
        # fund rbf node with 25 coins of 0.001 btc (100,000 satoshis)
        print("Mining blocks...")
        peer_node.generate(110)
        self.sync_all()
        for i in range(25):
            peer_node.sendtoaddress(rbf_node_address, 0.001)
        self.sync_all()
        peer_node.generate(1)
        self.sync_all()
        assert_equal(rbf_node.getbalance(), Decimal("0.025"))
        print("Running tests")
        dest_address = peer_node.getnewaddress()
        # Each sub-test consumes some of the 0.001-BTC coins funded above.
        test_small_output_fails(rbf_node, dest_address)
        test_dust_to_fee(rbf_node, dest_address)
        test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address)
        test_segwit_bumpfee_succeeds(rbf_node, dest_address)
        test_nonrbf_bumpfee_fails(peer_node, dest_address)
        test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address)
        test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address)
        test_settxfee(rbf_node, dest_address)
        test_rebumping(rbf_node, dest_address)
        test_rebumping_not_replaceable(rbf_node, dest_address)
        test_unconfirmed_not_spendable(rbf_node, rbf_node_address)
        test_locked_wallet_fails(rbf_node, dest_address)
        print("Success")
def test_simple_bumpfee_succeeds(rbf_node, peer_node, dest_address):
    # Send an opt-in RBF payment, bump its fee, and verify that the
    # replacement propagates while the original is evicted everywhere.
    original_txid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    original_wtx = rbf_node.gettransaction(original_txid)
    sync_mempools((rbf_node, peer_node))
    assert original_txid in rbf_node.getrawmempool() and original_txid in peer_node.getrawmempool()
    bump = rbf_node.bumpfee(original_txid)
    # The bump must pay strictly more in fees than the tx it replaces.
    assert bump["fee"] - abs(original_wtx["fee"]) > 0
    # Replacement propagates to both nodes; original is evicted.
    sync_mempools((rbf_node, peer_node))
    for node in (rbf_node, peer_node):
        mempool = node.getrawmempool()
        assert bump["txid"] in mempool
        assert original_txid not in mempool
    # The wallet records the conflict and cross-references both entries.
    oldwtx = rbf_node.gettransaction(original_txid)
    assert len(oldwtx["walletconflicts"]) > 0
    bumpwtx = rbf_node.gettransaction(bump["txid"])
    assert_equal(oldwtx["replaced_by_txid"], bump["txid"])
    assert_equal(bumpwtx["replaces_txid"], original_txid)
def test_segwit_bumpfee_succeeds(rbf_node, dest_address):
    # Fund a witness output, spend it with an opt-in RBF transaction,
    # and check that bumpfee also works on segwit spends.
    funding_utxo = next(u for u in rbf_node.listunspent()
                        if u["amount"] == Decimal("0.001"))
    witness_addr = rbf_node.validateaddress(rbf_node.getnewaddress())
    rbf_node.addwitnessaddress(witness_addr["address"])
    funding_txid = send_to_witness(
        version=0,
        node=rbf_node,
        utxo=funding_utxo,
        pubkey=witness_addr["pubkey"],
        encode_p2sh=False,
        amount=Decimal("0.0009"),
        sign=True)
    # Hand-build the RBF spend so the BIP 125 sequence number is set.
    raw = rbf_node.createrawtransaction(
        [{'txid': funding_txid, 'vout': 0, "sequence": BIP125_SEQUENCE_NUMBER}],
        {dest_address: Decimal("0.0005"),
         get_change_address(rbf_node): Decimal("0.0003")})
    signed = rbf_node.signrawtransaction(raw)
    rbfid = rbf_node.sendrawtransaction(signed["hex"])
    assert rbfid in rbf_node.getrawmempool()
    bumped = rbf_node.bumpfee(rbfid)
    # The bump replaces the segwit spend in the mempool.
    assert bumped["txid"] in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
def test_nonrbf_bumpfee_fails(peer_node, dest_address):
    # peer_node runs with -walletrbf=0, so its sends are final and
    # bumpfee must reject them as non-replaceable.
    final_txid = create_fund_sign_send(peer_node, {dest_address: 0.00090000})
    assert_raises_message(JSONRPCException, "not BIP 125 replaceable",
                          peer_node.bumpfee, final_txid)
def test_notmine_bumpfee_fails(rbf_node, peer_node, dest_address):
    # bumpfee requires every input to belong to the wallet.  Build a tx
    # spending one coin from each node so the ownership check must fire.
    # No fundrawtransaction here, so the tx lacks a proper change output;
    # the RPC checks input ownership before inspecting change outputs.
    coins = [node.listunspent()[-1] for node in (rbf_node, peer_node)]
    tx_inputs = [{"txid": c["txid"],
                  "vout": c["vout"],
                  "address": c["address"],
                  "sequence": BIP125_SEQUENCE_NUMBER} for c in coins]
    send_value = sum(c["amount"] for c in coins) - Decimal("0.001")
    raw = rbf_node.createrawtransaction(tx_inputs, {dest_address: send_value})
    # Each wallet signs only its own input.
    partly_signed = rbf_node.signrawtransaction(raw)
    fully_signed = peer_node.signrawtransaction(partly_signed["hex"])
    rbfid = rbf_node.sendrawtransaction(fully_signed["hex"])
    assert_raises_message(JSONRPCException,
                          "Transaction contains inputs that don't belong to this wallet",
                          rbf_node.bumpfee, rbfid)
def test_bumpfee_with_descendant_fails(rbf_node, rbf_node_address, dest_address):
    """A transaction with an in-wallet descendant must not be bumpable."""
    # Parent is a send-to-self, so the child can spend output 0 without
    # worrying about which output is change.
    parent_id = create_fund_sign_send(rbf_node, {rbf_node_address: 0.00050000})
    tx = rbf_node.createrawtransaction([{"txid": parent_id, "vout": 0}],
                                       {dest_address: 0.00020000})
    tx = rbf_node.signrawtransaction(tx)
    # Broadcast the child; its existence is what must block the bump.
    # (The returned txid is not needed, so it is not bound.)
    rbf_node.sendrawtransaction(tx["hex"])
    assert_raises_message(JSONRPCException, "Transaction has descendants in the wallet",
                          rbf_node.bumpfee, parent_id)
def test_small_output_fails(rbf_node, dest_address):
    # A totalFee of 20000 consumes all but the minimum change and still
    # succeeds; one satoshi more must be rejected because the change
    # output would become too small.
    for total_fee, should_fail in ((20000, False), (20001, True)):
        rbfid = spend_one_input(rbf_node,
                                Decimal("0.00100000"),
                                {dest_address: 0.00080000,
                                 get_change_address(rbf_node): Decimal("0.00010000")})
        if should_fail:
            assert_raises_message(JSONRPCException, "Change output is too small",
                                  rbf_node.bumpfee, rbfid, {"totalFee": total_fee})
        else:
            rbf_node.bumpfee(rbfid, {"totalFee": total_fee})
def test_dust_to_fee(rbf_node, dest_address):
    # When the bump would shrink the change output below the dust
    # threshold, the dust is folded into the fee: the requested 19900
    # becomes 20000 actually paid.
    rbfid = spend_one_input(rbf_node,
                            Decimal("0.00100000"),
                            {dest_address: 0.00080000,
                             get_change_address(rbf_node): Decimal("0.00010000")})
    tx_before = rbf_node.getrawtransaction(rbfid, 1)
    bumped = rbf_node.bumpfee(rbfid, {"totalFee": 19900})
    tx_after = rbf_node.getrawtransaction(bumped["txid"], 1)
    assert_equal(bumped["fee"], Decimal("0.00020000"))
    # The original had a change output; the bump eliminated it.
    assert_equal(len(tx_before["vout"]), 2)
    assert_equal(len(tx_after["vout"]), 1)
def test_settxfee(rbf_node, dest_address):
    # bumpfee must honour the wallet's paytxfee: raising the fee rate
    # 2.5x should more than double the absolute fee of the bump.
    rbf_node.settxfee(Decimal("0.00001000"))
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    low_fee_wtx = rbf_node.gettransaction(rbfid)
    rbf_node.settxfee(Decimal("0.00002500"))
    bumped = rbf_node.bumpfee(rbfid)
    assert bumped["fee"] > 2 * abs(low_fee_wtx["fee"])
    # Reset paytxfee so later tests fall back to fee estimation.
    rbf_node.settxfee(Decimal("0.00000000"))
def test_rebumping(rbf_node, dest_address):
    # Bumping the original a second time must fail, while the bump
    # transaction itself can be bumped again.
    rbf_node.settxfee(Decimal("0.00001000"))
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    first_bump = rbf_node.bumpfee(rbfid, {"totalFee": 1000})
    assert_raises_message(JSONRPCException, "already bumped",
                          rbf_node.bumpfee, rbfid, {"totalFee": 2000})
    rbf_node.bumpfee(first_bump["txid"], {"totalFee": 2000})
def test_rebumping_not_replaceable(rbf_node, dest_address):
    # A bump created with replaceable=False is final and can therefore
    # not be bumped a second time.
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    final_bump = rbf_node.bumpfee(rbfid, {"totalFee": 10000, "replaceable": False})
    assert_raises_message(JSONRPCException, "Transaction is not BIP 125 replaceable",
                          rbf_node.bumpfee, final_bump["txid"], {"totalFee": 20000})
def test_unconfirmed_not_spendable(rbf_node, rbf_node_address):
    """Outputs of replaced and replacing txs stay unspendable until confirmed."""
    # check that unconfirmed outputs from bumped transactions are not spendable
    rbfid = create_fund_sign_send(rbf_node, {rbf_node_address: 0.00090000})
    rbftx = rbf_node.gettransaction(rbfid)["hex"]
    assert rbfid in rbf_node.getrawmempool()
    bumpid = rbf_node.bumpfee(rbfid)["txid"]
    assert bumpid in rbf_node.getrawmempool()
    assert rbfid not in rbf_node.getrawmempool()
    # check that outputs from the bump transaction are not spendable
    # due to the replaces_txid check in CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == bumpid], [])
    # submit a block with the rbf tx to clear the bump tx out of the mempool,
    # then call abandon to make sure the wallet doesn't attempt to resubmit the
    # bump tx, then invalidate the block so the rbf tx will be put back in the
    # mempool. this makes it possible to check whether the rbf tx outputs are
    # spendable before the rbf tx is confirmed.
    block = submit_block_with_tx(rbf_node, rbftx)
    rbf_node.abandontransaction(bumpid)
    rbf_node.invalidateblock(block.hash)
    assert bumpid not in rbf_node.getrawmempool()
    assert rbfid in rbf_node.getrawmempool()
    # check that outputs from the rbf tx are not spendable before the
    # transaction is confirmed, due to the replaced_by_txid check in
    # CWallet::AvailableCoins
    assert_equal([t for t in rbf_node.listunspent(minconf=0, include_unsafe=False) if t["txid"] == rbfid], [])
    # check that the main output from the rbf tx is spendable after confirmed
    rbf_node.generate(1)
    assert_equal(
        sum(1 for t in rbf_node.listunspent(minconf=0, include_unsafe=False)
            if t["txid"] == rbfid and t["address"] == rbf_node_address and t["spendable"]), 1)
def test_locked_wallet_fails(rbf_node, dest_address):
    # Signing the replacement needs the wallet passphrase, so bumpfee on
    # a locked wallet must fail up front.
    rbfid = create_fund_sign_send(rbf_node, {dest_address: 0.00090000})
    rbf_node.walletlock()
    assert_raises_message(JSONRPCException,
                          "Please enter the wallet passphrase with walletpassphrase first.",
                          rbf_node.bumpfee, rbfid)
def create_fund_sign_send(node, outputs):
    """Build, fund, sign and broadcast a transaction paying `outputs`;
    return its txid."""
    funded = node.fundrawtransaction(node.createrawtransaction([], outputs))
    signed = node.signrawtransaction(funded["hex"])
    return node.sendrawtransaction(signed["hex"])
def spend_one_input(node, input_amount, outputs):
    """Spend the wallet UTXO worth exactly `input_amount` to `outputs`,
    signalling opt-in RBF; return the txid."""
    utxo = next(u for u in node.listunspent() if u["amount"] == input_amount)
    txin = dict(sequence=BIP125_SEQUENCE_NUMBER, **utxo)
    signed = node.signrawtransaction(node.createrawtransaction([txin], outputs))
    return node.sendrawtransaction(signed["hex"])
def get_change_address(node):
    """Return an unused change address of `node`'s wallet.

    There is no wallet RPC to access unused change addresses, so this
    funds a dummy transaction (which adds an input and a change output)
    and reads the change address off the decoded result."""
    dest_address = node.getnewaddress()
    dest_amount = Decimal("0.00012345")
    funded = node.fundrawtransaction(
        node.createrawtransaction([], {dest_address: dest_amount}))
    decoded = node.decoderawtransaction(funded["hex"])
    # The change output is the one whose value differs from the payment.
    change_outputs = (out for out in decoded["vout"]
                      if out["value"] != dest_amount)
    return next(addr for out in change_outputs
                for addr in out["scriptPubKey"]["addresses"])
def submit_block_with_tx(node, tx):
    """Manually mine a block on top of node's tip containing the hex
    transaction `tx`; return the block object."""
    parsed_tx = CTransaction()
    parsed_tx.deserialize(io.BytesIO(hex_str_to_bytes(tx)))
    tip = node.getbestblockhash()
    next_height = node.getblockcount() + 1
    # Satisfy the median-time-past rule for the new block.
    block_time = node.getblockheader(tip)["mediantime"] + 1
    block = blocktools.create_block(int(tip, 16),
                                    blocktools.create_coinbase(next_height),
                                    block_time)
    block.vtx.append(parsed_tx)
    block.rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    err = node.submitblock(bytes_to_hex_str(block.serialize(True)))
    if err is not None:
        raise Exception(err)
    return block
if __name__ == "__main__":
    # Run the bumpfee functional test directly (outside the test runner).
    BumpFeeTest().main()
| svost/bitcoin | qa/rpc-tests/bumpfee.py | Python | mit | 14,241 | 0.002036 |
from __future__ import unicode_literals
from django.apps import AppConfig
class DirtyDriveConfig(AppConfig):
    """Django application configuration for the DirtyDrive app."""
    # Dotted Python path used by Django's app registry to locate the app.
    name = 'DirtyDrive'
| Superchicken1/DirtyDrive | DirtyDrive/apps.py | Python | apache-2.0 | 136 | 0 |
# epydoc.py: manpage-style text output
# Edward Loper
#
# Created [01/30/01 05:18 PM]
# $Id: man.py,v 1.6 2003/07/18 15:46:19 edloper Exp $
#
"""
Documentation formatter that produces man-style documentation.
@note: This module is under development. It generates incomplete
documentation pages, and is not yet incorperated into epydoc's
command-line interface.
"""
__docformat__ = 'epytext en'
##################################################
## Imports
##################################################
# system imports
import sys, xml.dom.minidom
# epydoc imports
import epydoc
from epydoc.uid import UID, Link, findUID, make_uid
from epydoc.imports import import_module
from epydoc.objdoc import DocMap, ModuleDoc, FuncDoc
from epydoc.objdoc import ClassDoc, Var, Raise, ObjDoc
##################################################
## Documentation -> Text Conversion
##################################################
class ManFormatter:
def __init__(self, docmap, **kwargs):
self._docmap = docmap
#////////////////////////////////////////////////////////////
# Basic Doc Pages
#////////////////////////////////////////////////////////////
def documentation(self, uid):
if not self._docmap.has_key(uid):
print '**NO DOCS ON %s **' % uid
return
doc = self._docmap[uid]
if uid.is_module(): return self._modulepage(uid, doc)
elif uid.is_class(): return self._classpage(uid, doc)
elif uid.is_routine(): return self._routinepage(uid, doc)
elif uid.is_variable(): return self._varpage(uid, doc)
def _modulepage(self, uid, doc):
str = self._name(uid)
str += self._descr(uid, doc)
str += self._funclist(doc.functions(), doc, 'FUNCTIONS')
return str
def _classpage(self, uid, doc):
str = self._name(uid)
str += self._descr(uid, doc)
str += self._funclist(doc.methods(), doc, 'METHODS')
str += self._funclist(doc.staticmethods(), doc, 'STATIC METHODS')
str += self._funclist(doc.classmethods(), doc, 'CLASS METHODS')
return str
def _routinepage(self, uid, doc):
str = self._name(uid)
str += self._descr(uid, doc)
return str
def _varpage(self, uid, doc):
str = self._name(uid)
str += self._descr(uid, doc)
return str
#////////////////////////////////////////////////////////////
# Functions
#////////////////////////////////////////////////////////////
def _funclist(self, functions, cls, title='FUNCTIONS'):
str = self._title(title)
numfuncs = 0
for link in functions:
fname = link.name()
func = link.target()
if func.is_method():
container = func.cls()
inherit = (container != cls.uid())
else:
inherit = 0
try: container = func.module()
except TypeError: container = None
if not self._docmap.has_key(func):
continue
# If we don't have documentation for the function, then we
# can't say anything about it.
if not self._docmap.has_key(func): continue
fdoc = self._docmap[func]
# What does this method override?
foverrides = fdoc.overrides()
# Try to find a documented ancestor.
inhdoc = self._docmap.documented_ancestor(func) or fdoc
inherit_docs = (inhdoc is not fdoc)
numfuncs += 1
str += ' %s\n' % self._func_signature(self._bold(fname), fdoc)
# Use the inherited docs for everything but the signature.
fdoc = inhdoc
fdescr=fdoc.descr()
fparam = fdoc.parameter_list()[:]
freturn = fdoc.returns()
fraises = fdoc.raises()
# Don't list parameters that don't have any extra info.
f = lambda p:p.descr() or p.type()
fparam = filter(f, fparam)
# Description
if fdescr:
fdescr_str = fdescr.to_plaintext(None, indent=8)
if fdescr_str.strip(): str += fdescr_str
# Parameters
if fparam:
str += ' Parameters:\n'
for param in fparam:
pname = param.name()
str += ' ' + pname
if param.descr():
pdescr = param.descr().to_plaintext(None, indent=12)
str += ' - %s' % pdescr.strip()
str += '\n'
if param.type():
ptype = param.type().to_plaintext(none, indent=16)
str += ' '*16+'(type=%s)\n' % ptype.strip()
# Returns
if freturn.descr():
fdescr = freturn.descr().to_plaintext(None, indent=12)
str += ' Returns:\n%s' % fdescr
if freturn.type():
ftype = freturn.type().to_plaintext(None, indent=12)
str += (" Return Type: %s" % ftype.lstrip())
## Raises
#if fraises:
# str += ' Raises:\n'
# for fraise in fraises:
# str += ' '
# str += ''+fraise.name()+' -\n'
# str += epytext.to_plaintext(fraise.descr(), 12)
## Overrides
#if foverrides:
# str += ' <dl><dt><b>Overrides:</b></dt>\n'
# str += ' <dd>'+self._uid_to_href(foverrides)
# if inherit_docs:
# str += ' <i>(inherited documentation)</i>\n'
# str += '</dd>\n </dl>\n'
if numfuncs == 0: return ''
return str
def _func_signature(self, fname, fdoc, show_defaults=1):
str = fname
str += '('
str += self._params_to_text(fdoc.parameters(), show_defaults)
if fdoc.vararg():
vararg_name = fdoc.vararg().name()
if vararg_name != '...': vararg_name = '*%s' % vararg_name
str += '%s, ' % vararg_name
if fdoc.kwarg():
str += '**%s, ' % fdoc.kwarg().name()
if str[-1] != '(': str = str[:-2]
return str + ')'
def _params_to_text(self, parameters, show_defaults):
str = ''
for param in parameters:
if type(param) in (type([]), type(())):
sublist = self._params_to_text(param,
show_defaults)
str += '(%s), ' % sublist[:-2]
else:
str += param.name()
if show_defaults and param.default() is not None:
default = param.default()
if len(default) > 60:
default = default[:57]+'...'
str += '=%s' % default
str += ', '
return str
#////////////////////////////////////////////////////////////
# Helpers
#////////////////////////////////////////////////////////////
def _bold(self, text):
"""Format a string in bold by overstriking."""
return ''.join([ch+'\b'+ch for ch in text])
def _title(self, text):
return '%s\n' % self._bold(text)
def _kind(self, uid):
if uid.is_package(): return 'package'
elif uid.is_module(): return 'module'
elif uid.is_class(): return 'class'
elif uid.is_method() or uid.is_builtin_method(): return 'method'
elif uid.is_routine(): return 'function'
elif uid.is_variable(): return 'variable'
else: raise AssertionError, 'Bad UID type for _name'
def _name(self, uid):
if uid.parent():
parent = uid.parent()
name = '%s %s in %s %s' % (self._kind(uid),
self._bold(uid.shortname()),
self._kind(parent),
self._bold(parent.name()))
else:
name = '%s %s' % (self._kind(uid), self._bold(uid.name()))
return '%s %s\n\n' % (self._title('NAME'), name)
def _descr(self, uid, doc):
if not doc.descr(): return ''
descr = doc.descr().to_plaintext(None, indent=4)
return '%s%s' % (self._title('DESCRIPTION'), descr)
if __name__ == '__main__':
    # Build a documentation map (including base classes) for each name
    # given on the command line, then print its man-style page.
    docmap = DocMap(document_bases=1)
    uids = [findUID(name) for name in sys.argv[1:]]
    uids = [uid for uid in uids if uid is not None]
    for uid in uids: docmap.add(uid.value())
    formatter = ManFormatter(docmap)
    for uid in uids:
        print formatter.documentation(uid)
| dabodev/dabodoc | api/epydoc/man.py | Python | mit | 8,842 | 0.005881 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1.types import index_endpoint
from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1.types import index_endpoint_service
from google.longrunning import operations_pb2 # type: ignore
from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import IndexEndpointServiceGrpcTransport
class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport):
"""gRPC AsyncIO backend transport for IndexEndpointService.
A service for managing Vertex AI's IndexEndpoints.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "aiplatform.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # Delegate construction to the shared google-api-core helper; the
        # service-specific defaults (OAuth scopes and API host) come from
        # class attributes defined on the base transport class.
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "aiplatform.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
              creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        # Stub cache and LRO client are created lazily (see the
        # ``operations_client`` property and the per-RPC stub properties).
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
        # Emit deprecation notices for the legacy mTLS arguments before use.
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        # A caller-supplied channel wins over every TLS/credential setting.
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # Only build a channel ourselves when the caller did not supply one.
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.CreateIndexEndpointRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create index endpoint method over gRPC.
Creates an IndexEndpoint.
Returns:
Callable[[~.CreateIndexEndpointRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_index_endpoint" not in self._stubs:
self._stubs["create_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/CreateIndexEndpoint",
request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_index_endpoint"]
@property
def get_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.GetIndexEndpointRequest],
Awaitable[index_endpoint.IndexEndpoint],
]:
r"""Return a callable for the get index endpoint method over gRPC.
Gets an IndexEndpoint.
Returns:
Callable[[~.GetIndexEndpointRequest],
Awaitable[~.IndexEndpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_index_endpoint" not in self._stubs:
self._stubs["get_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/GetIndexEndpoint",
request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize,
response_deserializer=index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["get_index_endpoint"]
@property
def list_index_endpoints(
self,
) -> Callable[
[index_endpoint_service.ListIndexEndpointsRequest],
Awaitable[index_endpoint_service.ListIndexEndpointsResponse],
]:
r"""Return a callable for the list index endpoints method over gRPC.
Lists IndexEndpoints in a Location.
Returns:
Callable[[~.ListIndexEndpointsRequest],
Awaitable[~.ListIndexEndpointsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_index_endpoints" not in self._stubs:
self._stubs["list_index_endpoints"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/ListIndexEndpoints",
request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize,
response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize,
)
return self._stubs["list_index_endpoints"]
@property
def update_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.UpdateIndexEndpointRequest],
Awaitable[gca_index_endpoint.IndexEndpoint],
]:
r"""Return a callable for the update index endpoint method over gRPC.
Updates an IndexEndpoint.
Returns:
Callable[[~.UpdateIndexEndpointRequest],
Awaitable[~.IndexEndpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_index_endpoint" not in self._stubs:
self._stubs["update_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/UpdateIndexEndpoint",
request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize,
response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize,
)
return self._stubs["update_index_endpoint"]
@property
def delete_index_endpoint(
self,
) -> Callable[
[index_endpoint_service.DeleteIndexEndpointRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete index endpoint method over gRPC.
Deletes an IndexEndpoint.
Returns:
Callable[[~.DeleteIndexEndpointRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_index_endpoint" not in self._stubs:
self._stubs["delete_index_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/DeleteIndexEndpoint",
request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_index_endpoint"]
@property
def deploy_index(
self,
) -> Callable[
[index_endpoint_service.DeployIndexRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the deploy index method over gRPC.
Deploys an Index into this IndexEndpoint, creating a
DeployedIndex within it.
Only non-empty Indexes can be deployed.
Returns:
Callable[[~.DeployIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "deploy_index" not in self._stubs:
self._stubs["deploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/DeployIndex",
request_serializer=index_endpoint_service.DeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_index"]
@property
def undeploy_index(
self,
) -> Callable[
[index_endpoint_service.UndeployIndexRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the undeploy index method over gRPC.
Undeploys an Index from an IndexEndpoint, removing a
DeployedIndex from it, and freeing all resources it's
using.
Returns:
Callable[[~.UndeployIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "undeploy_index" not in self._stubs:
self._stubs["undeploy_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/UndeployIndex",
request_serializer=index_endpoint_service.UndeployIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_index"]
@property
def mutate_deployed_index(
self,
) -> Callable[
[index_endpoint_service.MutateDeployedIndexRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the mutate deployed index method over gRPC.
Update an existing DeployedIndex under an
IndexEndpoint.
Returns:
Callable[[~.MutateDeployedIndexRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_deployed_index" not in self._stubs:
self._stubs["mutate_deployed_index"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1.IndexEndpointService/MutateDeployedIndex",
request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["mutate_deployed_index"]
def close(self):
return self.grpc_channel.close()
__all__ = ("IndexEndpointServiceGrpcAsyncIOTransport",)
| googleapis/python-aiplatform | google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py | Python | apache-2.0 | 21,535 | 0.002136 |
#!/usr/bin/env python
"""Plot the trajectory, radius decay, kinetic energy, velocity and
acceleration stored in ``trajectory.dat`` (columns: t x y u v ax ay)."""
from pylab import *

def padded(lo, hi, frac=0.1, top_factor=1):
    """Return ``(lo - pad, hi + top_factor * pad)`` with ``pad = frac * (hi - lo)``.

    Centralises the axis-padding arithmetic that was previously duplicated
    for every subplot; ``top_factor > 1`` leaves extra headroom above the
    curves (used to make room for legends).
    """
    pad = frac * (hi - lo)
    return lo - pad, hi + top_factor * pad

t, x, y, u, v, ax, ay = loadtxt('trajectory.dat', unpack=True)
r = sqrt(x**2 + y**2)
k = u**2 + v**2
# Draw individual sample markers only for short runs.
s = '.' if t.size < 100 else ''

figure('Trajectory', figsize=(5, 4))
subplot(111, aspect=1)
plot(x, y, 'b%s-' % s, lw=1)
xlim(*padded(x.min(), x.max()))
ylim(*padded(y.min(), y.max()))
xlabel(r'$x$-coordinate [m]')
ylabel(r'$y$-coordinate [m]')
tight_layout()

figure('', figsize=(8, 8))

subplot(221)  # radius vs time
plot(t, r, 'r%s-' % s)
yl, yh = ylim()
ylim(*padded(0, yh, top_factor=5))
xlabel(r'Time $t$ [s]')
ylabel(r'Radius $r$ [m]')

subplot(222)  # kinetic energy vs time
plot(t, k, 'r%s-' % s)
yl, yh = ylim()
ylim(*padded(0, yh, top_factor=5))
xlabel(r'Time $t$ [s]')
ylabel(r'Kinetic Energy $KE$ [J]')

subplot(223)  # velocity components
plot(t, u, 'r%s-' % s, label=r'$\vec{v}\cdot\hat{e}_x$')
plot(t, v, 'b%s-' % s, label=r'$\vec{v}\cdot\hat{e}_y$')
yl, yh = ylim()
ylim(*padded(yl, yh, top_factor=5))
xlabel(r'Time $t$ [s]')
ylabel(r'Velocity $\vec{v}\cdot\hat{e}_n$ [m/s]')
legend(loc='best', fancybox=True, ncol=2)

subplot(224)  # acceleration components
plot(t, ax, 'r%s-' % s, label=r'$\vec{a}\cdot\hat{e}_x$')
plot(t, ay, 'b%s-' % s, label=r'$\vec{a}\cdot\hat{e}_y$')
yl, yh = ylim()
ylim(*padded(yl, yh, top_factor=5))
xlabel(r'Time $t$ [s]')
ylabel(r'Acceleration $\vec{a}\cdot\hat{e}_n$ [m/s$^2$]')
legend(loc='best', fancybox=True, ncol=2)

tight_layout()
show()
| yorzh86/Step1 | scripts/post.py | Python | gpl-2.0 | 1,575 | 0.040635 |
#!/usr/bin/env python3
import os
import sys
import json
import boto3
import platform
import traceback
import subprocess
from datetime import datetime
config = {}
# Load user configuration from ~/.ddns.conf at import time; the file is a
# JSON object that must supply 'zone_id' and 'root' and may supply
# 'host' and 'ttl'.
with open(os.path.expanduser('~/.ddns.conf')) as conf:
    config.update(json.load(conf))
ZONE_ID = config['zone_id']  # Route53 hosted-zone id (required)
ROOT = config['root']  # DNS zone root, e.g. "example.com" (required)
HOST = config.get('host', platform.uname().node.split('.')[0])  # record label; defaults to short hostname
TTL = config.get('ttl', 300)  # record TTL in seconds
# NOTE(review): assumes a local AWS profile named 'personal' exists.
session = boto3.Session(profile_name='personal')
r53 = session.client('route53')
def dig_ip(hostname):
    """Resolve *hostname* via OpenDNS (resolver1) and return the answer text.

    On any failure the error is printed and the process exits with status 1,
    since the script cannot do anything useful without DNS.
    """
    command = ['dig', '+short', hostname, '@resolver1.opendns.com']
    try:
        answer = subprocess.check_output(command)
        return answer.decode('utf-8').strip()
    except Exception:
        print(f'{datetime.utcnow().isoformat()}+UTC Failed to read DNS name - bailing out')
        traceback.print_exc()
        sys.exit(1)
def my_ip():
    """Return this machine's public IP address as seen by OpenDNS."""
    return dig_ip('myip.opendns.com')
def change_recordset(current_ip):
    """UPSERT the A record for ``HOST.ROOT`` in Route53 to *current_ip*."""
    record_set = {
        'Name': '.'.join((HOST, ROOT)),
        'Type': 'A',
        'TTL': TTL,
        'ResourceRecords': [{'Value': current_ip}],
    }
    batch = {
        'Comment': f'Automatic DDNS change {datetime.utcnow().isoformat()}+UTC',
        'Changes': [{'Action': 'UPSERT', 'ResourceRecordSet': record_set}],
    }
    resp = r53.change_resource_record_sets(HostedZoneId=ZONE_ID, ChangeBatch=batch)
    print(f'{datetime.utcnow().isoformat()}+UTC Submitted change request: {resp}')
def main():
    """Compare the current public IP with the Route53 record; update on mismatch."""
    current_ip = my_ip()
    r53_ip = dig_ip('.'.join((HOST, ROOT)))
    if current_ip == r53_ip:
        print(f'{datetime.utcnow().isoformat()}+UTC All good - IP is updated in R53')
    else:
        print(f'{datetime.utcnow().isoformat()}+UTC Mismatch alert, {r53_ip} does not match {current_ip}')
        change_recordset(current_ip)

if __name__ == '__main__':
    main()
| ryansb/workstation | roles/unbound/files/ddns.py | Python | mit | 1,890 | 0.005291 |
class BASIC_MOVE:
    """Column indices into the rows of BASIC_MOVE_DATA."""
    # One index per column, assigned in declaration order (0..7).
    ID, Name, Type, PW, Duration, NRG, NRGPS, DPS = range(8)
# Fast ("basic") move table. Row schema is indexed by the BASIC_MOVE class:
# [ID, Name, Type, PW, Duration (ms), NRG gain, NRG per second, DPS].
# Adapted from the GAME_MASTER_FILE Json Output at:
# https://github.com/pokemongo-dev-contrib/pokemongo-game-master/
# https://raw.githubusercontent.com/pokemongo-dev-contrib/pokemongo-game-master/master/versions/latest/GAME_MASTER.json
BASIC_MOVE_DATA = [
# ID, Name,Type, PW, Duration (ms), NRG, NRGPS, DPS
[200,"Fury Cutter","Bug",3,400,6,15,7.5],
[201,"Bug Bite","Bug",5,500,6,12,10],
[202,"Bite","Dark",6,500,4,8,12],
[203,"Sucker Punch","Dark",7,700,8,11.428571428571429,10],
[204,"Dragon Breath","Dragon",6,500,4,8,12],
[205,"Thunder Shock","Electric",5,600,8,13.333333333333334,8.333333333333334],
[206,"Spark","Electric",6,700,9,12.857142857142858,8.571428571428571],
[207,"Low Kick","Fighting",6,600,6,10,10],
[208,"Karate Chop","Fighting",8,800,10,12.5,10],
[209,"Ember","Fire",10,1000,10,10,10],
[210,"Wing Attack","Flying",8,800,9,11.25,10],
[211,"Peck","Flying",10,1000,10,10,10],
[212,"Lick","Ghost",5,500,6,12,10],
[213,"Shadow Claw","Ghost",9,700,6,8.571428571428571,12.857142857142858],
[214,"Vine Whip","Grass",7,600,6,10,11.666666666666668],
[215,"Razor Leaf","Grass",13,1000,7,7,13],
[216,"Mud Shot","Ground",5,600,7,11.666666666666668,8.333333333333334],
[217,"Ice Shard","Ice",12,1200,12,10,10],
[218,"Frost Breath","Ice",10,900,8,8.88888888888889,11.11111111111111],
[219,"Quick Attack","Normal",8,800,10,12.5,10],
[220,"Scratch","Normal",6,500,4,8,12],
[221,"Tackle","Normal",5,500,5,10,10],
[222,"Pound","Normal",7,600,6,10,11.666666666666668],
[223,"Cut","Normal",5,500,5,10,10],
[224,"Poison Jab","Poison",10,800,7,8.75,12.5],
[225,"Acid","Poison",9,800,8,10,11.25],
[226,"Psycho Cut","Psychic",5,600,8,13.333333333333334,8.333333333333334],
[227,"Rock Throw","Rock",12,900,7,7.777777777777779,13.333333333333334],
[228,"Metal Claw","Steel",8,700,7,10,11.428571428571429],
[229,"Bullet Punch","Steel",9,900,10,11.11111111111111,10],
[230,"Water Gun","Water",5,500,5,10,10],
[231,"Splash","Water",0,1730,20,11.560693641618498,0],
[232,"Water Gun Blastoise","Water",10,1000,6,6,10],
[233,"Mud Slap","Ground",15,1400,12,8.571428571428571,10.714285714285715],
[234,"Zen Headbutt","Psychic",12,1100,10,9.09090909090909,10.909090909090908],
[235,"Confusion","Psychic",20,1600,15,9.375,12.5],
[236,"Poison Sting","Poison",5,600,7,11.666666666666668,8.333333333333334],
[237,"Bubble","Water",12,1200,14,11.666666666666668,10],
[238,"Feint Attack","Dark",10,900,9,10,11.11111111111111],
[239,"Steel Wing","Steel",11,800,6,7.5,13.75],
[240,"Fire Fang","Fire",11,900,8,8.88888888888889,12.222222222222223],
[241,"Rock Smash","Fighting",15,1300,10,7.692307692307692,11.538461538461537],
[242,"Transform","Normal",0,2230,0,0,0],
[243,"Counter","Fighting",12,900,8,8.88888888888889,13.333333333333334],
[244,"Powder Snow","Ice",6,1000,15,15,6],
[249,"Charge Beam","Electric",8,1100,15,13.636363636363637,7.2727272727272725],
[250,"Volt Switch","Electric",20,2300,25,10.869565217391305,8.695652173913045],
[253,"Dragon Tail","Dragon",15,1100,9,8.181818181818182,13.636363636363637],
[255,"Air Slash","Flying",14,1200,10,8.333333333333334,11.666666666666668],
[260,"Infestation","Bug",10,1100,14,12.727272727272727,9.09090909090909],
[261,"Struggle Bug","Bug",15,1500,15,10,10],
[263,"Astonish","Ghost",8,1100,14,12.727272727272727,7.2727272727272725],
[264,"Hex","Ghost",10,1200,15,12.5,8.333333333333334],
[266,"Iron Tail","Steel",15,1100,7,6.363636363636363,13.636363636363637],
[269,"Fire Spin","Fire",14,1100,10,9.09090909090909,12.727272727272727],
[271,"Bullet Seed","Grass",8,1100,14,12.727272727272727,7.2727272727272725],
[274,"Extrasensory","Psychic",12,1100,12,10.909090909090908,10.909090909090908],
[278,"Snarl","Dark",12,1100,12,10.909090909090908,10.909090909090908],
[281,"Hidden Power","Normal",15,1500,15,10,10],
[282,"Take Down","Normal",8,1200,10,8.333333333333334,6.666666666666667],
[283,"Waterfall","Water",16,1200,8,6.666666666666667,13.333333333333334],
[287,"Yawn","Normal",0,1700,15,8.823529411764707,0],
[291,"Present","Normal",5,1300,20,15.384615384615383,3.846153846153846],
[297,"Smack Down","Rock",16,1200,8,6.666666666666667,13.333333333333334],
]
def _get_basic_move_by_name(name):
    """Return the BASIC_MOVE_DATA row whose Name column equals *name*, or None."""
    matches = (row for row in BASIC_MOVE_DATA if row[BASIC_MOVE.Name] == name)
    return next(matches, None)
class CHARGE_MOVE:
    """Column indices into the rows of CHARGE_MOVE_DATA."""
    # One index per column, assigned in declaration order (0..6).
    ID, Name, Type, PW, Duration, Crit, NRG = range(7)
# Charged move table. Row schema is indexed by the CHARGE_MOVE class:
# [ID, Name, Type, PW, Duration (ms), Crit %, NRG cost].
# Adapted from the GAME_MASTER_FILE Json Output at:
# https://github.com/pokemongo-dev-contrib/pokemongo-game-master/
# https://raw.githubusercontent.com/pokemongo-dev-contrib/pokemongo-game-master/master/versions/latest/GAME_MASTER.json
CHARGE_MOVE_DATA = [
# ID Name Type PW Duration (ms) Crit% NRG Cost
[13,"Wrap","Normal",60,2900,5,33],
[14,"Hyper Beam","Normal",150,3800,5,100],
[16,"Dark Pulse","Dark",80,3000,5,50],
[18,"Sludge","Poison",50,2100,5,33],
[20,"Vice Grip","Normal",35,1900,5,33],
[21,"Flame Wheel","Fire",60,2700,5,50],
[22,"Megahorn","Bug",90,2200,5,100],
[24,"Flamethrower","Fire",70,2200,5,50],
[26,"Dig","Ground",100,4700,5,50],
[28,"Cross Chop","Fighting",50,1500,5,50],
[30,"Psybeam","Psychic",70,3200,5,50],
[31,"Earthquake","Ground",120,3600,5,100],
[32,"Stone Edge","Rock",100,2300,5,100],
[33,"Ice Punch","Ice",50,1900,5,33],
[34,"Heart Stamp","Psychic",40,1900,5,33],
[35,"Discharge","Electric",65,2500,5,33],
[36,"Flash Cannon","Steel",100,2700,5,100],
[38,"Drill Peck","Flying",60,2300,5,33],
[39,"Ice Beam","Ice",90,3300,5,50],
[40,"Blizzard","Ice",130,3100,5,100],
[42,"Heat Wave","Fire",95,3000,5,100],
[45,"Aerial Ace","Flying",55,2400,5,33],
[46,"Drill Run","Ground",80,2800,5,50],
[47,"Petal Blizzard","Grass",110,2600,5,100],
[48,"Mega Drain","Grass",25,2600,5,50],
[49,"Bug Buzz","Bug",90,3700,5,50],
[50,"Poison Fang","Poison",35,1700,5,33],
[51,"Night Slash","Dark",50,2200,5,33],
[53,"Bubble Beam","Water",45,1900,5,33],
[54,"Submission","Fighting",60,2200,5,50],
[56,"Low Sweep","Fighting",40,1900,5,33],
[57,"Aqua Jet","Water",45,2600,5,33],
[58,"Aqua Tail","Water",50,1900,5,33],
[59,"Seed Bomb","Grass",55,2100,5,33],
[60,"Psyshock","Psychic",65,2700,5,33],
[62,"Ancient Power","Rock",70,3500,5,33],
[63,"Rock Tomb","Rock",70,3200,5,50],
[64,"Rock Slide","Rock",80,2700,5,50],
[65,"Power Gem","Rock",80,2900,5,50],
[66,"Shadow Sneak","Ghost",50,2900,5,33],
[67,"Shadow Punch","Ghost",40,1700,5,33],
[69,"Ominous Wind","Ghost",50,2300,5,33],
[70,"Shadow Ball","Ghost",100,3000,5,50],
[72,"Magnet Bomb","Steel",70,2800,5,33],
[74,"Iron Head","Steel",60,1900,5,50],
[75,"Parabolic Charge","Electric",25,2800,5,50],
[77,"Thunder Punch","Electric",45,1800,5,33],
[78,"Thunder","Electric",100,2400,5,100],
[79,"Thunderbolt","Electric",80,2500,5,50],
[80,"Twister","Dragon",45,2800,5,33],
[82,"Dragon Pulse","Dragon",90,3600,5,50],
[83,"Dragon Claw","Dragon",50,1700,5,33],
[84,"Disarming Voice","Fairy",70,3900,5,33],
[85,"Draining Kiss","Fairy",60,2600,5,50],
[86,"Dazzling Gleam","Fairy",100,3500,5,50],
[87,"Moonblast","Fairy",130,3900,5,100],
[88,"Play Rough","Fairy",90,2900,5,50],
[89,"Cross Poison","Poison",40,1500,5,33],
[90,"Sludge Bomb","Poison",80,2300,5,50],
[91,"Sludge Wave","Poison",110,3200,5,100],
[92,"Gunk Shot","Poison",130,3100,5,100],
[94,"Bone Club","Ground",40,1600,5,33],
[95,"Bulldoze","Ground",80,3500,5,50],
[96,"Mud Bomb","Ground",55,2300,5,33],
[99,"Signal Beam","Bug",75,2900,5,50],
[100,"X-Scissor","Bug",45,1600,5,33],
[101,"Flame Charge","Fire",70,3800,5,33],
[102,"Flame Burst","Fire",70,2600,5,50],
[103,"Fire Blast","Fire",140,4200,5,100],
[104,"Brine","Water",60,2300,5,50],
[105,"Water Pulse","Water",70,3200,5,50],
[106,"Scald","Water",80,3700,5,50],
[107,"Hydro Pump","Water",130,3300,5,100],
[108,"Psychic","Psychic",100,2800,5,100],
[109,"Psystrike","Psychic",100,4400,5,50],
[111,"Icy Wind","Ice",60,3300,5,33],
[114,"Giga Drain","Grass",50,3900,5,100],
[115,"Fire Punch","Fire",55,2200,5,33],
[116,"Solar Beam","Grass",180,4900,5,100],
[117,"Leaf Blade","Grass",70,2400,5,33],
[118,"Power Whip","Grass",90,2600,5,50],
[121,"Air Cutter","Flying",60,2700,5,50],
[122,"Hurricane","Flying",110,2700,5,100],
[123,"Brick Break","Fighting",40,1600,5,33],
[125,"Swift","Normal",60,2800,5,50],
[126,"Horn Attack","Normal",40,1850,5,33],
[127,"Stomp","Normal",55,1700,5,50],
[129,"Hyper Fang","Normal",80,2500,5,50],
[131,"Body Slam","Normal",50,1900,5,33],
[132,"Rest","Normal",50,1900,5,33],
[133,"Struggle","Normal",35,2200,5,0],
[134,"Scald Blastoise","Water",50,4700,5,100],
[135,"Hydro Pump Blastoise","Water",90,4500,5,100],
[136,"Wrap Green","Normal",25,2900,5,33],
[137,"Wrap Pink","Normal",25,2900,5,33],
[245,"Close Combat","Fighting",100,2300,5,100],
[246,"Dynamic Punch","Fighting",90,2700,5,50],
[247,"Focus Blast","Fighting",140,3500,5,100],
[248,"Aurora Beam","Ice",80,3550,5,50],
[251,"Wild Charge","Electric",90,2600,5,50],
[252,"Zap Cannon","Electric",140,3700,5,100],
[254,"Avalanche","Ice",90,2700,5,50],
[256,"Brave Bird","Flying",90,2000,5,100],
[257,"Sky Attack","Flying",80,2000,5,50],
[258,"Sand Tomb","Ground",80,4000,5,50],
[259,"Rock Blast","Rock",50,2100,5,33],
[262,"Silver Wind","Bug",70,3700,5,33],
[265,"Night Shade","Ghost",60,2600,5,50],
[267,"Gyro Ball","Steel",80,3300,5,50],
[268,"Heavy Slam","Steel",70,2100,5,50],
[270,"Overheat","Fire",160,4000,5,100],
[272,"Grass Knot","Grass",90,2600,5,50],
[273,"Energy Ball","Grass",90,3900,5,50],
[275,"Futuresight","Psychic",120,2700,5,100],
[276,"Mirror Coat","Psychic",60,2600,5,50],
[277,"Outrage","Dragon",110,3900,5,50],
[279,"Crunch","Dark",70,3200,5,33],
[280,"Foul Play","Dark",70,2000,5,50],
[284,"Surf","Water",65,1700,5,50],
[285,"Draco Meteor","Dragon",150,3600,5,100],
[286,"Doom Desire","Steel",80,1700,5,50],
[288,"Psycho Boost","Psychic",70,4000,5,50],
[289,"Origin Pulse","Water",130,1700,5,100],
[290,"Precipice Blades","Ground",130,1700,5,100],
[292,"Weather Ball Fire","Fire",60,1600,5,33],
[293,"Weather Ball Ice","Ice",60,1600,5,33],
[294,"Weather Ball Rock","Rock",60,1600,5,33],
[295,"Weather Ball Water","Water",60,1600,5,33],
[296,"Frenzy Plant","Grass",100,2600,5,50],
[298,"Blast Burn","Fire",110,3300,5,50],
[299,"Hydro Cannon","Water",90,1900,5,50],
[300,"Last Resort","Normal",90,2900,5,50],
]
def _get_charge_move_by_name(name):
    """Return the CHARGE_MOVE_DATA row whose Name column equals *name*, or None."""
    matches = (row for row in CHARGE_MOVE_DATA if row[CHARGE_MOVE.Name] == name)
    return next(matches, None)
| darkshark007/PoGoCollection | Data/moves.py | Python | gpl-3.0 | 11,221 | 0.111755 |
# # # # #
# MOVE THE NEWLY DOWNLOADED TAS / PR CMIP5 data from work desktop to /Shared
# # # # #
def move_new_dir( fn, output_dir ):
	"""Copy CMIP5 file *fn* into ``output_dir/<model>/<scenario>/<variable>/``.

	The target layout is derived from the CMIP5 filename convention
	``<variable>_<cmor_table>_<model>_<scenario>_<experiment>_<years>.nc``.
	Returns the destination path reported by ``shutil.copy``.
	"""
	dirname, basename = os.path.split( fn )
	elems = basename.split('.')[0].split( '_' )
	variable, cmor_table, model, scenario, experiment, years = elems
	new_dir = os.path.join( output_dir, model, scenario, variable )
	try:
		os.makedirs( new_dir )
	except OSError:
		# Directory already exists (or raced with another worker). This
		# narrows the old bare ``except: pass``, which also swallowed
		# KeyboardInterrupt and unrelated errors; any real failure will
		# still surface loudly from shutil.copy below.
		pass
	return shutil.copy( fn, new_dir )
if __name__ == '__main__':
	import os, glob, shutil
	path = '/srv/synda/sdt/data'
	output_dir = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/raw_cmip5_tas_pr'
	# Collect every NetCDF file below `path`, then copy each one into the
	# model/scenario/variable tree rooted at `output_dir`.
	filelist = []
	for root, subs, files in os.walk( path ):
		if files:
			filelist.extend( os.path.join( root, name ) for name in files if name.endswith( '.nc' ) )
	out = [ move_new_dir( fn, output_dir ) for fn in filelist ]
# # # # # # # #
# # Check for duplicate files and remove them by hand -- tedious, but necessary.
# GFDL - OK
# CCSM4 - FIXED OK
# GISS-E2-R - OK
# IPSL - OK
# MRI - OK | ua-snap/downscale | snap_scripts/epscor_sc/move_raw_cmip5_tas_pr.py | Python | mit | 1,039 | 0.052936 |
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Float64Index, Index, Int64Index, NaT, Timedelta, TimedeltaIndex,
timedelta_range)
import pandas.util.testing as tm
class TestTimedeltaIndex(object):
    """Casting (``astype``) behaviour of TimedeltaIndex (GH 13149, GH 13209).

    Uses ``np.nan`` throughout: ``np.NaN`` was only an alias and has been
    removed in NumPy 2.0.
    """
    def test_astype_object(self):
        idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
        expected_list = [Timedelta('1 days'), Timedelta('2 days'),
                         Timedelta('3 days'), Timedelta('4 days')]
        result = idx.astype(object)
        expected = Index(expected_list, dtype=object, name='idx')
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list
    def test_astype_object_with_nat(self):
        idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), NaT,
                              timedelta(days=4)], name='idx')
        expected_list = [Timedelta('1 days'), Timedelta('2 days'), NaT,
                         Timedelta('4 days')]
        result = idx.astype(object)
        expected = Index(expected_list, dtype=object, name='idx')
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list
    def test_astype(self):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, 'NaT', NaT, np.nan])
        result = idx.astype(object)
        expected = Index([Timedelta('1 days 03:46:40')] + [NaT] * 3,
                         dtype=object)
        tm.assert_index_equal(result, expected)
        # NaT converts to the int64 sentinel value
        result = idx.astype(int)
        expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
                              dtype=np.int64)
        tm.assert_index_equal(result, expected)
        result = idx.astype(str)
        expected = Index(str(x) for x in idx)
        tm.assert_index_equal(result, expected)
        rng = timedelta_range('1 days', periods=10)
        result = rng.astype('i8')
        tm.assert_index_equal(result, Index(rng.asi8))
        tm.assert_numpy_array_equal(rng.asi8, result.values)
    def test_astype_uint(self):
        arr = timedelta_range('1H', periods=2)
        expected = pd.UInt64Index(
            np.array([3600000000000, 90000000000000], dtype="uint64")
        )
        tm.assert_index_equal(arr.astype("uint64"), expected)
        tm.assert_index_equal(arr.astype("uint32"), expected)
    def test_astype_timedelta64(self):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, 'NaT', NaT, np.nan])
        result = idx.astype('timedelta64')
        expected = Float64Index([1e+14] + [np.nan] * 3, dtype='float64')
        tm.assert_index_equal(result, expected)
        result = idx.astype('timedelta64[ns]')
        tm.assert_index_equal(result, idx)
        assert result is not idx
        # copy=False must return the identical object
        result = idx.astype('timedelta64[ns]', copy=False)
        tm.assert_index_equal(result, idx)
        assert result is idx
    @pytest.mark.parametrize('dtype', [
        float, 'datetime64', 'datetime64[ns]'])
    def test_astype_raises(self, dtype):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, 'NaT', NaT, np.nan])
        msg = 'Cannot cast TimedeltaArray to dtype'
        with pytest.raises(TypeError, match=msg):
            idx.astype(dtype)
    def test_astype_category(self):
        obj = pd.timedelta_range("1H", periods=2, freq='H')
        result = obj.astype('category')
        expected = pd.CategoricalIndex([pd.Timedelta('1H'),
                                        pd.Timedelta('2H')])
        tm.assert_index_equal(result, expected)
        result = obj._data.astype('category')
        expected = expected.values
        tm.assert_categorical_equal(result, expected)
    def test_astype_array_fallback(self):
        obj = pd.timedelta_range("1H", periods=2)
        result = obj.astype(bool)
        expected = pd.Index(np.array([True, True]))
        tm.assert_index_equal(result, expected)
        result = obj._data.astype(bool)
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
| GuessWhoSamFoo/pandas | pandas/tests/indexes/timedeltas/test_astype.py | Python | bsd-3-clause | 4,066 | 0 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, cint, cstr, flt, getdate, nowdate, rounded
from frappe.model.naming import make_autoname
from frappe import msgprint, _
from erpnext.setup.utils import get_company_currency
from erpnext.hr.utils import set_employee_name
from erpnext.utilities.transaction_base import TransactionBase
class SalarySlip(TransactionBase):
def autoname(self):
self.name = make_autoname('Sal Slip/' +self.employee + '/.#####')
def get_emp_and_leave_details(self):
if self.employee:
self.get_leave_details()
struct = self.check_sal_struct()
if struct:
self.pull_sal_struct(struct)
def check_sal_struct(self):
struct = frappe.db.sql("""select name from `tabSalary Structure`
where employee=%s and is_active = 'Yes'""", self.employee)
if not struct:
msgprint(_("Please create Salary Structure for employee {0}").format(self.employee))
self.employee = None
return struct and struct[0][0] or ''
	def pull_sal_struct(self, struct):
		"""Copy the fields of a Salary Slip generated from *struct* onto self."""
		# Local import avoids a circular dependency between the two doctypes.
		from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
		self.update(make_salary_slip(struct, self).as_dict())
def pull_emp_details(self):
emp = frappe.db.get_value("Employee", self.employee,
["bank_name", "bank_ac_no"], as_dict=1)
if emp:
self.bank_name = emp.bank_name
self.bank_account_no = emp.bank_ac_no
def get_leave_details(self, lwp=None):
if not self.fiscal_year:
self.fiscal_year = frappe.db.get_default("fiscal_year")
if not self.month:
self.month = "%02d" % getdate(nowdate()).month
m = frappe.get_doc('Process Payroll').get_month_details(self.fiscal_year, self.month)
holidays = self.get_holidays_for_employee(m)
if not cint(frappe.db.get_value("HR Settings", "HR Settings",
"include_holidays_in_total_working_days")):
m["month_days"] -= len(holidays)
if m["month_days"] < 0:
frappe.throw(_("There are more holidays than working days this month."))
if not lwp:
lwp = self.calculate_lwp(holidays, m)
self.total_days_in_month = m['month_days']
self.leave_without_pay = lwp
payment_days = flt(self.get_payment_days(m)) - flt(lwp)
self.payment_days = payment_days > 0 and payment_days or 0
def get_payment_days(self, m):
payment_days = m['month_days']
emp = frappe.db.sql("select date_of_joining, relieving_date from `tabEmployee` \
where name = %s", self.employee, as_dict=1)[0]
if emp['relieving_date']:
if getdate(emp['relieving_date']) > m['month_start_date'] and \
getdate(emp['relieving_date']) < m['month_end_date']:
payment_days = getdate(emp['relieving_date']).day
elif getdate(emp['relieving_date']) < m['month_start_date']:
frappe.throw(_("Employee relieved on {0} must be set as 'Left'").format(emp["relieving_date"]))
if emp['date_of_joining']:
if getdate(emp['date_of_joining']) > m['month_start_date'] and \
getdate(emp['date_of_joining']) < m['month_end_date']:
payment_days = payment_days - getdate(emp['date_of_joining']).day + 1
elif getdate(emp['date_of_joining']) > m['month_end_date']:
payment_days = 0
return payment_days
def get_holidays_for_employee(self, m):
holidays = frappe.db.sql("""select t1.holiday_date
from `tabHoliday` t1, tabEmployee t2
where t1.parent = t2.holiday_list and t2.name = %s
and t1.holiday_date between %s and %s""",
(self.employee, m['month_start_date'], m['month_end_date']))
if not holidays:
holidays = frappe.db.sql("""select t1.holiday_date
from `tabHoliday` t1, `tabHoliday List` t2
where t1.parent = t2.name and ifnull(t2.is_default, 0) = 1
and t2.fiscal_year = %s
and t1.holiday_date between %s and %s""", (self.fiscal_year,
m['month_start_date'], m['month_end_date']))
holidays = [cstr(i[0]) for i in holidays]
return holidays
def calculate_lwp(self, holidays, m):
lwp = 0
for d in range(m['month_days']):
dt = add_days(cstr(m['month_start_date']), d)
if dt not in holidays:
leave = frappe.db.sql("""
select t1.name, t1.half_day
from `tabLeave Application` t1, `tabLeave Type` t2
where t2.name = t1.leave_type
and ifnull(t2.is_lwp, 0) = 1
and t1.docstatus = 1
and t1.employee = %s
and %s between from_date and to_date
""", (self.employee, dt))
if leave:
lwp = cint(leave[0][1]) and (lwp + 0.5) or (lwp + 1)
return lwp
def check_existing(self):
ret_exist = frappe.db.sql("""select name from `tabSalary Slip`
where month = %s and fiscal_year = %s and docstatus != 2
and employee = %s and name != %s""",
(self.month, self.fiscal_year, self.employee, self.name))
if ret_exist:
self.employee = ''
frappe.throw(_("Salary Slip of employee {0} already created for this month").format(self.employee))
def validate(self):
from frappe.utils import money_in_words
self.check_existing()
if not (len(self.get("earnings")) or
len(self.get("deductions"))):
self.get_emp_and_leave_details()
else:
self.get_leave_details(self.leave_without_pay)
if not self.net_pay:
self.calculate_net_pay()
company_currency = get_company_currency(self.company)
self.total_in_words = money_in_words(self.rounded_total, company_currency)
set_employee_name(self)
def calculate_earning_total(self):
self.gross_pay = flt(self.arrear_amount) + flt(self.leave_encashment_amount)
for d in self.get("earnings"):
if cint(d.e_depends_on_lwp) == 1:
d.e_modified_amount = rounded((flt(d.e_amount) * flt(self.payment_days)
/ cint(self.total_days_in_month)), self.precision("e_modified_amount", "earnings"))
elif not self.payment_days:
d.e_modified_amount = 0
elif not d.e_modified_amount:
d.e_modified_amount = d.e_amount
self.gross_pay += flt(d.e_modified_amount)
def calculate_ded_total(self):
self.total_deduction = 0
for d in self.get('deductions'):
if cint(d.d_depends_on_lwp) == 1:
d.d_modified_amount = rounded((flt(d.d_amount) * flt(self.payment_days)
/ cint(self.total_days_in_month)), self.precision("d_modified_amount", "deductions"))
elif not self.payment_days:
d.d_modified_amount = 0
elif not d.d_modified_amount:
d.d_modified_amount = d.d_amount
self.total_deduction += flt(d.d_modified_amount)
def calculate_net_pay(self):
disable_rounded_total = cint(frappe.db.get_value("Global Defaults", None, "disable_rounded_total"))
self.calculate_earning_total()
self.calculate_ded_total()
self.net_pay = flt(self.gross_pay) - flt(self.total_deduction)
self.rounded_total = rounded(self.net_pay,
self.precision("net_pay") if disable_rounded_total else 0)
def on_submit(self):
if(self.email_check == 1):
self.send_mail_funct()
def send_mail_funct(self):
receiver = frappe.db.get_value("Employee", self.employee, "company_email")
if receiver:
subj = 'Salary Slip - ' + cstr(self.month) +'/'+cstr(self.fiscal_year)
frappe.sendmail([receiver], subject=subj, message = _("Please see attachment"),
attachments=[frappe.attach_print(self.doctype, self.name, file_name=self.name)])
else:
msgprint(_("Company Email ID not found, hence mail not sent"))
| meisterkleister/erpnext | erpnext/hr/doctype/salary_slip/salary_slip.py | Python | agpl-3.0 | 7,265 | 0.025327 |
from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib.auth.views import login, logout
import views
urlpatterns = patterns(
    'gauth',
    url(r'^login/$', login, {'template_name': 'login.html'}, name='login'),
    # BUG FIX: the logout route previously reused the r'^logi' + 'n/$' pattern of
    # the line above, so /logout/ never matched and the logout view was
    # unreachable (the duplicate /login/ entry was shadowed by the first).
    url(r'^logout/$', logout, {'template_name': 'logout.html'}, name='logout'),
    url(r'^oauth2_begin/$', views.oauth2_begin, name='oauth2_begin'),
    url(r'^' + settings.OAUTH2_CALLBACK + '/$', views.oauth2_callback),
    url(r'^oauth2_complete/$', views.oauth2_complete, name='oauth2_complete'),
)
| Fl0r14n/django_googleapi | gdrive/urls.py | Python | mit | 560 | 0 |
#
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Based on glance/api/policy.py
"""Policy Engine For Heat."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import opts
from oslo_policy import policy
from oslo_utils import excutils
from heat.common import exception
from heat.common.i18n import _
from heat import policies
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DEFAULT_RULES = policy.Rules.from_dict({'default': '!'})
DEFAULT_RESOURCE_RULES = policy.Rules.from_dict({'default': '@'})
# TODO(gmann): Remove setting the default value of config policy_file
# once oslo_policy change the default value to 'policy.yaml'.
# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
DEFAULT_POLICY_FILE = 'policy.yaml'
opts.set_defaults(CONF, DEFAULT_POLICY_FILE)
ENFORCER = None
class Enforcer(object):
    """Responsible for loading and enforcing rules.

    Thin wrapper over oslo.policy's Enforcer that registers Heat's
    in-code policy defaults and raises ``exc`` (Forbidden by default)
    on denial.
    """

    def __init__(self, scope='heat', exc=exception.Forbidden,
                 default_rule=DEFAULT_RULES['default'], policy_file=None):
        # scope: prefix prepended to action names ("<scope>:<action>").
        # exc: exception class raised when authorization fails.
        # default_rule: rule used for actions with no explicit rule ('!').
        # policy_file: alternate policy file; None uses the config default.
        self.scope = scope
        self.exc = exc
        self.default_rule = default_rule
        self.enforcer = policy.Enforcer(
            CONF, default_rule=default_rule, policy_file=policy_file)
        self.log_not_registered = True

        # TODO(ramishra) Remove this once remove the deprecated rules.
        self.enforcer.suppress_deprecation_warnings = True

        # register rules
        self.enforcer.register_defaults(policies.list_rules())
        self.file_rules = self.enforcer.file_rules
        self.registered_rules = self.enforcer.registered_rules

    def set_rules(self, rules, overwrite=True):
        """Create a new Rules object based on the provided dict of rules."""
        rules_obj = policy.Rules(rules, self.default_rule)
        self.enforcer.set_rules(rules_obj, overwrite)

    def load_rules(self, force_reload=False):
        """Set the rules found in the json file on disk."""
        self.enforcer.load_rules(force_reload)

    def _check(self, context, rule, target, exc,
               is_registered_policy=False, *args, **kwargs):
        """Verifies that the action is valid on the target in this context.

        :param context: Heat request context
        :param rule: String representing the action to be checked
        :param target: Dictionary representing the object of the action.
        :raises heat.common.exception.Forbidden: When permission is denied
            (or self.exc if supplied).
        :returns: A non-False value if access is allowed.
        """
        # Only raise on denial when the caller supplied an exception class.
        do_raise = False if not exc else True
        credentials = context.to_policy_values()
        if is_registered_policy:
            try:
                return self.enforcer.authorize(rule, target, credentials,
                                               do_raise=do_raise,
                                               exc=exc, action=rule)
            except policy.PolicyNotRegistered:
                # Re-raise either way; optionally log first (resource-type
                # enforcement sets log_not_registered=False to stay quiet).
                if self.log_not_registered:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_('Policy not registered.'))
                else:
                    raise
        else:
            # Legacy path: enforce() does not require the rule to be
            # registered in code.
            return self.enforcer.enforce(rule, target, credentials,
                                         do_raise, exc=exc, *args, **kwargs)

    def enforce(self, context, action, scope=None, target=None,
                is_registered_policy=False):
        """Verifies that the action is valid on the target in this context.

        :param context: Heat request context
        :param action: String representing the action to be checked
        :param target: Dictionary representing the object of the action.
        :raises heat.common.exception.Forbidden: When permission is denied
            (or self.exc if supplied).
        :returns: A non-False value if access is allowed.
        """
        # Qualify the action with the scope, e.g. 'stacks:create'.
        _action = '%s:%s' % (scope or self.scope, action)
        _target = target or {}
        return self._check(context, _action, _target, self.exc, action=action,
                           is_registered_policy=is_registered_policy)

    def check_is_admin(self, context):
        """Whether or not is admin according to policy.

        By default the rule will check whether or not roles contains
        'admin' role and is admin project.

        :param context: Heat request context
        :returns: A non-False value if the user is admin according to policy
        """
        # exc=None: return a boolean instead of raising on denial.
        return self._check(context, 'context_is_admin', target={}, exc=None,
                           is_registered_policy=True)
def get_policy_enforcer():
    # This method is used by oslopolicy CLI scripts to generate policy
    # files from overrides on disk and defaults in code.
    # Initialize oslo.config for the 'heat' project (empty argv) before
    # the enforcer reads policy-related options.
    CONF([], project='heat')
    return get_enforcer()
def get_enforcer():
    """Return the process-wide Enforcer singleton, creating it lazily."""
    global ENFORCER
    if ENFORCER is not None:
        return ENFORCER
    ENFORCER = Enforcer()
    return ENFORCER
class ResourceEnforcer(Enforcer):
    """Enforcer for resource-type policies (scope 'resource_types').

    Unlike the base Enforcer, an unregistered rule defaults to allow ('@'),
    and missing registrations are not logged as errors.
    """

    def __init__(self, default_rule=DEFAULT_RESOURCE_RULES['default'],
                 **kwargs):
        super(ResourceEnforcer, self).__init__(
            default_rule=default_rule, **kwargs)
        # Resource-type rules are commonly absent; don't log that case.
        self.log_not_registered = False

    def _enforce(self, context, res_type, scope=None, target=None,
                 is_registered_policy=False):
        # Check a single resource-type rule; allow when no rule is registered.
        try:
            result = super(ResourceEnforcer, self).enforce(
                context, res_type,
                scope=scope or 'resource_types',
                target=target, is_registered_policy=is_registered_policy)
        except policy.PolicyNotRegistered:
            result = True
        except self.exc as ex:
            LOG.info(str(ex))
            raise
        if not result:
            if self.exc:
                raise self.exc(action=res_type)
        return result

    def enforce(self, context, res_type, scope=None, target=None,
                is_registered_policy=False):
        # NOTE(pas-ha): try/except just to log the exception
        result = self._enforce(context, res_type, scope, target,
                               is_registered_policy=is_registered_policy)

        if result:
            # check for wildcard resource types
            # e.g. 'OS::Nova::Server' is additionally checked as 'OS::Nova::*'.
            subparts = res_type.split("::")[:-1]
            subparts.append('*')
            res_type_wc = "::".join(subparts)
            try:
                return self._enforce(context, res_type_wc, scope, target,
                                     is_registered_policy=is_registered_policy)
            except self.exc:
                # Report the concrete type, not the wildcard, to the caller.
                raise self.exc(action=res_type)

        return result

    def enforce_stack(self, stack, scope=None, target=None,
                      is_registered_policy=False):
        # Enforce every resource type used in the stack; raises on first denial.
        for res in stack.resources.values():
            self.enforce(stack.context, res.type(), scope=scope, target=target,
                         is_registered_policy=is_registered_policy)
| openstack/heat | heat/common/policy.py | Python | apache-2.0 | 7,615 | 0 |
import time
import logging
import struct
import socket
from mesos.interface.mesos_pb2 import TASK_LOST, MasterInfo
from .messages_pb2 import (
RegisterFrameworkMessage, ReregisterFrameworkMessage,
DeactivateFrameworkMessage, UnregisterFrameworkMessage,
ResourceRequestMessage, ReviveOffersMessage, LaunchTasksMessage, KillTaskMessage,
StatusUpdate, StatusUpdateAcknowledgementMessage, FrameworkToExecutorMessage,
ReconcileTasksMessage
)
from .process import UPID, Process, async
logger = logging.getLogger(__name__)
class MesosSchedulerDriver(Process):
def __init__(self, sched, framework, master_uri):
Process.__init__(self, 'scheduler')
self.sched = sched
#self.executor_info = executor_info
self.master_uri = master_uri
self.framework = framework
self.framework.failover_timeout = 100
self.framework_id = framework.id
self.master = None
self.detector = None
self.connected = False
self.savedOffers = {}
self.savedSlavePids = {}
@async # called by detector
def onNewMasterDetectedMessage(self, data):
try:
info = MasterInfo()
info.ParseFromString(data)
ip = socket.inet_ntoa(struct.pack('<I', info.ip))
master = UPID('master@%s:%s' % (ip, info.port))
except:
master = UPID(data)
self.connected = False
self.register(master)
@async # called by detector
def onNoMasterDetectedMessage(self):
self.connected = False
self.master = None
def register(self, master):
if self.connected or self.aborted:
return
if master:
if not self.framework_id.value:
msg = RegisterFrameworkMessage()
msg.framework.MergeFrom(self.framework)
else:
msg = ReregisterFrameworkMessage()
msg.framework.MergeFrom(self.framework)
msg.failover = True
self.send(master, msg)
self.delay(2, lambda:self.register(master))
def onFrameworkRegisteredMessage(self, framework_id, master_info):
self.framework_id = framework_id
self.framework.id.MergeFrom(framework_id)
self.connected = True
self.master = UPID('master@%s:%s' % (socket.inet_ntoa(struct.pack('<I', master_info.ip)), master_info.port))
self.link(self.master, self.onDisconnected)
self.sched.registered(self, framework_id, master_info)
def onFrameworkReregisteredMessage(self, framework_id, master_info):
assert self.framework_id == framework_id
self.connected = True
self.master = UPID('master@%s:%s' % (socket.inet_ntoa(struct.pack('<I', master_info.ip)), master_info.port))
self.link(self.master, self.onDisconnected)
self.sched.reregistered(self, master_info)
def onDisconnected(self):
self.connected = False
logger.warning("disconnected from master")
self.delay(5, lambda:self.register(self.master))
def onResourceOffersMessage(self, offers, pids):
for offer, pid in zip(offers, pids):
self.savedOffers.setdefault(offer.id.value, {})[offer.slave_id.value] = UPID(pid)
self.sched.resourceOffers(self, list(offers))
def onRescindResourceOfferMessage(self, offer_id):
self.savedOffers.pop(offer_id.value, None)
self.sched.offerRescinded(self, offer_id)
def onStatusUpdateMessage(self, update, pid=''):
if self.sender.addr != self.master.addr:
logger.warning("ignore status update message from %s instead of leader %s", self.sender, self.master)
return
assert self.framework_id == update.framework_id
self.sched.statusUpdate(self, update.status)
if not self.aborted and self.sender.addr and pid:
reply = StatusUpdateAcknowledgementMessage()
reply.framework_id.MergeFrom(self.framework_id)
reply.slave_id.MergeFrom(update.slave_id)
reply.task_id.MergeFrom(update.status.task_id)
reply.uuid = update.uuid
try: self.send(self.master, reply)
except IOError: pass
def onLostSlaveMessage(self, slave_id):
self.sched.slaveLost(self, slave_id)
def onExecutorToFrameworkMessage(self, slave_id, framework_id, executor_id, data):
self.sched.frameworkMessage(self, executor_id, slave_id, data)
def onFrameworkErrorMessage(self, message, code=0):
self.sched.error(self, message)
def start(self):
Process.start(self)
uri = self.master_uri
if uri.startswith('zk://') or uri.startswith('zoo://'):
from .detector import MasterDetector
self.detector = MasterDetector(uri[uri.index('://') + 3:], self)
self.detector.start()
else:
if not ':' in uri:
uri += ':5050'
self.onNewMasterDetectedMessage('master@%s' % uri)
def abort(self):
if self.connected:
msg = DeactivateFrameworkMessage()
msg.framework_id.MergeFrom(self.framework_id)
self.send(self.master, msg)
Process.abort(self)
def stop(self, failover=False):
if self.connected and not failover:
msg = UnregisterFrameworkMessage()
msg.framework_id.MergeFrom(self.framework_id)
self.send(self.master, msg)
if self.detector:
self.detector.stop()
Process.stop(self)
@async
def requestResources(self, requests):
if not self.connected:
return
msg = ResourceRequestMessage()
msg.framework_id.MergeFrom(self.framework_id)
for req in requests:
msg.requests.add().MergeFrom(req)
self.send(self.master, msg)
@async
def reviveOffers(self):
if not self.connected:
return
msg = ReviveOffersMessage()
msg.framework_id.MergeFrom(self.framework_id)
self.send(self.master, msg)
@async
def reconcileTasks(self, statuses=None):
if not self.connected:
return
msg = ReconcileTasksMessage()
msg.framework_id.MergeFrom(self.framework_id)
if statuses is not None:
msg.statuses = statuses
self.send(self.master, msg)
def launchTasks(self, offer_id, tasks, filters):
if not self.connected or offer_id.value not in self.savedOffers:
for task in tasks:
update = StatusUpdate()
update.framework_id.MergeFrom(self.framework_id)
update.status.task_id.MergeFrom(task.task_id)
update.status.state = TASK_LOST
update.status.message = 'Master disconnected' if not self.connected else "invalid offer_id"
update.timestamp = time.time()
update.uuid = ''
self.onStatusUpdateMessage(update)
return
msg = LaunchTasksMessage()
msg.framework_id.MergeFrom(self.framework_id)
msg.offer_ids.add().MergeFrom(offer_id)
msg.filters.MergeFrom(filters)
for task in tasks:
msg.tasks.add().MergeFrom(task)
pid = self.savedOffers.get(offer_id.value, {}).get(task.slave_id.value)
if pid and task.slave_id.value not in self.savedSlavePids:
self.savedSlavePids[task.slave_id.value] = pid
self.savedOffers.pop(offer_id.value)
self.send(self.master, msg)
def declineOffer(self, offer_id, filters=None):
if not self.connected:
return
msg = LaunchTasksMessage()
msg.framework_id.MergeFrom(self.framework_id)
msg.offer_ids.add().MergeFrom(offer_id)
if filters:
msg.filters.MergeFrom(filters)
self.send(self.master, msg)
@async
def killTask(self, task_id):
if not self.connected:
return
msg = KillTaskMessage()
msg.framework_id.MergeFrom(self.framework_id)
msg.task_id.MergeFrom(task_id)
self.send(self.master, msg)
@async
def sendFrameworkMessage(self, executor_id, slave_id, data):
if not self.connected:
return
msg = FrameworkToExecutorMessage()
msg.framework_id.MergeFrom(self.framework_id)
msg.executor_id.MergeFrom(executor_id)
msg.slave_id.MergeFrom(slave_id)
msg.data = data
slave = self.savedSlavePids.get(slave_id.value, self.master) # can not send to slave directly
self.send(slave, msg)
| sdgdsffdsfff/pymesos | pymesos/scheduler.py | Python | bsd-3-clause | 8,630 | 0.003708 |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
import pyrotrfid

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'pyrotrfid'
# BUG FIX: was u'GLP3', a typo for the project's GPL3 license tag.
copyright = u'GPL3'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pyrotrfid.__version__
# The full version, including alpha/beta/rc tags.
release = pyrotrfid.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'rtd'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = project + " v" + release

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logo.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {'**': 'links.html'}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pyrotrfiddoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', 'pyrotrfid.tex', u'pyrotrfid Documentation', u'', 'manual')]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', 'pyrotrfid', u'pyrotrfid Documentation', [u''], 1)]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None)}
| xapple/pyrotrfid | doc/conf.py | Python | gpl-3.0 | 7,241 | 0.005662 |
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import flask
from oss_lib import config
from ceagle.api import client
from ceagle.api_fake_data import fake_regions
CONF = config.CONF
bp = flask.Blueprint("regions", __name__)
@bp.route("", defaults={"detailed": False})
@bp.route("/detailed", defaults={"detailed": True})
@fake_regions.get_regions
def get_regions(detailed):
regions = {}
for service_name in CONF["services"].keys():
if service_name == "infra":
continue # TODO(boris-42): This should not be checked here.
service_client = client.get_client(service_name)
resp, code = service_client.get("/api/v1/regions")
if code != 200:
# FIXME ADD LOGS HERE
continue
for r in resp:
regions.setdefault(r, {"services": []})
regions[r]["services"].append(service_name)
if not detailed:
return flask.jsonify({"regions": list(regions.keys())})
return flask.jsonify({"regions": regions})
def get_blueprints():
    """Return the (url_prefix, blueprint) pairs exposed by this module."""
    blueprints = [["/regions", bp]]
    return blueprints
| seecloud/ceagle | ceagle/api/v1/regions.py | Python | apache-2.0 | 1,652 | 0 |
"""Support for BMW car locks with BMW ConnectedDrive."""
import logging
from homeassistant.components.bmw_connected_drive import DOMAIN as BMW_DOMAIN
from homeassistant.components.lock import LockDevice
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED
DEPENDENCIES = ['bmw_connected_drive']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the BMW Connected Drive lock."""
    accounts = hass.data[BMW_DOMAIN]
    _LOGGER.debug('Found BMW accounts: %s',
                  ', '.join([account.name for account in accounts]))

    # One lock entity per vehicle of every writable (non read-only) account.
    devices = [
        BMWLock(account, vehicle, 'lock', 'BMW lock')
        for account in accounts
        if not account.read_only
        for vehicle in account.account.vehicles
    ]
    add_entities(devices, True)
class BMWLock(LockDevice):
    """Representation of a BMW vehicle lock."""

    def __init__(self, account, vehicle, attribute: str, sensor_name):
        """Initialize the lock.

        :param account: BMWConnectedDriveAccount owning the vehicle.
        :param vehicle: bimmer_connected vehicle object.
        :param attribute: attribute key, used in the entity name/unique id.
        :param sensor_name: human-readable sensor label.
        """
        self._account = account
        self._vehicle = vehicle
        self._attribute = attribute
        self._name = '{} {}'.format(self._vehicle.name, self._attribute)
        self._unique_id = '{}-{}'.format(self._vehicle.vin, self._attribute)
        self._sensor_name = sensor_name
        # Cached lock state (STATE_LOCKED/STATE_UNLOCKED); None until updated.
        self._state = None

    @property
    def should_poll(self):
        """Do not poll this class.

        Updates are triggered from BMWConnectedDriveAccount.
        """
        return False

    @property
    def unique_id(self):
        """Return the unique ID of the lock."""
        return self._unique_id

    @property
    def name(self):
        """Return the name of the lock."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return the state attributes of the lock."""
        vehicle_state = self._vehicle.state
        return {
            'car': self._vehicle.name,
            'door_lock_state': vehicle_state.door_lock_state.value
        }

    @property
    def is_locked(self):
        """Return true if lock is locked."""
        return self._state == STATE_LOCKED

    def lock(self, **kwargs):
        """Lock the car."""
        _LOGGER.debug("%s: locking doors", self._vehicle.name)
        # Optimistic state set here because it takes some time before the
        # update callback response
        self._state = STATE_LOCKED
        self.schedule_update_ha_state()
        self._vehicle.remote_services.trigger_remote_door_lock()

    def unlock(self, **kwargs):
        """Unlock the car."""
        _LOGGER.debug("%s: unlocking doors", self._vehicle.name)
        # Optimistic state set here because it takes some time before the
        # update callback response
        self._state = STATE_UNLOCKED
        self.schedule_update_ha_state()
        self._vehicle.remote_services.trigger_remote_door_unlock()

    def update(self):
        """Update state of the lock."""
        from bimmer_connected.state import LockState

        _LOGGER.debug("%s: updating data for %s", self._vehicle.name,
                      self._attribute)
        vehicle_state = self._vehicle.state

        # Possible values: LOCKED, SECURED, SELECTIVE_LOCKED, UNLOCKED
        # LOCKED and SECURED both map to "locked"; anything else is unlocked.
        self._state = STATE_LOCKED \
            if vehicle_state.door_lock_state \
            in [LockState.LOCKED, LockState.SECURED] \
            else STATE_UNLOCKED

    def update_callback(self):
        """Schedule a state update."""
        self.schedule_update_ha_state(True)

    async def async_added_to_hass(self):
        """Add callback after being added to hass.

        Show latest data after startup.
        """
        self._account.add_update_listener(self.update_callback)
| nugget/home-assistant | homeassistant/components/bmw_connected_drive/lock.py | Python | apache-2.0 | 3,775 | 0 |
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""External collection 'core' file.
Perform search, database access."""
__revision__ = "$Id$"
import cgi
import sys
from copy import copy
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from invenio.config import CFG_SITE_LANG
from invenio.legacy.dbquery import run_sql, OperationalError, ProgrammingError
from invenio.base.i18n import gettext_set_language
from .config import CFG_EXTERNAL_COLLECTION_TIMEOUT
from .searcher import external_collections_dictionary
from .getter import HTTPAsyncPageGetter, async_download
from .templates import print_results, print_timeout
from .utils import get_collection_id, get_collection_descendants, \
warning, get_verbose_print
import invenio.legacy.template
# Global variables
template = invenio.legacy.template.load('websearch_external_collections')
external_collections_state = None
dico_collection_external_searches = None
dico_collection_seealso = None
#dico_collection_external_searches = {}
#dico_collection_seealso = {}
def print_external_results_overview(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG, print_overview=True):
"""Print the external collection overview box. Return the selected external collections and parsed query"""
from invenio.legacy.search_engine import create_basic_search_units
assert req
vprint = get_verbose_print(req, 'External collection (print_external_results_overview): ', verbosity_level)
pattern = bind_patterns(pattern_list)
vprint(3, 'pattern = %s' % cgi.escape(pattern))
if not pattern:
return (None, None, None, None)
basic_search_units = create_basic_search_units(None, pattern, field)
vprint(3, 'basic_search_units = %s' % cgi.escape(repr(basic_search_units)))
(search_engines, seealso_engines) = select_external_engines(current_collection, external_collection)
vprint(3, 'search_engines = ' + str(search_engines))
vprint(3, 'seealso_engines = ' + str(seealso_engines))
search_engines_list = external_collection_sort_engine_by_name(search_engines)
vprint(3, 'search_engines_list (sorted) : ' + str(search_engines_list))
if print_overview:
html = template.external_collection_overview(lang, search_engines_list)
req.write(html)
return (search_engines, seealso_engines, pattern, basic_search_units)
def perform_external_collection_search(req, current_collection, pattern_list, field,
external_collection, verbosity_level=0, lang=CFG_SITE_LANG,
selected_external_collections_infos=None, print_overview=True,
print_search_info=True, print_see_also_box=True, print_body=True):
"""Search external collection and print the seealso box."""
vprint = get_verbose_print(req, 'External collection: ', verbosity_level)
if selected_external_collections_infos:
(search_engines, seealso_engines, pattern, basic_search_units) = selected_external_collections_infos
else:
(search_engines, seealso_engines, pattern, basic_search_units) = print_external_results_overview(req,
current_collection, pattern_list, field, external_collection, verbosity_level, lang, print_overview=print_overview)
if not pattern:
return
do_external_search(req, lang, vprint, basic_search_units, search_engines, print_search_info, print_body)
if print_see_also_box:
create_seealso_box(req, lang, vprint, basic_search_units, seealso_engines, pattern)
vprint(3, 'end')
def bind_patterns(pattern_list):
"""Combine a list of patterns in an unique pattern.
pattern_list[0] should be the standart search pattern,
pattern_list[1:] are advanced search patterns."""
# just in case an empty list is fed to this function
try:
if pattern_list[0]:
return pattern_list[0]
except IndexError:
return None
pattern = ""
for pattern_part in pattern_list[1:]:
if pattern_part:
pattern += " " + pattern_part
return pattern.strip()
# See also box
def create_seealso_box(req, lang, vprint, basic_search_units=None, seealso_engines=None, query=''):
"Create the box that proposes links to other useful search engines like Google."
vprint(3, 'Create seealso box')
seealso_engines_list = external_collection_sort_engine_by_name(seealso_engines)
vprint(3, 'seealso_engines_list = ' + str(seealso_engines_list))
links = build_seealso_links(basic_search_units, seealso_engines_list, req, lang, query)
html = template.external_collection_seealso_box(lang, links)
req.write(html)
def build_seealso_links(basic_search_units, seealso_engines, req, lang, query):
"""Build the links for the see also box."""
_ = gettext_set_language(lang)
links = []
for engine in seealso_engines:
url = engine.build_search_url(basic_search_units, req.args, lang)
user_url = engine.build_user_search_url(basic_search_units, req.args, lang)
url = user_url or url
if url:
links.append('<a class="google" href="%(url)s">%(query)s %(text_in)s %(name)s</a>' % \
{'url': cgi.escape(url),
'query': cgi.escape(query),
'text_in': _('in'),
'name': _(engine.name)})
return links
# Selection
def select_external_engines(collection_name, selected_external_searches):
"""Build a tuple of two sets. The first one is the list of engine to use for an external search and the
second one is for the seealso box."""
collection_id = get_collection_id(collection_name)
if not collection_id:
return (None, None)
if not type(selected_external_searches) is list:
selected_external_searches = [selected_external_searches]
seealso_engines = set()
search_engines = set()
if collection_id in dico_collection_seealso:
seealso_engines = copy(dico_collection_seealso[collection_id])
if collection_id in dico_collection_external_searches:
seealso_engines = seealso_engines.union(dico_collection_external_searches[collection_id])
for ext_search_name in selected_external_searches:
if ext_search_name in external_collections_dictionary:
engine = external_collections_dictionary[ext_search_name]
if engine.parser:
search_engines.add(engine)
else:
warning('select_external_engines: %(ext_search_name)s unknown.' % locals())
seealso_engines = seealso_engines.difference(search_engines)
return (search_engines, seealso_engines)
# Search
def do_external_search(req, lang, vprint, basic_search_units, search_engines, print_search_info=True, print_body=True):
"""Make the external search."""
_ = gettext_set_language(lang)
vprint(3, 'beginning external search')
engines_list = []
for engine in search_engines:
url = engine.build_search_url(basic_search_units, req.args, lang)
user_url = engine.build_user_search_url(basic_search_units, req.args, lang)
if url:
engines_list.append([url, engine, user_url])
pagegetters_list = [HTTPAsyncPageGetter(engine[0]) for engine in engines_list]
def finished(pagegetter, data, current_time, print_search_info=True, print_body=True):
"""Function called, each time the download of a web page finish.
Will parse and print the results of this page."""
print_results(req, lang, pagegetter, data, current_time, print_search_info, print_body)
finished_list = async_download(pagegetters_list, finished, engines_list, CFG_EXTERNAL_COLLECTION_TIMEOUT, print_search_info, print_body)
for (finished, engine) in zip(finished_list, engines_list):
if not finished:
url = engine[2] or engine[0]
name = engine[1].name
print_timeout(req, lang, engine[1], name, url)
# Database management
def external_collection_load_states():
global external_collections_state, dico_collection_external_searches, dico_collection_seealso
external_collections_state = {}
dico_collection_external_searches = {}
dico_collection_seealso = {}
query = "SELECT collection_externalcollection.id_collection, collection_externalcollection.type, externalcollection.name FROM collection_externalcollection, externalcollection WHERE collection_externalcollection.id_externalcollection = externalcollection.id;"
try:
results = run_sql(query)
except (OperationalError, ProgrammingError):
results = None
if results:
for result in results:
collection_id = int(result[0])
search_type = int(result[1])
engine_name = result[2]
if engine_name not in external_collections_dictionary:
warning("No search engine : " + engine_name)
continue
engine = external_collections_dictionary[engine_name]
if collection_id not in external_collections_state:
external_collections_state[collection_id] = {}
col_states = external_collections_state[collection_id]
col_states[engine] = search_type
dictionary = None
if search_type == 1:
dictionary = dico_collection_seealso
if search_type in [2, 3]:
dictionary = dico_collection_external_searches
if dictionary is None:
continue
if collection_id not in dictionary:
dictionary[collection_id] = set()
engine_set = dictionary[collection_id]
engine_set.add(engine)
def external_collection_get_state(external_collection, collection_id):
external_collection_load_states()
if collection_id not in external_collections_state:
return 0
col_states = external_collections_state[collection_id]
if external_collection not in col_states:
return 0
return col_states[external_collection]
def external_collection_get_update_state_list(external_collection, collection_id, state, recurse=False):
changes = []
if external_collection_get_state(external_collection, collection_id) != state:
changes = ['(%(collection_id)d, %(id_externalcollection)d, %(state)d)' %
{'collection_id': collection_id, 'id_externalcollection': external_collection_getid(external_collection), 'state': state}]
if not recurse:
return changes
for descendant_id in get_collection_descendants(collection_id):
changes += external_collection_get_update_state_list(external_collection, descendant_id, state)
return changes
def external_collection_apply_changes(changes_list):
if not changes_list:
return
sql_values = ", ".join(changes_list)
sql = 'INSERT INTO collection_externalcollection (id_collection, id_externalcollection, type) VALUES ' + sql_values + 'ON DUPLICATE KEY UPDATE type=VALUES(type);'
run_sql(sql)
# Misc functions
def external_collection_sort_engine_by_name(engines_set):
"""Return a list of sorted (by name) search engines."""
if not engines_set:
return []
engines_list = [engine for engine in engines_set]
engines_list.sort(lambda x, y: cmp(x.name, y.name))
return engines_list
# External search ID
def external_collection_getid(external_collection):
"""Return the id of an external_collection. Will create a new entry in DB if needed."""
if 'id' in external_collection.__dict__:
return external_collection.id
query = 'SELECT id FROM externalcollection WHERE name="%(name)s";' % {'name': external_collection.name}
results = run_sql(query)
if not results:
query = 'INSERT INTO externalcollection (name) VALUES ("%(name)s");' % {'name': external_collection.name}
run_sql(query)
return external_collection_getid(external_collection)
external_collection.id = results[0][0]
return external_collection.id
def get_external_collection_engine(external_collection_name):
"""Return the external collection engine given its name"""
if external_collection_name in external_collections_dictionary:
return external_collections_dictionary[external_collection_name]
else:
return None
# Load db infos if it's not already done.
if external_collections_state is None:
external_collection_load_states()
# Hosted Collections related functions (the following functions should eventually be regrouped as above)
# These functions could eventually be placed into there own file, ex. websearch_hosted_collections.py
def calculate_hosted_collections_results(req, pattern_list, field, hosted_collections, verbosity_level=0,
lang=CFG_SITE_LANG, timeout=CFG_EXTERNAL_COLLECTION_TIMEOUT):
"""Ruturn a list of the various results for a every hosted collection organized in tuples"""
# normally, the following should be checked before even running this function so the following line could be removed
if not hosted_collections: return (None, None)
vprint = get_verbose_print(req, 'Hosted collections: ', verbosity_level)
vprint(3, 'pattern_list = %s, field = %s' % (cgi.escape(repr(pattern_list)), cgi.escape(field)))
# firstly we calculate the search parameters, i.e. the actual hosted search engines and the basic search units
(hosted_search_engines, basic_search_units) = \
calculate_hosted_collections_search_params(req,
pattern_list,
field,
hosted_collections,
verbosity_level)
# in case something went wrong with the above calculation just return None
# however, once we run this function no fail should be expected here
# UPDATE : let search go on even there are no basic search units (an empty pattern_list and field)
#if basic_search_units == None or len(hosted_search_engines) == 0: return (None, None)
if len(hosted_search_engines) == 0: return (None, None)
# finally return the list of tuples with the results
return do_calculate_hosted_collections_results(req, lang, vprint, verbosity_level, basic_search_units, hosted_search_engines, timeout)
vprint(3, 'end')
def calculate_hosted_collections_search_params(req,
pattern_list,
field,
hosted_collections,
verbosity_level=0):
"""Calculate the searching parameters for the selected hosted collections
i.e. the actual hosted search engines and the basic search units"""
from invenio.legacy.search_engine import create_basic_search_units
assert req
vprint = get_verbose_print(req, 'Hosted collections (calculate_hosted_collections_search_params): ', verbosity_level)
pattern = bind_patterns(pattern_list)
vprint(3, 'pattern = %s' % cgi.escape(pattern))
# if for any strange reason there is no pattern, just return
# UPDATE : let search go on even there is no pattern (an empty pattern_list and field)
#if not pattern: return (None, None)
# calculate the basic search units
basic_search_units = create_basic_search_units(None, pattern, field)
vprint(3, 'basic_search_units = %s' % cgi.escape(repr(basic_search_units)))
# calculate the set of hosted search engines
hosted_search_engines = select_hosted_search_engines(hosted_collections)
vprint(3, 'hosted_search_engines = ' + str(hosted_search_engines))
# no need really to print out a sorted list of the hosted search engines, is there? I'll leave this commented out
#hosted_search_engines_list = external_collection_sort_engine_by_name(hosted_search_engines)
#vprint(3, 'hosted_search_engines_list (sorted) : ' + str(hosted_search_engines_list))
return (hosted_search_engines, basic_search_units)
def select_hosted_search_engines(selected_hosted_collections):
"""Build the set of engines to be used for the hosted collections"""
if not type(selected_hosted_collections) is list:
selected_hosted_collections = [selected_hosted_collections]
hosted_search_engines = set()
for hosted_collection_name in selected_hosted_collections:
if hosted_collection_name in external_collections_dictionary:
engine = external_collections_dictionary[hosted_collection_name]
# the hosted collection cannot present its results unless it has a parser implemented
if engine.parser:
hosted_search_engines.add(engine)
else:
warning('select_hosted_search_engines: %(hosted_collection_name)s unknown.' % locals())
return hosted_search_engines
def do_calculate_hosted_collections_results(req, lang, vprint, verbosity_level, basic_search_units, hosted_search_engines,
timeout=CFG_EXTERNAL_COLLECTION_TIMEOUT):
"""Actually search the hosted collections and return their results and information in a list of tuples.
One tuple for each hosted collection. Handles timeouts"""
_ = gettext_set_language(lang)
if not vprint:
vprint = get_verbose_print(req, 'Hosted collections (calculate_hosted_collections_search_params): ', verbosity_level)
# defining vprint at this moment probably means we'll just run this one function at this time, therefore the "verbose"
# end hosted search string will not be printed (it is normally printed by the initial calculate function)
# Therefore, either define a flag here to print it by the end of this function or redefine the whole "verbose"
# printing logic of the above functions
vprint(3, 'beginning hosted search')
# list to hold the hosted search engines and their respective search urls
engines_list = []
# list to hold the non timed out results
results_list = []
# list to hold all the results
full_results_list = []
# list to hold all the timeouts
timeout_list = []
# in case this is an engine-only list
if type(hosted_search_engines) is set:
for engine in hosted_search_engines:
url = engine.build_search_url(basic_search_units, req.args, lang)
user_url = engine.build_user_search_url(basic_search_units, req.args, lang)
if url:
engines_list.append([url, engine, user_url])
# in case we are iterating a pre calculated url+engine list
elif type(hosted_search_engines) is list:
for engine in hosted_search_engines:
engines_list.append(engine)
# in both the above cases we end up with a [[search url], [engine]] kind of list
# create the list of search urls to be handed to the asynchronous getter
pagegetters_list = [HTTPAsyncPageGetter(engine[0]) for engine in engines_list]
# function to be run on every result
def finished(pagegetter, data, current_time):
"""Function called, each time the download of a web page finish.
Will parse and print the results of this page."""
# each pagegetter that didn't timeout is added to this list
results_list.append((pagegetter, data, current_time))
# run the asynchronous getter
finished_list = async_download(pagegetters_list, finished, engines_list, timeout)
# create the complete list of tuples, one for each hosted collection, with the results and other information,
# including those that timed out
for (finished, engine) in zip(finished_list, engines_list): #finished_and_engines_list:
if finished:
for result in results_list:
if result[1] == engine:
# the engine is fed the results, it will be parsed later, at printing time
engine[1].parser.parse_and_get_results(result[0].data, feedonly=True)
## the list contains:
## * the engine itself: [ search url], [engine]
## * the parsed number of found results
## * the fetching time
full_results_list.append(
(engine, engine[1].parser.parse_num_results(), result[2])
)
break
elif not finished:
## the list contains:
## * the engine itself: [search url], [engine]
timeout_list.append(engine)
return (full_results_list, timeout_list)
| MSusik/invenio | invenio/legacy/websearch_external_collections/__init__.py | Python | gpl-2.0 | 21,464 | 0.00601 |
from openerp import tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class product_category(osv.osv):
_inherit='product.category'
_columns = {
'sale_price' : fields.float('Sale Price',digits_compute=dp.get_precision('Product Price')),
'shape_id':fields.many2one('product.shape',string="Shape"),
'weight_from':fields.float('Weight From',digits_compute=dp.get_precision('Stock Weight')),
'weight_to':fields.float('Weight To',digits_compute=dp.get_precision('Stock Weight')),
'color_id':fields.many2one('product.color',string='Color'),
'clarity_id':fields.many2one('product.clarity',string='Clarity', ondelete='restrict'),
'shape_line':fields.one2many('shape.line','categ_id','Shape Lines'),
}
| manishpatell/erpcustomizationssaiimpex123qwe | addons/product_stone_search_ept/py/product/product_category.py | Python | agpl-3.0 | 912 | 0.024123 |
#!/usr/bin/python3
import gpxpy
import datetime
import time
import os
import gpxpy.gpx
import sqlite3
import pl
import re
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
filebase = os.environ["XDG_DATA_HOME"]+"/"+os.environ["APP_ID"].split('_')[0]
def create_gpx():
# Creating a new file:
# --------------------
gpx = gpxpy.gpx.GPX()
# Create first track in our GPX:
gpx_track = gpxpy.gpx.GPXTrack()
gpx.tracks.append(gpx_track)
# Create first segment in our GPX track:
gpx_segment = gpxpy.gpx.GPXTrackSegment()
gpx_track.segments.append(gpx_segment)
# Create points:
return gpx
def write_gpx(gpx,name,act_type):
# You can add routes and waypoints, too...
tzname=None
npoints=None
# polyline encoder default values
numLevels = 18;
zoomFactor = 2;
epsilon = 0.0;
forceEndpoints = True;
##print('Created GPX:', gpx.to_xml())
ts = int(time.time())
filename = "%s/%i.gpx" % (filebase,ts)
a = open(filename, 'w')
a.write(gpx.to_xml())
a.close()
gpx.simplify()
#gpx.reduce_points(1000)
trk = pl.read_gpx_trk(gpx.to_xml(),tzname,npoints,2,None)
try:
polyline=pl.print_gpx_google_polyline(trk,numLevels,zoomFactor,epsilon,forceEndpoints)
except UnboundLocalError as er:
print(er)
print("Not enough points to create a polyline")
polyline=""
#polyline="polyline"
add_run(gpx,name,act_type,filename,polyline)
def add_point(gpx,lat,lng,elev):
gpx.tracks[0].segments[0].points.append(gpxpy.gpx.GPXTrackPoint(lat, lng, elevation=elev,time=datetime.datetime.now()))
def add_run(gpx, name,act_type,filename,polyline):
conn = sqlite3.connect('%s/activities.db' % filebase)
cursor = conn.cursor()
cursor.execute("""CREATE TABLE if not exists activities
(id INTEGER PRIMARY KEY AUTOINCREMENT,name text, act_date text, distance text,
speed text, act_type text,filename text,polyline text)""")
sql = "INSERT INTO activities VALUES (?,?,?,?,?,?,?,?)"
start_time, end_time = gpx.get_time_bounds()
l2d='{:.3f}'.format(gpx.length_2d() / 1000.)
moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx.get_moving_data()
print(max_speed)
#print('%sStopped distance: %sm' % stopped_distance)
maxspeed = 'Max speed: {:.2f}km/h'.format(max_speed * 60. ** 2 / 1000. if max_speed else 0)
duration = 'Duration: {:.2f}min'.format(gpx.get_duration() / 60)
print("-------------------------")
print(name)
print(start_time)
print(l2d)
print(maxspeed)
print("-------------------------")
try:
cursor.execute(sql, [None, name,start_time,l2d,duration,act_type,filename,polyline])
conn.commit()
except sqlite3.Error as er:
print(er)
conn.close()
def get_runs():
#add_run("1", "2", "3", "4")
os.makedirs(filebase, exist_ok=True)
conn = sqlite3.connect('%s/activities.db' % filebase)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("""CREATE TABLE if not exists activities
(id INTEGER PRIMARY KEY AUTOINCREMENT,name text, act_date text, distance text,
speed text, act_type text,filename text,polyline text)""")
ret_data=[]
sql = "SELECT * FROM activities LIMIT 30"
for i in cursor.execute(sql):
ret_data.append(dict(i))
conn.close()
return ret_data
def get_units():
os.makedirs(filebase, exist_ok=True)
conn = sqlite3.connect('%s/activities.db' % filebase)
cursor = conn.cursor()
cursor.execute("""CREATE TABLE if not exists settings
(units text)""")
ret_data=[]
sql = "SELECT units FROM settings"
cursor.execute(sql)
data=cursor.fetchone()
if data is None:
print("NONESIES")
cursor.execute("INSERT INTO settings VALUES ('kilometers')")
conn.commit()
conn.close()
return "kilometers"
return data
def set_units(label):
os.makedirs(filebase, exist_ok=True)
conn = sqlite3.connect('%s/activities.db' % filebase)
cursor = conn.cursor()
cursor.execute("UPDATE settings SET units=? WHERE 1", (label,))
conn.commit()
conn.close()
def onetime_db_fix():
os.makedirs(filebase, exist_ok=True)
filename = "%s/%s" % (filebase,".dbfixed")
if not os.path.exists(filename):
print("Fixing db")
conn = sqlite3.connect('%s/activities.db' % filebase)
numonly = re.compile("(\d*\.\d*)")
cursor = conn.cursor()
a=get_runs()
sql="UPDATE activities SET distance=? WHERE id=?"
for i in a:
print(i["distance"])
b=numonly.search(i["distance"])
print(b.group(0))
print(b)
cursor.execute(sql, (b.group(0), i["id"]))
conn.commit()
conn.close()
dotfile=open(filename, "w")
dotfile.write("db fixed")
dotfile.close
else:
print("db already fixed")
def rm_run(run):
conn = sqlite3.connect('%s/activities.db' % filebase)
cursor = conn.cursor()
sql = "DELETE from activities WHERE id=?"
try:
cursor.execute(sql, [run])
conn.commit()
except sqlite3.Error as er:
print("-------------______---_____---___----____--____---___-----")
print(er)
conn.close()
def km_to_mi(km):
return km * 0.62137
def get_data():
moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx.get_moving_data()
return moving_distance, moving_time
| VictorThompson/ActivityTracker | py/geepeeex.py | Python | gpl-3.0 | 5,539 | 0.017873 |
# Copyright 2021 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Mozaik Website Event Track",
"summary": """
This module allows to see the event menu configuration
even without activated debug mode""",
"version": "14.0.1.0.0",
"license": "AGPL-3",
"author": "ACSONE SA/NV",
"website": "https://github.com/OCA/mozaik",
"depends": [
# Odoo
"website_event_track",
],
"data": [
"views/event_event.xml",
],
}
| mozaik-association/mozaik | mozaik_website_event_track/__manifest__.py | Python | agpl-3.0 | 525 | 0 |
import cPickle as pkl
import pdb
import datetime
import time
import numpy as np
import pylab as pl
import scipy.stats
import scipy.special
from scipy.special import gamma
from scipy.misc import factorial
import gnumpy as gp
import data_helper
class RBM(object):
    '''
    Restricted Boltzmann Machine (RBM) using gnumpy.

    Visible units are either binary or rectified-linear ('linear');
    hidden units are always binary.  Training is performed with K-step
    contrastive divergence (see ``cdk``).
    '''
    def __init__(self, params={}):
        '''
        RBM constructor. Defines the parameters of the model along with
        basic operations for inferring hidden from visible (and vice-versa),
        as well as for performing CD updates.

        input:
        -----------------
        Nv: number of visible units
        Nh: number of hidden units
        vis_unit: type of visible unit {'binary','linear'}
                    ('linear' = rectified linear unit)
        vis_scale: maximum output value for linear visible units
                    (average std_dev is ~= 1 at this scale,
                     so pre-scale training data with this in mind)
        bv: visible bias (optional initial value; zeros if omitted)
        Th: frame width used by framing subclasses (unused here)

        other params:
        -----------------
        W: weight between current hidden and visible units (undirected)
            [Nv x Nh]
        bh: hidden bias
        '''
        dtype = 'float32'

        Nv = params['Nv']
        Nh = params['Nh']
        vis_unit = params.get('vis_unit','binary')
        vis_scale = params.get('vis_scale')
        bv = params.get('bv')
        Th = params.get('Th',0)

        if vis_unit not in ['binary','linear']:
            raise ValueError('Unknown visible unit type %s' % vis_unit)
        if vis_unit == 'linear':
            if vis_scale is None:
                raise ValueError('Must set vis_scale for linear visible units')
        elif vis_unit == 'binary':
            # binary units saturate at 1 by definition
            vis_scale = 1.

        # W is initialized with `initial_W` which is uniformly sampled
        # from -4.*sqrt(6./(Nv+Nh)) and 4.*sqrt(6./(Nh+Nv))
        # the output of uniform if converted using asarray to dtype
        W = np.asarray( np.random.uniform(
            low = -4*np.sqrt(6./(Nv+Nh)),
            high = 4*np.sqrt(6./(Nv+Nh)),
            size = (Nv, Nh)),
            dtype = dtype)
        W = gp.garray(W)

        bh = gp.zeros(Nh)

        if bv is None :
            bv = gp.zeros(Nv)
        else:
            bv = gp.garray(bv)

        # params -------------------------------------------
        self.dtype = 'float32'

        self.Nv = Nv             # num visible units
        self.Nh = Nh             # num hidden units
        self.Th = Th             # used for framing input

        self.vis_unit = vis_unit     # type of visible output unit
        self.vis_scale = vis_scale   # scale of linear output units

        self.W = W               # vis<->hid weights
        self.bv = bv             # vis bias
        self.bh = bh             # hid bias

        # momentum buffers for the CD updates (same shapes as params)
        self.W_update = gp.zeros((Nv,Nh))
        self.bh_update = gp.zeros((Nh,))
        self.bv_update = gp.zeros((Nv,))

        # names of the attributes persisted by save_params/load_params
        self.params = [ 'dtype',
                'vis_unit','vis_scale',
                'Nv','Nh',
                'W','bh','bv']

    def save_params(self,filename=None):
        '''
        Save the attributes listed in ``self.params`` to a pickle file.

        garrays are converted to plain numpy arrays so the file can be
        read without a GPU/gnumpy present.
        '''
        if filename is None:
            # pick a random id so repeated unnamed saves don't collide
            fileid = np.random.randint(100000)
            filename = 'RBM_%u.pkl' % fileid

        params_out = {}
        for p in self.params:
            val = vars(self)[p]
            if type(val) is gp.garray:
                params_out[p] = val.as_numpy_array()
            else:
                params_out[p] = val
        fp = open(filename,'wb')
        pkl.dump(params_out,fp,protocol=-1)
        fp.close()

        print('saved %s' % filename)

    def load_params(self,filename):
        '''
        Load parameters previously written by ``save_params`` and
        re-create the garray weights and the update buffers.
        '''
        fp = open(filename,'rb')
        params_in = pkl.load(fp)
        fp.close()
        for key,value in params_in.iteritems():
            vars(self)[key] = value

        Nv,Nh = self.Nv,self.Nh
        dtype = self.dtype

        # momentum buffers are not persisted; start them at zero
        self.W_update = gp.zeros((Nv,Nh))
        self.bh_update = gp.zeros((Nh,))
        self.bv_update = gp.zeros((Nv,))

        # stored as numpy arrays on disk; move back onto the GPU
        self.W = gp.garray(self.W)
        self.bh = gp.garray(self.bh)
        self.bv = gp.garray(self.bv)

    def return_params(self):
        '''
        return a formatted string containing scalar parameters
        '''
        output = 'Nv=%u, Nh=%u, vis_unit=%s, vis_scale=%0.2f' \
                % (self.Nv,self.Nh,self.vis_unit,self.vis_scale)
        return output

    def mean_field_h_given_v(self,v):
        '''
        compute mean-field reconstruction of P(h=1|v)
        '''
        prob = sigmoid(self.bh + gp.dot(v, self.W))
        return prob

    def mean_field_v_given_h(self,h):
        '''
        compute mean-field reconstruction of P(v|h)

        binary units: logistic sigmoid of the input.
        linear units: mean of a rectified-linear unit clipped to
        [0, vis_scale], i.e. softplus(x) - softplus(x - vis_scale).
        '''
        x = self.bv + gp.dot(h, self.W.T)
        if self.vis_unit == 'binary':
            return sigmoid(x)
        elif self.vis_unit == 'linear':
            return log_1_plus_exp(x) - log_1_plus_exp(x-self.vis_scale)
        # vis_unit is validated in __init__, so no other case is possible

    def sample_h_given_v(self,v):
        '''
        compute samples from P(h|v)

        returns (samples, mean-field probabilities)
        '''
        prob = self.mean_field_h_given_v(v)
        samples = prob.rand() < prob
        return samples, prob

    def sample_v_given_h(self,h):
        '''
        compute samples from P(v|h)

        returns (samples, mean-field values)
        '''
        if self.vis_unit == 'binary':
            mean = self.mean_field_v_given_h(h)
            samples = mean.rand() < mean
            return samples, mean
        elif self.vis_unit == 'linear':
            x = self.bv + gp.dot(h, self.W.T)
            # variance of noise is sigmoid(x) - sigmoid(x - vis_scale)
            stddev = gp.sqrt(sigmoid(x) - sigmoid(x - self.vis_scale))
            mean = log_1_plus_exp(x) - log_1_plus_exp(x-self.vis_scale)
            noise = stddev * gp.randn(x.shape)
            samples = mean + noise
            # clip samples into the valid output range
            samples[samples < 0] = 0
            samples[samples > self.vis_scale] = self.vis_scale
            return samples, mean

    def cdk(self,K,v0_data,rate=0.001,momentum=0.0,weight_decay=0.001,noisy=0):
        '''
        compute K-step contrastive divergence update

        input:
        K - number of gibbs iterations (for cd-K)
        v0_data - training data [N x (Nv+Nl)]
        rate - learning rate
        momentum - learning momentum
        weight_decay - L2 regularizer
        noisy - 0 = use h0_mean, use visible means everywhere
                1 = use h0_samp, use visible means everywhere
                2 = use samples everywhere
        '''
        # collect gradient statistics
        h0_samp,h0_mean = self.sample_h_given_v(v0_data)
        hk_samp = h0_samp

        if noisy == 0:
            for k in xrange(K):  # vk_mean <--> hk_samp
                vk_mean = self.mean_field_v_given_h(hk_samp)
                hk_samp, hk_mean = self.sample_h_given_v(vk_mean)
            h0 = h0_mean
            vk = vk_mean
            hk = hk_mean
        elif noisy == 1:
            for k in xrange(K):  # vk_mean <--> hk_samp
                vk_mean = self.mean_field_v_given_h(hk_samp)
                hk_samp, hk_mean = self.sample_h_given_v(vk_mean)
            h0 = h0_samp  # <--
            vk = vk_mean
            hk = hk_mean
        elif noisy == 2:
            for k in xrange(K):  # vk_samp <--> hk_samp
                vk_samp, vk_mean = self.sample_v_given_h(hk_samp)
                hk_samp, hk_mean = self.sample_h_given_v(vk_samp)
            h0 = h0_samp
            vk = vk_samp  # <--
            hk = hk_samp  # <--

        W_grad,bv_grad,bh_grad = self.compute_gradients(v0_data,h0,vk,hk)

        if weight_decay > 0.0:
            W_grad += weight_decay * self.W

        rate = float(rate)
        if momentum > 0.0:
            momentum = float(momentum)
            self.W_update = momentum * self.W_update - rate*W_grad
            self.bh_update = momentum * self.bh_update - rate*bh_grad
            self.bv_update = momentum * self.bv_update - rate*bv_grad
        else:
            self.W_update = -rate*W_grad
            self.bh_update = -rate*bh_grad
            self.bv_update = -rate*bv_grad

        self.W = self.W + self.W_update
        self.bh = self.bh + self.bh_update
        self.bv = self.bv + self.bv_update

    def compute_gradients(self,v0,h0,vk,hk):
        '''
        Compute the CD gradients (negative-phase minus positive-phase
        statistics), averaged over the N rows of the batch.
        '''
        N = v0.shape[0]
        N_inv = 1./N
        W_grad = N_inv * (gp.dot(vk.T, hk) - gp.dot(v0.T, h0))
        bv_grad = gp.mean(vk - v0,axis=0)
        bh_grad = gp.mean(hk - h0,axis=0)
        return W_grad,bv_grad,bh_grad

    def gibbs_samples(self,K,v0_data,noisy=0):
        '''
        compute a visible unit sample using Gibbs sampling

        input:
        K - number of complete Gibbs iterations
        v0_data - seed value of visible units
        noisy - 0 = always use visible means and use hidden means to drive final sample
                1 = drive final sample with final hidden sample
                2 = use visible means for updates but use visible and hidden samples for final update
                3 = always use samples for both visible and hidden updates

        note: hidden samples are always used to drive visible
        reconstructions unless noted otherwise
        '''
        Nv = self.Nv
        h0_samp,h0_mean = self.sample_h_given_v(v0_data)
        hk_samp = h0_samp
        hk_mean = h0_mean
        if noisy < 3:
            for k in xrange(K-1):  # hk_samp <--> vk_mean
                vk_mean = self.mean_field_v_given_h(hk_samp)
                hk_samp, hk_mean = self.sample_h_given_v(vk_mean)
        else:
            for k in xrange(K-1):  # hk_samp <--> vk_samp
                vk_samp, vk_mean = self.sample_v_given_h(hk_samp)
                hk_samp, hk_mean = self.sample_h_given_v(vk_samp)

        if noisy == 0:    # hk_mean --> v_mean
            v_mean = self.mean_field_v_given_h(hk_mean)
            return v_mean
        elif noisy == 1:  # hk_samp --> v_mean
            v_mean = self.mean_field_v_given_h(hk_samp)
            return v_mean
        elif noisy > 1:   # hk_samp --> v_samp
            v_samp, v_mean = self.sample_v_given_h(hk_samp)
            return v_samp

    def recon_error(self, v0_data,K=1,print_output=False):
        '''
        compute K-step reconstruction error (mean absolute difference
        between the data and its K-step mean-field reconstruction)

        returns a formatted string when print_output is True, otherwise
        the raw error value
        '''
        vk_mean = self.gibbs_samples(K,v0_data,noisy=0)
        recon_error = gp.mean(gp.abs(v0_data - vk_mean))
        if print_output:
            output = '%30s %6.5f' % ('vis error:', recon_error/self.vis_scale)
            print(output)
            return output
        else:
            return recon_error

    def update_stats(self):
        '''
        Return [min, mean(|.|), max] statistics for each parameter and
        each update buffer, as two dicts keyed by parameter name.
        '''
        W_stats = [gp.min(self.W),gp.mean(gp.abs(self.W)),gp.max(self.W)]
        bh_stats = [gp.min(self.bh),gp.mean(gp.abs(self.bh)),gp.max(self.bh)]
        bv_stats = [gp.min(self.bv),gp.mean(gp.abs(self.bv)),gp.max(self.bv)]

        W_update_stats = [gp.min(self.W_update), gp.mean(gp.abs(self.W_update)), gp.max(self.W_update)]
        bh_update_stats = [gp.min(self.bh_update), gp.mean(gp.abs(self.bh_update)), gp.max(self.bh_update)]
        bv_update_stats = [gp.min(self.bv_update), gp.mean(gp.abs(self.bv_update)), gp.max(self.bv_update)]

        param_stats = dict(W=W_stats,bh=bh_stats,bv=bv_stats)
        update_stats = dict(W=W_update_stats,
                bh=bh_update_stats,bv=bv_update_stats)
        return [param_stats, update_stats]
class LRBM(RBM):
'''
Labeled Restricted Boltzmann Machine
'''
def __init__(self, params={}):
'''
input:
-----------------
(in addition to those defined in RBM class)
Nl: number of label units (group of softmax units)
'''
dtype = 'float32'
super(LRBM,self).__init__(params)
bv = params.get('bv')
Nl = params['Nl']
Nv = self.Nv
Nh = self.Nh
# add label units to visible units
# W is initialized with uniformly sampled data
# from -4.*sqrt(6./(Nv+Nh)) and 4.*sqrt(6./(Nh+Nv))
W = np.asarray( np.random.uniform(
low = -4*np.sqrt(6./(Nv+Nl+Nh)),
high = 4*np.sqrt(6./(Nv+Nl+Nh)),
size = (Nv+Nl, Nh)),
dtype = dtype)
W = gp.garray(W)
if bv is None :
bv = gp.zeros((Nv+Nl))
else:
bv = gp.garray(bv)
# new label-unit params -------------------------------------------
self.Nl = Nl # num label units
self.W = W # (vis+lab)<->hid weights
self.bv = bv # vis bias
self.W_update = gp.zeros((Nv+Nl,Nh))
self.bv_update = gp.zeros((Nv+Nl,))
self.params += ['Nl']
def load_params(self,filename):
'''load parameters from file'''
super(LRBM,self).load_params(filename)
Nv,Nh,Nl,= self.Nv,self.Nh,self.Nl
dtype = self.dtype
self.W_update = gp.zeros((Nv+Nl,Nh))
self.bv_update = gp.zeros((Nv+Nl,))
def save_params(self,filename=None):
'''save parameters to file'''
if filename is None:
fileid = np.random.randint(100000)
filename = 'LRBM_%u.pkl' % fileid
super(LRBM,self).save_params(filename)
def return_params(self):
'''
return a formatted string containing scalar parameters
'''
output = super(LRBM,self).return_params()
output = 'Nl=%u, ' % (self.Nl) + output
return output
def separate_vis_lab(self,x,axis=1):
'''
separate visible unit data from label unit data
'''
Nl = self.Nl
if x.ndim == 1:
axis = 0
if axis == 0:
x_lab = x[-Nl:]
x_vis = x[:-Nl]
elif axis == 1:
x_lab = x[:,-Nl:]
x_vis = x[:,:-Nl]
return x_vis, x_lab
def join_vis_lab(self,x_vis,x_lab,axis=1):
'''
join visible unit data to label unit data
'''
if x_vis.ndim == 1:
axis = 0
x = gp.concatenate((x_vis,x_lab),axis=axis)
return x
def mean_field_v_given_h(self,h):
'''compute mean-field reconstruction of P(v|h)'''
x = self.bv + gp.dot(h, self.W.T)
x_vis, x_lab = self.separate_vis_lab(x)
lab_mean = softmax(x_lab)
if self.vis_unit == 'binary':
vis_mean = sigmoid(x_vis)
elif self.vis_unit == 'linear':
vis_mean = log_1_plus_exp(x_vis) - log_1_plus_exp(x_vis-self.vis_scale)
means = self.join_vis_lab(vis_mean,lab_mean)
return means
def sample_v_given_h(self,h):
'''compute samples from P(v|h)'''
if self.vis_unit == 'binary':
means = self.mean_field_v_given_h(h)
vis_mean,lab_mean = self.separate_vis_lab(means)
vis_samp = vis_mean.rand() < vis_mean
elif self.vis_unit == 'linear':
x = self.bv + gp.dot(h, self.W.T)
x_vis,x_lab = self.separate_vis_lab(x)
# variance of noise is sigmoid(x_vis) - sigmoid(x_vis - vis_scale)
vis_stddev = gp.sqrt(sigmoid(x_vis) - sigmoid(x_vis - self.vis_scale))
vis_mean = log_1_plus_exp(x_vis) - log_1_plus_exp(x_vis-self.vis_scale)
vis_noise = stddev * gp.random.standard_normal(size=x.shape)
vis_samp = vis_mean + vis_noise
vis_samp[vis_samp < 0] = 0
vis_samp[vis_samp > self.vis_scale] = self.vis_scale
lab_mean = softmax(x_lab)
means = self.join_vis_lab(vis_mean,lab_mean)
lab_samp = sample_categorical(lab_mean)
samples = self.join_vis_lab(vis_samp,lab_samp)
return samples, means
def label_probabilities(self,v_input,output_h=False):
'''
compute the activation probability of each label unit given the visible units
'''
#compute free energy for each label configuration
# F(v,c) = -sum(v*bv) - bl[c] - sum(log(1 + exp(z_c)))
# where z_c = bh + dot(v,W) + r[c] (r[c] are the weights for label c)
# also, v_input = [v,l], where l are binary "one-hot" labels
b_hid = self.bh
b_vis, b_lab = self.separate_vis_lab(self.bv)
v_vis, v_lab = self.separate_vis_lab(v_input)
W_vis,W_lab = self.separate_vis_lab(self.W,axis=0)
# the b_vis term cancels out in the softmax
#F = -np.sum(v_vis*b_vis,axis=1)
#F = F.reshape((-1,1)) - b_lab
F = - b_lab
z = b_hid + gp.dot(v_vis,W_vis)
z = z.reshape(z.shape + (1,))
z = z + W_lab.T.reshape((1,) + W_lab.T.shape)
hidden_terms = -gp.sum(log_1_plus_exp(z), axis=1)
F = F + hidden_terms
pr = softmax(-F)
# compute hidden probs for each label configuration
# this is used in the discriminative updates
if output_h:
h = sigmoid(z)
return pr, h
else:
return pr
def discriminative_train(self,v_input,rate=0.001,momentum=0.0,weight_decay=0.001):
'''
Update weights using discriminative updates.
These updates use gradient ascent of the log-likelihood of the
label probability of the correct label
input:
v_input - [v_past, v_visible, v_labels]
(v_labels contains the binary activation of the correct label)
'''
N = v_input.shape[0]
# things to compute:
# h_d - hidden unit activations for each label configuration
# p_d - label unit probabilities
p_d, h_d = self.label_probabilities(v_input,output_h=True)
v_vis,v_lab = self.separate_vis_lab(v_input)
ind, true_labs = gp.where(v_lab == 1)
#scale = float(rate / N)
N_inv = 1./N
# prob_scale = (1-p_d) for correct label and -p_d for other labels
prob_scale = -p_d
prob_scale[ind,true_labs] += 1
ps_broad = prob_scale.reshape((N,1,self.Nl)) # make broadcastable across h_d
p_h_sum = gp.sum(ps_broad * h_d, axis=2)
# compute gradients ----------------------------------------------
# W = [w,r]
w_grad = gp.dot(v_vis.T, p_h_sum) # vis<-->hid
r_grad = gp.sum( ps_broad * h_d, axis=0 ).T # lab<-->hid
W_grad = N_inv * self.join_vis_lab(w_grad,r_grad,axis=0)# [vis,lab]<-->hid
bh_grad = gp.mean(p_h_sum,axis=0) # -->hid
# bv = [bvv,bvl] # -->[vis,lab]
bvv,bvl = self.separate_vis_lab(self.bv)
bvv_grad = gp.zeros(bvv.shape) # -->vis
bvl_grad = gp.mean(prob_scale,axis=0) # -->lab
# ---------------------------------------------------------------
if weight_decay > 0.0:
W_grad += -weight_decay * self.W
#Wv_grad = self.join_vis_lab(Wvv_grad,Wvl_grad)
bv_grad = self.join_vis_lab(bvv_grad,bvl_grad)
rate = float(rate)
if momentum > 0.0:
momentum = float(momentum)
self.W_update = momentum * self.W_update + rate*W_grad
self.bh_update = momentum * self.bh_update + rate*bh_grad
self.bv_update = momentum * self.bv_update + rate*bv_grad
else:
self.W_update = rate*W_grad
self.bh_update = rate*bh_grad
self.bv_update = rate*bv_grad
self.W += self.W_update
self.bh += self.bh_update
self.bv += self.bv_update
def recon_error(self,v0_data,K=1,print_output=False):
'''compute K-step reconstruction error'''
vk_mean = self.gibbs_samples(K,v0_data,noisy=0)
v0_vis,v0_lab = self.separate_vis_lab(v0_data)
vk_vis,vk_lab = self.separate_vis_lab(vk_mean)
vis_error = gp.mean(gp.abs(v0_vis - vk_vis))
lab_error = gp.mean(gp.abs(v0_lab - vk_lab))
lab_probs = self.label_probabilities(v0_data)
#pred_labs = gargmax(lab_probs)
pred_labs = lab_probs.argmax(axis=1)
ind, true_labs = gp.where(v0_lab == 1)
percent_correct = gp.mean(pred_labs == true_labs)
cross_entropy = -gp.mean(gp.log(lab_probs[ind,true_labs]))
#prob_error = gp.mean(gp.abs(1. - lab_probs[ind,true_labs]))
if print_output:
output = '%30s %6.5f' % ('vis error:', vis_error/self.vis_scale) + '\n'
output += '%30s %6.5f' % ('lab error:', lab_error) + '\n'
#output += '%30s %6.5f' % ('prob error:', prob_error) + '\n'
output += '%30s %6.5f' % ('cross entropy:', cross_entropy) + '\n'
output += '%30s %6.5f' % ('class correct:', percent_correct)
print output
return output
else:
return percent_correct, cross_entropy, lab_error, vis_error/self.vis_scale
class CRBM(object):
'''Conditional Restricted Boltzmann Machine (CRBM) using gnumpy '''
def __init__(self, params={}):
'''
RBM constructor. Defines the parameters of the model along with
basic operations for inferring hidden from visible (and vice-versa),
as well as for performing CD updates.
input:
-----------------
Nv: number of visible units
Nh: number of hidden units
Tv: order of autoregressive weights (RBM has Tv=0)
(how far into the past do they go?)
Th: order of past visible to current hidden weights (RBM has Th=0)
(how far into the past do they go?)
period: natural repetition period of data [default=Tv]
(for initializing generative gibbs sampling)
vis_unit: type of visible unit {'binary','linear'}
('linear' = rectified linear unit)
vis_scale: maximum output value for linear visible units
(average std_dev is ~= 1 at this scale,
so pre-scale training data with this in mind)
bv: visible bias
Wv_scale - how much to rescale Wv updates
other params:
--------------------
W: weight between current hidden and visible units (undirected)
[Nv x Nh]
Wh: past visible to current hidden weights (directed)
[Tv*Nv x Nh]
Wv: past visible to current visible weights (directed)
[Tv*Nv x Nv]
bh: hidden bias
'''
dtype = 'float32'
Nv = params['Nv']
Nh = params['Nh']
Tv = params['Tv']
Th = params['Th']
T = max(Tv,Th)
period = params.get('period',T)
vis_unit = params.get('vis_unit','binary')
vis_scale = params.get('vis_scale')
bv = params.get('bv')
Wv_scale = params.get('Wv_scale',0.01)
if vis_unit not in ['binary','linear']:
raise ValueError, 'Unknown visible unit type %s' % vis_unit
if vis_unit == 'linear':
if vis_scale is None:
raise ValueError, 'Must set vis_scale for linear visible units'
elif vis_unit == 'binary':
vis_scale = 1.
if period is None:
period = T
else:
if period > T:
raise ValueError, 'period must be <= max(Tv,Th)'
# W is initialized with `initial_W` which is uniformly sampled
# from -4.*sqrt(6./(Nv+Nh)) and 4.*sqrt(6./(Nh+Nv))
# the output of uniform if converted using asarray to dtype
W = np.asarray( np.random.uniform(
low = -4*np.sqrt(6./(Nv+Nh)),
high = 4*np.sqrt(6./(Nv+Nh)),
size = (Nv, Nh)),
dtype = dtype)
W = gp.garray(W)
Wv = np.asarray( np.random.uniform(
low = -4*np.sqrt(6./(Nv*Tv+Nv)),
high = 4*np.sqrt(6./(Nv*Tv+Nv)),
size = (Nv*Tv, Nv)),
dtype = dtype)
Wv = gp.garray(Wv)
Wh = np.asarray( np.random.uniform(
low = -4*np.sqrt(6./(Nv*Th+Nh)),
high = 4*np.sqrt(6./(Nv*Th+Nh)),
size = (Nv*Th, Nh)),
dtype = dtype)
Wh = gp.garray(Wh)
bh = gp.zeros(Nh)
if bv is None :
bv = gp.zeros(Nv)
else:
bv = gp.garray(bv)
# params -------------------------------------------
self.dtype = 'float32'
self.Nv = Nv # num visible units
self.Nh = Nh # num hidden units
self.Tv = Tv # num vis->vis delay taps
self.Th = Th # num vis->hid delay taps
self.T = T # max(Tv,Th)
self.period = period # typical repetition period of sequences
self.vis_unit = vis_unit # type of visible output unit
self.vis_scale = vis_scale # scale of linear output units
self.W = W # vis<->hid weights
self.Wv = Wv # vis->vis delay weights
self.Wh = Wh # vis->hid delay weights
self.bv = bv # vis bias
self.bh = bh # hid bias
self.Wv_scale = Wv_scale # rescale Wv updates
self.W_update = gp.zeros((Nv,Nh))
self.Wv_update = gp.zeros((Nv*Tv,Nv))
self.Wh_update = gp.zeros((Nv*Th,Nh))
self.bh_update = gp.zeros((Nh,))
self.bv_update = gp.zeros((Nv,))
self.params = [ 'dtype',
'period','vis_unit','vis_scale',
'Nv','Nh','Tv','Th','T',
'W','Wv','Wh','bh','bv']
def save_params(self,filename=None):
'''save parameters to file'''
if filename is None:
id = np.random.randint(100000)
filename = 'CRBM_%u.pkl' % id
params_out = {}
for p in self.params:
val = vars(self)[p]
if type(val) is gp.garray:
params_out[p] = val.as_numpy_array()
else:
params_out[p] = val
fp = open(filename,'wb')
pkl.dump(params_out,fp,protocol=-1)
fp.close()
print 'saved %s' % filename
def load_params(self,filename):
'''load parameters from file'''
fp = open(filename,'rb')
params_in = pkl.load(fp)
fp.close()
for key,value in params_in.iteritems():
vars(self)[key] = value
Nv,Nh,Tv,Th = self.Nv,self.Nh,self.Tv,self.Th
dtype = self.dtype
self.W_update = gp.zeros((Nv,Nh))
self.Wv_update = gp.zeros((Nv*Tv,Nv))
self.Wh_update = gp.zeros((Nv*Th,Nh))
self.bh_update = gp.zeros((Nh,))
self.bv_update = gp.zeros((Nv,))
self.W = gp.garray(self.W)
self.Wv = gp.garray(self.Wv)
self.Wh = gp.garray(self.Wh)
self.bh = gp.garray(self.bh)
self.bv = gp.garray(self.bv)
def return_params(self):
'''
return a formatted string containing scalar parameters
'''
output = 'Nv=%u, Nh=%u, vis_unit=%s, vis_scale=%0.2f, Tv=%u, Th=%u, Wv_scale=%g' \
% (self.Nv,self.Nh,self.vis_unit,self.vis_scale,self.Tv,self.Th,self.Wv_scale)
return output
def extract_data(self,v_input):
Nv = self.Nv
Tv = self.Tv
Th = self.Th
if v_input.ndim == 1:
v_data = v_input[-Nv:]
vv_past = v_input[-Nv*(1+Tv):-Nv]
vh_past = v_input[-Nv*(1+Th):-Nv]
else:
v_data = v_input[:,-Nv:]
vv_past = v_input[:,-Nv*(1+Tv):-Nv]
vh_past = v_input[:,-Nv*(1+Th):-Nv]
return v_data, vv_past, vh_past
def mean_field_h_given_v(self,v,h_bias):
'''compute mean-field reconstruction of P(ht=1|vt,v<t)'''
prob = sigmoid(h_bias + gp.dot(v, self.W))
return prob
def mean_field_h_given_v_frame(self,v_input):
'''
compute mean-field reconstruction of P(ht=1|vt,v<t)
and compute h_bias from data
input:
v_frames - contains [v_past, v_curr] in a matrix
'''
v,vv_past,vh_past = self.extract_data(v_input)
h_bias = self.bh + gp.dot(vh_past,self.Wh)
return sigmoid(h_bias + gp.dot(v, self.W))
def mean_field_v_given_h(self,h,v_bias):
'''compute mean-field reconstruction of P(vt|ht,v<t)'''
x = v_bias + gp.dot(h, self.W.T)
if self.vis_unit == 'binary':
return sigmoid(x)
elif self.vis_unit == 'linear':
return log_1_plus_exp(x) - log_1_plus_exp(x-self.vis_scale)
return prob
def sample_h_given_v(self,v,h_bias):
'''compute samples from P(ht=1|vt,v<t)'''
prob = self.mean_field_h_given_v(v,h_bias)
samples = prob.rand() < prob
return samples, prob
def sample_v_given_h(self,h,v_bias):
'''compute samples from P(vt|ht,v<t)'''
if self.vis_unit == 'binary':
mean = self.mean_field_v_given_h(h,v_bias)
samples = mean.rand() < mean
return samples, mean
elif self.vis_unit == 'linear':
x = v_bias + gp.dot(h, self.W.T)
# variance of noise is sigmoid(x) - sigmoid(x - vis_scale)
stddev = gp.sqrt(sigmoid(x) - sigmoid(x - self.vis_scale))
mean = log_1_plus_exp(x) - log_1_plus_exp(x-self.vis_scale)
noise = stddev * gp.randn(x.shape)
samples = mean + noise
samples *= samples > 0
samples_over = samples - self.vis_scale
samples_over *= samples_over > 0
samples_over -= samples_over
return samples, mean
def cdk(self,K,v_input,rate=0.001,momentum=0.0,weight_decay=0.001,noisy=0):
'''
compute K-step contrastive divergence update
input:
K - number of gibbs iterations (for cd-K)
v_input - contains [v_past, v0_data] = [(N x Nv*max(Tv,Th)), (N x Nv)]
rate - learning rate
momentum - learning momentum
weight_decay - L2 regularizer
noisy - 0 = use hidden samples, but means as final values
1 = use visible and hidden samples, but means as final values
2 = use visible and hidden samples, samples for final hidden values, means for final visibles
3 = use samples everywhere.
'''
# compute gradient statistics
v0_data,vv_past,vh_past = self.extract_data(v_input)
v_bias,h_bias = self.compute_dynamic_bias(v_input)
h0_samp,h0_mean = self.sample_h_given_v(v0_data,h_bias)
hk_samp = h0_samp
if noisy == 0:
for k in xrange(K): # vk_mean <--> hk_samp
vk_mean = self.mean_field_v_given_h(hk_samp,v_bias)
hk_samp, hk_mean = self.sample_h_given_v(vk_mean,h_bias)
h0 = h0_mean
vk = vk_mean
hk = hk_mean
elif noisy == 1:
for k in xrange(K): # vk_mean <--> hk_samp
vk_samp, vk_mean = self.sample_v_given_h(hk_samp,v_bias)
hk_samp, hk_mean = self.sample_h_given_v(vk_samp,h_bias)
h0 = h0_mean # <--
vk = vk_mean
hk = hk_mean
elif noisy == 2:
for k in xrange(K): # vk_samp <--> hk_samp
vk_samp, vk_mean = self.sample_v_given_h(hk_samp,v_bias)
hk_samp, hk_mean = self.sample_h_given_v(vk_samp,h_bias)
h0 = h0_samp
vk = vk_mean # <--
hk = hk_samp # <--
elif noisy == 3:
for k in xrange(K): # vk_samp <--> hk_samp
vk_samp, vk_mean = self.sample_v_given_h(hk_samp,v_bias)
hk_samp, hk_mean = self.sample_h_given_v(vk_samp,h_bias)
h0 = h0_samp
vk = vk_samp # <--
hk = hk_samp # <--
# compute gradients
W_grad,Wv_grad,Wh_grad,bv_grad,bh_grad = self.compute_gradients(v_input,h0,vk,hk)
if weight_decay > 0.0:
W_grad += weight_decay * self.W
Wv_grad += weight_decay * self.Wv
Wh_grad += weight_decay * self.Wh
rate = float(rate)
if momentum > 0.0:
momentum = float(momentum)
self.W_update = momentum * self.W_update - rate*W_grad
self.Wv_update = momentum * self.Wv_update - self.Wv_scale*rate*Wv_grad
self.Wh_update = momentum * self.Wh_update - rate*Wh_grad
self.bh_update = momentum * self.bh_update - rate*bh_grad
self.bv_update = momentum * self.bv_update - rate*bv_grad
else:
self.W_update = -rate*W_grad
self.Wv_update = -self.Wv_scale*rate*Wv_grad
self.Wh_update = -rate*Wh_grad
self.bh_update = -rate*bh_grad
self.bv_update = -rate*bv_grad
self.W = self.W + self.W_update
self.Wv = self.Wv + self.Wv_update
self.Wh = self.Wh + self.Wh_update
self.bh = self.bh + self.bh_update
self.bv = self.bv + self.bv_update
def compute_gradients(self,v_input,h0,vk,hk):
v0,vv_past,vh_past = self.extract_data(v_input)
N = v0.shape[0]
N_inv = 1./N
W_grad = N_inv * (gp.dot(vk.T, hk) - gp.dot(v0.T, h0))
Wv_grad = N_inv * (gp.dot(vv_past.T, vk) - gp.dot(vv_past.T, v0))
Wh_grad = N_inv * (gp.dot(vh_past.T, hk) - gp.dot(vh_past.T, h0))
bv_grad = gp.mean(vk - v0,axis=0)
bh_grad = gp.mean(hk - h0,axis=0)
return W_grad,Wv_grad,Wh_grad,bv_grad,bh_grad
def compute_dynamic_bias(self,v_input):
v_data,vv_past,vh_past = self.extract_data(v_input)
v_bias = self.bv + gp.dot(vv_past,self.Wv)
h_bias = self.bh + gp.dot(vh_past,self.Wh)
return v_bias, h_bias
def generate(self,seed,num_steps,K,noisy=False):
'''
generate a sequence of length num_steps given the seed sequence
input:
seed - Nv dimensional sequence of length >= max(Tv,Th)
flattened using row-major ordering
(units in same time step nearest each other)
num_steps - number of sequence steps to generate
K - number of gibbs iterations per sample
noisy - noise level of gibbs samples [0,1,2,3] (see gibbs_samples() method)
output:
sequence - Nv dimensional sequence of length num_steps + seed length
'''
T = max(self.Tv,self.Th)
Nv = self.Nv
frame_size = Nv * T
hop_size = Nv
period_size = Nv * self.period
if len(seed) < frame_size:
raise ValueError, 'Seed not long enough'
sequence = np.concatenate( (seed, np.zeros(num_steps * Nv))).astype('float32')
sequence = gp.garray(sequence)
idx = len(seed) - frame_size
while idx+frame_size+Nv <= len(sequence):
v_input = sequence[idx:idx+frame_size+Nv]
# use samples from one period ago as starting point for Gibbs sampling
v_input[-Nv:] = v_input[-period_size-Nv:-period_size]
v_curr = self.gibbs_samples(K,v_input,noisy)
sequence[idx+frame_size:idx+frame_size+Nv] = v_curr
idx += hop_size
return sequence
def gibbs_samples(self,K,v_input,noisy=0):
'''
compute a visible unit sample using Gibbs sampling
input:
K - number of complete Gibbs iterations
v_input - [v_past, v_curr_seed] array flattened using row-major ordering
* v_past of length Nv*max(Tv,Th)
* v_curr_seed of length Nv
noisy - 0 = always use visible means and use hidden means to drive final sample
1 = drive final sample with final hidden sample
2 = use visible means for updates but use visible and hidden samples for final update
3 = always use samples for both visible and hidden updates
note: hidden samples are always used to drive visible reconstructions unless noted otherwise
'''
Nv = self.Nv
v0_data,vv_past,vh_past = self.extract_data(v_input)
v_bias,h_bias = self.compute_dynamic_bias(v_input)
h0_samp,h0_mean = self.sample_h_given_v(v0_data,h_bias)
hk_samp = h0_samp
hk_mean = h0_mean
if noisy < 3:
for k in xrange(K-1): # hk_samp <--> vk_mean
vk_mean = self.mean_field_v_given_h(hk_samp,v_bias)
hk_samp, hk_mean = self.sample_h_given_v(vk_mean,h_bias)
else:
for k in xrange(K-1): # hk_samp <--> vk_samp
vk_samp, vk_mean = self.sample_v_given_h(hk_samp,v_bias)
hk_samp, hk_mean = self.sample_h_given_v(vk_samp,h_bias)
if noisy == 0: # hk_mean --> v_mean
v_mean = self.mean_field_v_given_h(hk_mean,v_bias)
return v_mean
elif noisy == 1: # hk_samp --> v_mean
v_mean = self.mean_field_v_given_h(hk_samp,v_bias)
return v_mean
elif noisy > 1: # hk_samp --> v_samp
v_samp, v_mean = self.sample_v_given_h(hk_samp,v_bias)
return v_samp
def recon_error(self, v_input,K=1,print_output=False):
'''compute K-step reconstruction error'''
v0_data,vv_past,vh_past = self.extract_data(v_input)
vk_mean = self.gibbs_samples(K,v_input,noisy=0)
recon_error = gp.mean(gp.abs(v0_data - vk_mean))
if print_output:
output = '%30s %6.5f' % ('vis error:', recon_error/self.vis_scale)
print output
return output
else:
return recon_error
def update_stats(self):
W_stats = [gp.min(self.W),gp.mean(gp.abs(self.W)),gp.max(self.W)]
Wv_stats = [gp.min(self.Wv),gp.mean(gp.abs(self.Wv)),gp.max(self.Wv)]
Wh_stats = [gp.min(self.Wh),gp.mean(gp.abs(self.Wh)),gp.max(self.Wh)]
bh_stats = [gp.min(self.bh),gp.mean(gp.abs(self.bh)),gp.max(self.bh)]
bv_stats = [gp.min(self.bv),gp.mean(gp.abs(self.bv)),gp.max(self.bv)]
W_update_stats = [gp.min(self.W_update), gp.mean(gp.abs(self.W_update)), gp.max(self.W_update)]
Wv_update_stats = [gp.min(self.Wv_update), gp.mean(gp.abs(self.Wv_update)), gp.max(self.Wv_update)]
Wh_update_stats = [gp.min(self.Wh_update), gp.mean(gp.abs(self.Wh_update)), gp.max(self.Wh_update)]
bh_update_stats = [gp.min(self.bh_update), gp.mean(gp.abs(self.bh_update)), gp.max(self.bh_update)]
bv_update_stats = [gp.min(self.bv_update), gp.mean(gp.abs(self.bv_update)), gp.max(self.bv_update)]
param_stats = dict(W=W_stats,Wv=Wv_stats,Wh=Wh_stats,bh=bh_stats,bv=bv_stats)
update_stats = dict(W=W_update_stats,
Wv=Wv_update_stats,Wh=Wh_update_stats,bh=bh_update_stats,bv=bv_update_stats)
return [param_stats, update_stats]
class LCRBM(CRBM):
    '''Labeled Conditional Restricted Boltzmann Machine (CRBM) using numpy '''
    def __init__(self, params={}):
        '''
        input:
        -----------------
        (in addition to those defined in CRBM class)
        Nl: number of label units (group of softmax units)
        '''
        super(LCRBM,self).__init__(params)
        dtype = 'float32'
        bv = params.get('bv')
        Nl = params['Nl']
        Nv = self.Nv
        Nh = self.Nh
        Tv = self.Tv
        # add label units to visible units
        # W is initialized with uniformly sampled data
        # from -4.*sqrt(6./(Nv+Nh)) and 4.*sqrt(6./(Nh+Nv))
        W = np.asarray( np.random.uniform(
                low = -4*np.sqrt(6./(Nv+Nl+Nh)),
                high = 4*np.sqrt(6./(Nv+Nl+Nh)),
                size = (Nv+Nl, Nh)),
                dtype = dtype)
        W = gp.garray(W)
        Wv = np.asarray( np.random.uniform(
                low = -4*np.sqrt(6./(Nv*Tv+Nv)),
                high = 4*np.sqrt(6./(Nv*Tv+Nv)),
                size = (Nv*Tv, Nv)),
                dtype = dtype)
        Wv = gp.garray(Wv)
        if bv is None :
            bv = gp.zeros(Nv+Nl)
        else:
            bv = gp.garray(bv)
        # upper-triangular matrix of ones: dot(probs, cumsum) computes a
        # row-wise cumulative sum on the GPU (used by sample_categorical)
        cumsum = np.zeros((Nl,Nl),dtype=self.dtype)
        cumsum[np.triu_indices(Nl)] = 1
        cumsum = gp.garray(cumsum)
        # new label-unit params -------------------------------------------
        self.Nl = Nl # num label units
        self.cumsum = cumsum # upper triangular matrix for gpu cumsum
        self.W = W # (vis+lab)<->hid weights
        self.Wv = Wv # vis->vis delay weights
        self.bv = bv # vis bias
        self.W_update = gp.zeros((Nv+Nl,Nh))
        self.Wv_update = gp.zeros((Nv*Tv,Nv))
        self.bv_update = gp.zeros((Nv+Nl,))
        self.params += ['Nl','cumsum']
    def load_params(self,filename):
        '''load parameters from file'''
        super(LCRBM,self).load_params(filename)
        Nv,Nh,Nl,Tv = self.Nv,self.Nh,self.Nl,self.Tv
        dtype = self.dtype
        # re-create update buffers at the label-augmented sizes
        self.W_update = gp.zeros(self.W.shape)
        self.Wv_update = gp.zeros(self.Wv.shape)
        self.bv_update = gp.zeros(self.bv.shape)
    def save_params(self,filename=None):
        '''save parameters to file'''
        if filename is None:
            id = np.random.randint(100000)
            filename = 'LCRBM_%u.pkl' % id
        super(LCRBM,self).save_params(filename)
    def return_params(self):
        '''
        return a formatted string containing scalar parameters
        '''
        output = super(LCRBM,self).return_params()
        output = 'Nl=%u, ' % (self.Nl) + output
        return output
    def extract_data(self,v_input):
        # current frame carries Nv visible units plus Nl label units;
        # past windows contain only the Nv visible units per step
        Nv = self.Nv
        Nl = self.Nl
        Tv = self.Tv
        Th = self.Th
        Nvl = Nv + Nl
        if v_input.ndim == 1:
            v_data = v_input[-Nvl:]
            vv_past = v_input[-(Nv*Tv+Nvl):-Nvl]
            vh_past = v_input[-(Nv*Th+Nvl):-Nvl]
        else:
            v_data = v_input[:,-Nvl:]
            vv_past = v_input[:,-(Nv*Tv+Nvl):-Nvl]
            vh_past = v_input[:,-(Nv*Th+Nvl):-Nvl]
        return v_data, vv_past, vh_past
    def separate_vis_lab(self,x,axis=1):
        '''
        separate visible unit data from label unit data
        '''
        Nl = self.Nl
        if x.ndim == 1:
            axis = 0
        if axis == 0:
            x_lab = x[-Nl:]
            x_vis = x[:-Nl]
        elif axis == 1:
            x_lab = x[:,-Nl:]
            x_vis = x[:,:-Nl]
        return x_vis, x_lab
    def join_vis_lab(self,x_vis,x_lab,axis=1):
        '''
        join visible unit data to label unit data
        '''
        if x_vis.ndim == 1:
            axis = 0
        x = gp.concatenate((x_vis,x_lab),axis=axis)
        return x
    def mean_field_v_given_h(self,h,v_bias):
        '''compute mean-field reconstruction of P(vt|ht,v<t)'''
        x = v_bias + gp.dot(h, self.W.T)
        x_vis, x_lab = self.separate_vis_lab(x)
        # labels form a softmax group; visibles are sigmoid/rectified-linear
        lab_mean = softmax(x_lab)
        if self.vis_unit == 'binary':
            vis_mean = sigmoid(x_vis)
        elif self.vis_unit == 'linear':
            vis_mean = log_1_plus_exp(x_vis) - log_1_plus_exp(x_vis-self.vis_scale)
        means = self.join_vis_lab(vis_mean,lab_mean)
        return means
    def sample_v_given_h(self,h,v_bias):
        '''compute samples from P(vt|ht,v<t)'''
        if self.vis_unit == 'binary':
            means = self.mean_field_v_given_h(h,v_bias)
            vis_mean,lab_mean = self.separate_vis_lab(means)
            vis_samp = vis_mean.rand() < vis_mean
        elif self.vis_unit == 'linear':
            x = v_bias + gp.dot(h, self.W.T)
            x_vis,x_lab = self.separate_vis_lab(x)
            # variance of noise is sigmoid(x_vis) - sigmoid(x_vis - vis_scale)
            vis_stddev = gp.sqrt(sigmoid(x_vis) - sigmoid(x_vis - self.vis_scale))
            vis_mean = log_1_plus_exp(x_vis) - log_1_plus_exp(x_vis-self.vis_scale)
            vis_noise = vis_stddev * gp.randn(x_vis.shape)
            vis_samp = vis_mean + vis_noise
            # clamp samples to [0, vis_scale] via multiplies (garray-safe)
            vis_samp *= vis_samp > 0
            vis_over = vis_samp - self.vis_scale
            vis_over *= vis_over > 0
            vis_samp -= vis_over
            lab_mean = softmax(x_lab)
            means = self.join_vis_lab(vis_mean,lab_mean)
        # NOTE: the label group deliberately passes the softmax means
        # through instead of drawing a hard categorical sample
        #lab_samp = sample_categorical(lab_mean,self.cumsum)
        lab_samp = lab_mean
        samples = self.join_vis_lab(vis_samp,lab_samp)
        return samples, means
    def compute_gradients(self,v_input,h0,vk,hk):
        # batch-averaged CD gradients; only the visible (non-label) part
        # of the current frame feeds the autoregressive Wv gradient
        v0,vv_past,vh_past = self.extract_data(v_input)
        v0_vis,v0_lab = self.separate_vis_lab(v0)
        vk_vis,vk_lab = self.separate_vis_lab(vk)
        N = v0.shape[0]
        N_inv = 1./N
        W_grad = N_inv * (gp.dot(vk.T, hk) - gp.dot(v0.T, h0))
        Wv_grad = N_inv * (gp.dot(vv_past.T, vk_vis) - gp.dot(vv_past.T, v0_vis))
        Wh_grad = N_inv * (gp.dot(vh_past.T, hk) - gp.dot(vh_past.T, h0))
        bv_grad = gp.mean(vk - v0,axis=0)
        bh_grad = gp.mean(hk - h0,axis=0)
        return W_grad,Wv_grad,Wh_grad,bv_grad,bh_grad
    def compute_dynamic_bias(self,v_input):
        # only the visible portion of the bias receives the autoregressive
        # contribution; the label-unit biases stay static
        v_data,vv_past,vh_past = self.extract_data(v_input)
        v_bias = gp.tile(self.bv,[v_data.shape[0],1]).copy()
        v_bias[:,:self.Nv] += gp.dot(vv_past,self.Wv)
        h_bias = self.bh + gp.dot(vh_past,self.Wh)
        return v_bias, h_bias
    def label_probabilities(self,v_input,output_h=False):
        '''
        compute the activation probability of each label unit given the visible units
        '''
        #compute free energy for each label configuration
        #   F(v,c) = -sum(v*bv) - bl[c] - sum(log(1 + exp(z_c)))
        #   where z_c = bh + dot(v,W) + r[c] (r[c] are the weights for label c)
        #   also, v_data = [v,l], where l are binary "one-hot" labels
        v_data,vv_past,vh_past = self.extract_data(v_input)
        b_hid = self.bh + gp.dot(vh_past,self.Wh)
        b_vis, b_lab = self.separate_vis_lab(self.bv)
        v_vis, v_lab = self.separate_vis_lab(v_data)
        W_vis,W_lab = self.separate_vis_lab(self.W,axis=0)
        # the b_vis term cancels out in the softmax
        #F = -np.sum(v_vis*b_vis,axis=1)
        #F = F.reshape((-1,1)) - b_lab
        F = - b_lab
        # z holds hidden-unit inputs for every candidate label configuration
        z = b_hid + gp.dot(v_vis,W_vis)
        z = z.reshape(z.shape + (1,))
        z = z + W_lab.T.reshape((1,) + W_lab.T.shape)
        hidden_terms = -gp.sum(log_1_plus_exp(z), axis=1)
        F = F + hidden_terms
        # P(label=c | v) via softmax of negative free energies
        pr = softmax(-F)
        # compute hidden probs for each label configuration
        # this is used in the discriminative updates
        if output_h:
            h = sigmoid(z)
            return pr, h
        else:
            return pr
    def recon_error(self, v_input,K=1,print_output=False):
        '''compute K-step reconstruction error'''
        v0_data,vv_past,vh_past = self.extract_data(v_input)
        vk_mean = self.gibbs_samples(K,v_input,noisy=0)
        v0_vis,v0_lab = self.separate_vis_lab(v0_data)
        vk_vis,vk_lab = self.separate_vis_lab(vk_mean)
        vis_error = gp.mean(gp.abs(v0_vis - vk_vis))
        lab_error = gp.mean(gp.abs(v0_lab - vk_lab))
        lab_probs = self.label_probabilities(v_input)
        #pred_labs = gargmax(lab_probs)
        pred_labs = lab_probs.argmax(axis=1)
        ind, true_labs = gp.where(v0_lab == 1)
        percent_correct = gp.mean(pred_labs == true_labs)
        cross_entropy = -gp.mean(gp.log(lab_probs[ind,true_labs]))
        #prob_error = gp.mean(gp.abs(1. - lab_probs[ind,true_labs]))
        if print_output:
            output = '%30s %6.5f' % ('vis error:', vis_error/self.vis_scale) + '\n'
            output += '%30s %6.5f' % ('lab error:', lab_error) + '\n'
            #output += '%30s %6.5f' % ('prob error:', prob_error) + '\n'
            output += '%30s %6.5f' % ('cross entropy:', cross_entropy) + '\n'
            output += '%30s %6.5f' % ('class correct:', percent_correct)
            print output
            return output
        else:
            return percent_correct, cross_entropy, lab_error, vis_error/self.vis_scale
    def generate(self,seed,num_steps,K,start_beat=1,noisy=False):
        '''
        generate a sequence of length num_steps given the seed sequence

        input:
        seed - Nv dimensional sequence of length >= max(Tv,Th)
                flattened using row-major ordering
                (units in same time step nearest each other)
        num_steps - number of sequence steps to generate
        K - number of gibbs iterations per sample
        start_beat - beat number to start on
        noisy - noise level of gibbs samples [0,1,2,3] (see gibbs_samples() method)

        output:
        sequence - Nv dimensional sequence of length num_steps + seed length
        '''
        T = max(self.Tv,self.Th)
        Nv = self.Nv
        frame_size = Nv * T
        hop_size = Nv
        period_size = Nv * self.period
        if len(seed) < frame_size:
            raise ValueError, 'Seed not long enough'
        Nl = self.Nl
        # cycle the beat label 0..Nl-1, starting from start_beat (1-based)
        beat_labels = (np.arange(num_steps) + start_beat - 1) % Nl
        sequence = np.concatenate( (seed, np.zeros(num_steps * Nv))).astype('float32')
        sequence = gp.garray(sequence)
        idx = len(seed) - frame_size
        beat_idx = 0
        while idx+frame_size+Nv <= len(sequence):
            print idx+frame_size+Nv, 'of', len(sequence)
            v_input = sequence[idx:idx+frame_size+Nv]
            # one-hot label for the current beat, clamped during sampling
            l_curr = gp.zeros(Nl)
            l_curr[beat_labels[beat_idx]] = 1
            #v_input[-Nv:] = gp.rand(Nv)
            # seed current frame with the samples from one period ago
            v_input[-Nv:] = v_input[-period_size-Nv:-period_size]
            v_input = gp.concatenate([v_input,l_curr])
            v_curr = self.gibbs_samples_labels_clamped(K,v_input[None,:],noisy)
            sequence[idx+frame_size:idx+frame_size+Nv] = v_curr[0,:-Nl]
            idx += hop_size
            beat_idx += 1
        return sequence
    def gibbs_samples_labels_clamped(self,K,v_input,noisy=0):
        '''
        compute a visible unit sample using Gibbs sampling with label units clamped

        input:
        K - number of complete Gibbs iterations
        v_input - [v_past, v_curr_seed, l_curr] array flattened using row-major ordering
                * v_past of length Nv*max(Tv,Th)
                * v_curr_seed of length Nv
                * l_curr of length Nl
        noisy - 0 = always use visible means and use hidden means to drive final sample
                1 = drive final sample with final hidden sample
                2 = use visible means for updates but use visible and hidden samples for final update
                3 = always use samples for both visible and hidden updates
                note: hidden samples are always used to drive visible reconstructions unless noted otherwise
        '''
        Nv = self.Nv
        Nl = self.Nl
        v0_data,vv_past,vh_past = self.extract_data(v_input)
        l0_data = v0_data[:,-Nl:] # original labels
        v_bias,h_bias = self.compute_dynamic_bias(v_input)
        h0_samp,h0_mean = self.sample_h_given_v(v0_data,h_bias)
        hk_samp = h0_samp
        hk_mean = h0_mean
        # after every visible update the label units are reset to the
        # clamped values before inferring the hidden layer again
        if noisy < 3:
            for k in xrange(K-1): # hk_samp <--> vk_mean
                vk_mean = self.mean_field_v_given_h(hk_samp,v_bias)
                vk_mean[:,-Nl:] = l0_data
                hk_samp, hk_mean = self.sample_h_given_v(vk_mean,h_bias)
        else:
            for k in xrange(K-1): # hk_samp <--> vk_samp
                vk_samp, vk_mean = self.sample_v_given_h(hk_samp,v_bias)
                vk_samp[:,-Nl:] = l0_data
                hk_samp, hk_mean = self.sample_h_given_v(vk_samp,h_bias)
        if noisy == 0: # hk_mean --> v_mean
            v_mean = self.mean_field_v_given_h(hk_mean,v_bias)
            #pdb.set_trace()
            return v_mean
        elif noisy == 1: # hk_samp --> v_mean
            v_mean = self.mean_field_v_given_h(hk_samp,v_bias)
            return v_mean
        elif noisy > 1: # hk_samp --> v_samp
            v_samp, v_mean = self.sample_v_given_h(hk_samp,v_bias)
            return v_samp
def gargmax(x):
    '''
    compute argmax on gpu (across rows)

    input:
    x - [N x K] array
    output:
    args - length-N array of column indices of each row's maximum

    Note: when a row contains ties, the assignment loop below overwrites
    earlier hits, so the HIGHEST tied column index is returned (unlike
    np.argmax, which returns the first).
    '''
    maxes = gp.max(x,axis=1)
    # boolean mask of positions equal to the row maximum
    locs = x >= maxes.reshape((-1,1))
    num_maxes = gp.sum(locs,axis=1)
    if gp.any(num_maxes > 1):
        # slow path: at least one row has tied maxima
        N = x.shape[0]
        args = np.zeros(N,dtype='int64')
        inds = gp.where(locs)
        # later (higher-column) matches overwrite earlier ones per row
        args[inds[0]] = inds[1]
    else:
        # fast path: exactly one maximum per row
        args = gp.where(locs)[1]
    return args
def sigmoid(x):
    '''
    compute logistic sigmoid function avoiding overflow

    Delegates to gnumpy's numerically stable GPU implementation.
    '''
    return gp.logistic(x)
def softmax(x):
    '''
    compute the softmax of each row of x, guarding against
    over/underflow by shifting every row by its own maximum.
    '''
    # subtract the per-row max so the largest exponent is exp(0) = 1
    row_max = gp.max(x,axis=1).reshape((-1,1))
    exp_shifted = gp.exp(x - row_max)
    # normalize each row to sum to one
    return exp_shifted / gp.sum(exp_shifted,axis=1).reshape((-1,1))
def sample_categorical(probs,cumsum):
    '''
    sample from categorical distribution (1-sample multinomial distribution)

    input:
    probs - probabilities in each row add to one [N x K]
    cumsum - square upper triangular matrix of ones of size K
    output:
    samples - [N x K] binary array with a single 1 per row
    '''
    if probs.ndim == 1:
        # promote a single distribution to a 1-row matrix
        probs = probs.reshape((1,-1))
    N = probs.shape[0]
    # The matrix product with the upper-triangular ones matrix computes the
    # row-wise cumulative sum on the GPU (gnumpy has no cumsum primitive);
    # the last column (always 1) is dropped.
    #cdf = np.cumsum(probs, axis=1)[:,:-1]
    cdf = gp.dot(probs,cumsum)[:,:-1]
    #uni = np.random.uniform(size=(N,1))
    uni = gp.rand((N,1))
    # counting how many CDF thresholds the uniform draw passes gives the
    # sampled category index for each row
    category = gp.sum(uni >= cdf,axis=1)
    samples = gp.zeros(probs.shape)
    samples[np.arange(N),category] = 1
    return samples
def log_1_plus_exp(x):
    '''
    compute y = np.log(1+np.exp(x)) avoiding overflow

    Delegates to gnumpy's numerically stable GPU implementation.
    '''
    return gp.log_1_plus_exp(x)
def train_rbm(rbm,training_data,params={}):
    '''
    train an rbm using contrastive divergence

    input:
    rbm - an initialized rbm class
    training_data - [N x Nv] matrix of N observations of Nv dimensions
    params - dictionary of parameters

    params:
    num_epochs
    batch_size
    learning_rate - list of two same size lists: [targets,pcts]
    learning_momentum - list of two same size lists: [targets,pcts]
    gibbs_iters - list of two same size lists: [targets,pcts]
    weight_decay - L2 regularizer weight
    [disabled]adjust_rate - whether to dynamically adjust the learning rate
        to retain an average max update ratio around 0.001
    update_target - target percentage of magnitude for updates
        (used to adjust learning rate)
    decay_target - (learning_rate) percentage of pre-decay value to decay to
    decay_period - (learning_rate) percentage of num_epochs to decay over
    noisy - the noise level to use when drawing gibbs samples
        (see cdk methods of rbm classes)
    reshuffle - how often to reshuffle the data
        (set to > num_epochs to avoid reshuffling)

    output:
    stats - StatContainer with per-epoch parameter/update statistics
            (or None when num_epochs <= 0)

    NOTE(review): params={} is a mutable default argument; harmless here since
    params is only read via .get(), but params=None with an in-body default
    would be safer.
    '''
    # gather learning parameters --------------------------------------------
    num_epochs = params.get('num_epochs', 300) #one extra for initial stats
    batch_size = params.get('batch_size', 100)
    if num_epochs <= 0:
        print 'num_epochs <= 0, skipping\n'
        return None
    epoch_pct = np.linspace(0,1,num_epochs)
    # The three schedules below are piecewise-linear interpolations of
    # [targets, pcts] over the fraction of training completed.
    # learning rate
    targets, pcts = params.get('learning_rate', [[0.0001, 0.0001],[0,1]])
    # the trailing 0 leaves room for the epoch+1 writes in the adjust loop below
    learning_rate = np.r_[np.interp(epoch_pct,pcts,targets),0]
    # momentum
    targets, pcts = params.get('learning_momentum', [[0.001,0.05,0.05],[0,0.05,1]])
    learning_momentum = np.interp(epoch_pct,pcts,targets)
    # gibbs iterations
    targets, pcts = params.get('gibbs_iters', [[1,1],[0,1]])
    K = np.round(np.interp(epoch_pct,pcts,targets)).astype('uint16')
    weight_decay = params.get('weight_decay', 0.001)
    #adjust_rate = params.get('adjust_rate', True)
    update_target = params.get('update_target', None)
    noisy = params.get('noisy', 0)
    reshuffle = params.get('reshuffle', num_epochs+1)
    # learning rate decay parameter (used when update_target != None)
    decay_target = params.get('decay_target',0.1) # percentage of pre-decay value
    decay_period = params.get('decay_period',0.05) * num_epochs # time to decay over
    decay_start = int(num_epochs - decay_period)
    alpha = decay_target ** (1./decay_period) #autoregressive decay parameter
    # monitoring params
    save_hidden = False
    save_weights = False
    rng = np.random.RandomState(123) # init random number generator
    # ----------------------------------------------------------------------
    print '\n\nTraining RBM'
    print '-------------------------------------'
    print datetime.datetime.now()
    print type(rbm)
    print rbm.return_params() # model params
    print params # learning params
    print '-------------------------------------'
    print '\n'
    training_size = training_data.shape[0]
    num_batches = training_size/batch_size
    num_leftover = training_size - num_batches * batch_size
    # collect batches
    batches = []
    for batch in xrange(num_batches):
        batches += [slice(batch*batch_size,(batch+1)*batch_size)]
    if num_leftover > 0:
        # one final short batch for the remainder
        batches += [slice(num_batches*batch_size,num_batches*batch_size+num_leftover)]
        num_batches += 1
    stats = StatContainer(num_epochs+1)
    stats.init_stats(rbm.update_stats())
    # unique-ish run id derived from the wall clock (offset is arbitrary)
    param_id = int(time.time() - 1334729157)
    if save_hidden:
        bh_at_epoch = np.zeros((num_epochs+1,rbm.Nh),dtype='float32')
        bv_at_epoch = np.zeros((num_epochs+1,rbm.Nv),dtype='float32')
        bh_at_epoch[0] = rbm.bh
        bv_at_epoch[0] = rbm.bv
        hidden_act = rbm.mean_field_h_given_v_frame(training_data)
        fig = pl.figure(1); pl.clf(); ax = fig.add_subplot(111)
        pl.imshow(hidden_act,cmap = 'gray', aspect='auto', interpolation='nearest')
        fig.savefig('results/activations_at_epoch_%.4u.png' % (0,))
    if save_weights:
        weights = rbm.W
        fig = pl.figure(1); pl.clf(); ax = fig.add_subplot(111)
        pl.imshow(weights,cmap = 'gray', aspect='auto', interpolation='nearest')
        fig.savefig('results/W_at_epoch_%.4u.png' % (0,))
    t0 = time.time()
    for epoch in xrange(0, num_epochs):
        t = time.time()
        if epoch % reshuffle == 0:
            print '\n-----SHUFFLE TRAINING DATA-----\n'
            perm = rng.permutation(training_data.shape[0])
            training_data = training_data[perm]
        print '\nepoch:', epoch+1
        print 'learning rate:', learning_rate[epoch]
        print 'learning momentum:', learning_momentum[epoch]
        print 'contrastive divergence:', K[epoch]
        for batch in xrange(0,num_batches):
            rbm.cdk(K[epoch],training_data[batches[batch]],
                    learning_rate[epoch],learning_momentum[epoch],
                    weight_decay,noisy)
            if batch == 0:
                # stats are sampled on the first batch of each epoch only
                stats.add_stats(rbm.update_stats())
                max_update = stats.print_stats()
        print '\ntraining data'
        rbm.recon_error(training_data,K[epoch],print_output=True)
        if update_target is not None:
            if epoch < decay_start:
                # adjust learning rate to the sweet spot
                if max_update < 0.1 * update_target:
                    learning_rate[epoch+1] = learning_rate[epoch] * 2
                elif max_update > 10 * update_target:
                    learning_rate[epoch+1] = learning_rate[epoch] * 0.5
                elif max_update < 0.9 * update_target:
                    learning_rate[epoch+1] = learning_rate[epoch] * 1.1
                elif max_update > 1.2 * update_target:
                    learning_rate[epoch+1] = learning_rate[epoch] * 0.9
                else:
                    learning_rate[epoch+1] = learning_rate[epoch]
            else:
                # learning rate decays to a fraction of value before decay start
                learning_rate[epoch+1] = alpha * learning_rate[epoch]
        print 'time: ', time.time() - t, 'sec'
        if save_hidden:
            bh_at_epoch[epoch+1] = rbm.bh
            bv_at_epoch[epoch+1] = rbm.bv
            hidden_act = rbm.mean_field_h_given_v_frame(training_data)
            fig = pl.figure(1); pl.clf(); ax = fig.add_subplot(111)
            pl.imshow(hidden_act,cmap = 'gray', aspect='auto', interpolation='nearest')
            fig.savefig('results/activations_at_epoch_%.4u.png' % (epoch+1,))
        if save_weights:
            weights = rbm.W
            fig = pl.figure(1); pl.clf(); ax = fig.add_subplot(111)
            pl.imshow(weights,cmap = 'gray', aspect='auto', interpolation='nearest')
            fig.savefig('results/W_at_epoch_%.4u.png' % (epoch+1,))
    total_time = time.time() - t0
    print '\ntotal time: ', total_time, 'sec'
    print '\ntraining data'
    train_error = rbm.recon_error(training_data,K[epoch],print_output=True)
    return stats
class StatContainer(object):
    '''
    holds update stats for learning algorithms

    For each named parameter it records, per epoch, a 3-element summary of
    the parameter magnitudes (param_stats) and of the update magnitudes
    (update_stats), as produced by the model's update_stats() method.
    '''
    def __init__(self,num_epochs):
        '''
        input:
        num_epochs - number of epochs of statistics to allocate room for
        '''
        self.num_epochs = num_epochs
    def init_stats(self,stats_in):
        '''
        initialize the stats dictionaries with first sample

        input:
        stats_in - 2-element list of dicts [param_stats,update_stats];
                   the keys of the first dict name the tracked variables
        '''
        names = stats_in[0].keys()
        zero_stats = np.zeros((self.num_epochs,3))
        param_stats = {}
        update_stats = {}
        stat_names = []
        for n in names:
            stat_names += [n]
            # copy so each variable gets its own array, not a shared view
            param_stats[n] = zero_stats.copy()
            update_stats[n] = zero_stats.copy()
        self.param_stats = param_stats
        self.update_stats = update_stats
        self.stat_names = stat_names
        # start at -1 so the add_stats() call below stores into row 0
        self.epoch = -1
        self.add_stats(stats_in)
    def add_stats(self,stats_in):
        '''
        add a single epoch worth of stats to the array

        input:
        stats_in - 2-element list of dicts [param_stats,update_stats]
        '''
        self.epoch += 1
        param_stats_in = stats_in[0]
        update_stats_in = stats_in[1]
        for n in self.stat_names:
            self.param_stats[n][self.epoch] = param_stats_in[n]
            self.update_stats[n][self.epoch] = update_stats_in[n]
    def print_stats(self):
        '''
        print the stats from most recent epoch and output maximum ratio

        output:
        max_ratio - the largest update/parameter magnitude ratio (index 1
                    of each ratio vector) over all tracked variables
        '''
        print 'update ratios'
        max_ratio = 0.0
        for n in self.stat_names:
            # ratio of update size to parameter magnitude (element 1 = mean)
            ratio = self.update_stats[n][self.epoch]/self.param_stats[n][self.epoch][1]
            max_ratio = max(ratio[1],max_ratio)
            print '\t%s:\t' % n,
            for v in ratio:
                print '% .8f ' % v,
            print
        print 'average magnitudes'
        for n in self.stat_names:
            val = self.param_stats[n][self.epoch]
            print '\t%s:\t' % n,
            for v in val:
                print '% .8f ' % v,
            print
        return max_ratio
def set_initial_biases(params,training_data):
    '''
    set visible unit biases of CRBM to the appropriate value given training data statistics

    input:
    params - model parameter dict; reads 'Nv', 'Nl' and (optionally) 'vis_unit',
             and writes the combined visible/label bias into params['bv']
    training_data - [N x D] array whose last Nv+Nl columns are the current
                    visible frame followed by the label units
    '''
    # BUG FIX: eps was previously defined AFTER its first use inside this
    # function, which raised UnboundLocalError on the non-linear branch.
    eps = float(np.finfo('float32').eps)
    # initial vis unit bias
    Nv = params['Nv']
    Nl = params['Nl']
    # mean activation of the visible units and of the label units
    pv = training_data[:,-(Nv+Nl):-Nl].mean(axis=0)
    pl = training_data[:,-Nl:].mean(axis=0)
    if params.get('vis_unit') == 'linear':
        # linear (Gaussian) visible units: bias at the data mean
        bv = pv
    else:
        # binary visible units: inverse-sigmoid (logit) of the mean activation
        bv = gp.log(pv/(1-pv) + eps)
    # label biases: logit of label frequencies, stabilized with eps
    bl = gp.log(pl+eps) - gp.log(1-pl+eps)
    params['bv'] = gp.concatenate( (bv, bl) )
| ebattenberg/crbm-drum-patterns | crbm.py | Python | gpl-3.0 | 63,541 | 0.013047 |
#!/usr/bin/python
# coding: UTF-8
# Driver for testing pydPiper display
# Uses the curses system to emulate a display
# Written by: Ron Ritchey
import time, curses
import lcd_display_driver
class lcd_curses(lcd_display_driver.lcd_display_driver):
	'''Curses-backed emulation of a character LCD, used to test pydPiper
	display code on a terminal instead of real hardware.'''

	def __init__(self, rows=2, cols=16 ):
		# The curses emulation cannot render custom font glyphs.
		self.FONTS_SUPPORTED = False
		self.rows = rows
		self.cols = cols
		self.stdscr = curses.initscr()
		# Current write position (column, row).
		self.curx = 0
		self.cury = 0
		# Set up parent class.  Note. This must occur after display has been
		# initialized as the parent class may attempt to load custom fonts
		super(lcd_curses, self).__init__(rows,cols)

	def clear(self):
		'''Blank the emulated display and home the cursor.'''
		self.stdscr.clear()
		self.stdscr.refresh()
		self.curx = 0
		self.cury = 0

	def setCursor(self, row, col):
		'''Move the write position to (row, col).'''
		self.curx = col
		self.cury = row

	def loadcustomchars(self, char, fontdata):
		# Load custom characters: unsupported in the curses emulation.
		# BUG FIX: the exception was previously constructed but never raised,
		# so callers saw a silent no-op instead of an error.
		raise RuntimeError('Command loadcustomchars not supported')

	def cleanup(self):
		'''Restore the terminal to its normal state.'''
		curses.endwin()

	def message(self, text, row=0, col=0):
		''' Send string to LCD. Newline wraps to second line'''
		self.setCursor(row, col)
		self.stdscr.addstr(self.cury, self.curx, text.encode('utf-8'))
		self.stdscr.refresh()

	def msgtest(self, text, wait=1.5):
		'''Clear the display, show text, and pause for wait seconds.'''
		self.clear()
		# BUG FIX: previously called the module-global "lcd" instead of self,
		# which broke this method for any instance other than the demo one.
		self.message(text)
		time.sleep(wait)
if __name__ == '__main__':
	# Interactive demo: show a few test messages (including accented
	# characters) on the emulated display, then restore the terminal.
	try:
		print "Curses Display Test"
		lcd = lcd_curses(2,16)
		lcd.msgtest("Curses\nPi Powered",2)
		lcd.msgtest("This is a driver\nused for testing",2)
		accent_min = u"àáâãäçèéëêìíî \nïòóôöøùúûüþÿ"
		#for char in accent_min: print char, ord(char)
		lcd.msgtest(accent_min,2)
		lcd.clear()
	except KeyboardInterrupt:
		pass
	finally:
		# Always leave the terminal in a sane state, even after Ctrl-C.
		lcd.clear()
		lcd.message("Goodbye!")
		time.sleep(2)
		lcd.clear()
		curses.endwin()
		print "Curses Display Test Complete"
| dhrone/pydKeg | displays/lcd_curses.py | Python | mit | 1,770 | 0.039542 |
# -*- coding: utf-8 -*-
#
# English Language RTD & Sphinx config file
#
# Uses ../conf_common.py for most non-language-specific settings.
# Importing conf_common adds all the non-language-specific
# parts to this conf module
try:
from conf_common import * # noqa: F403,F401
except ImportError:
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from conf_common import * # noqa: F403,F401
import datetime
# Keep the copyright notice current without manual edits.
current_year = datetime.datetime.now().year
# General information about the project.
project = u'ESP-IDF 编程指南'
copyright = u'2016 - {} 乐鑫信息科技(上海)股份有限公司'.format(current_year)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'zh_CN'
| espressif/esp-idf | docs/zh_CN/conf.py | Python | apache-2.0 | 789 | 0 |
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
#Copyright (c) 1986 Nick Wong.
#Copyright (c) 2016-2026 TP-NEW Corp.
# License: TP-NEW (www.tp-new.com)
__author__ = "Nick Wong"
"""
用asyncio提供的@asyncio.coroutine可以把一个generator标记为coroutine类型,然后在coroutine内部用yield from调用另一个coroutine实现异步操作
从Python 3.5开始引入了新的语法async和await,可以让coroutine的代码更简洁易读
#generator(生成器)
#coroutine(协程)
async和await是针对coroutine的新语法,要使用新的语法,只需要做两步简单的替换:
1.把@asyncio.coroutine替换为async;
2.把yield from替换为await。
"""
import asyncio
######### Old-style code #########
# Legacy (pre-3.5) coroutine style kept for comparison: a generator marked
# with @asyncio.coroutine, using "yield from" to await.
# NOTE(review): @asyncio.coroutine is deprecated and removed in Python 3.11+.
@asyncio.coroutine
def hello():
    print('Hello World!')
    r = yield from asyncio.sleep(2)
    print('Hello again!')
######### New-style code #########
async def hello1(): # Note: the 'async def' header must stay on one line; splitting it is a syntax error
    print('Hello World! 1')
    r = await asyncio.sleep(2)
    print('Hello again! 1')
# Get the EventLoop:
# NOTE(review): get_event_loop()/run_until_complete() is the legacy driver
# style; modern code would use asyncio.run().
loop = asyncio.get_event_loop()
# Run each coroutine to completion:
loop.run_until_complete(hello())
loop.run_until_complete(hello1())
loop.close()
| nick-huang-cc/GraffitiSpaceTT | UnderstandStudyPython/IO_coroutine_stu1.py | Python | agpl-3.0 | 1,188 | 0.011752 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Steps to transform projected coords to geodetic/projected/GK and export to DXF
from geod_proy import *
from toDXF import *
#
# 1) Configure the projections that will be used
#
# 1.1) An arbitrary Transverse Mercator projection.
lat_orig = gms2gyf(-33,52)
merid_c = gms2gyf(-61,14)
pserapio = config_proy(lat_orig, merid_c)
# 1.2) Gauss-Kruger Zone 5
gk_faja5 = proyGK(5)
#
# 2) Transform between geodetic and projected coordinates
#
# 2.1) The pipeline is proy -> geod -> proy -> geod. Comparing the output
#      files, both "proy" files should match, and so should both "geod" files.
# proy -> proy.geod
proy2geod('coord/proy', pserapio)
# proy.geod -> proy.geod.proy
geod2proy('coord/proy.geod', pserapio)
# proy.geod.proy -> proy.geod.proy.geod
proy2geod('coord/proy.geod.proy', pserapio)
#
# 3) Transform between geodetic and projected coordinates (GK Zone 5)
#
# 3.1) The pipeline is geod -> gk5 -> geod -> gk5. Comparing the output files,
#      both "gk5" files should match, and so should both "geod" files.
# proy.geod -> proy.geod.gk5
geod2proy('coord/proy.geod', gk_faja5, 'gk5')
# proy.geod.gk5 -> proy.geod.gk5.geod
proy2geod('coord/proy.geod.gk5', gk_faja5)
# proy.geod.gk5.geod -> proy.geod.gk5.geod.gk5
geod2proy('coord/proy.geod.gk5.geod', gk_faja5, 'gk5')
# proy.geod.gk5.geod.gk5 -> proy.geod.gk5.geod.gk5.geod
proy2geod('coord/proy.geod.gk5.geod.gk5', gk_faja5)
#
# 4) Export to DXF
#
# Only projected coordinates make sense to export to DXF.
coord2dxf('coord/proy')
coord2dxf('coord/proy.geod.proy')
coord2dxf('coord/proy.geod.gk5')
coord2dxf('coord/proy.geod.gk5.geod.gk5')
| quijot/agrimpy-package | agrimpy/test.py | Python | mit | 1,621 | 0.001858 |
import os
import sys
import codecs
from fnmatch import fnmatchcase
from distutils.util import convert_path
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory."""
    # BUG FIX: use a context manager so the file handle is closed promptly;
    # the original opened the file and relied on garbage collection.
    with codecs.open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
# Default exclude patterns for find_package_data() below: package_data should
# not include Python sources, caches, backups or VCS/build directories.
standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
                                './dist', 'EGG-INFO', '*.egg-info')
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Note: you may want to copy this into your setup.py file verbatim, as
# you can't import this from another package, when you don't know if
# that package is installed yet.
def find_package_data(
    where='.', package='',
    exclude=standard_exclude,
    exclude_directories=standard_exclude_directories,
    only_in_packages=True,
    show_ignored=False):
    """
    Return a dictionary suitable for use in ``package_data``
    in a distutils ``setup.py`` file.

    The dictionary looks like::

        {'package': [files]}

    Where ``files`` is a list of all the files in that package that
    don't match anything in ``exclude``.

    If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
    will).

    Directories matching any pattern in ``exclude_directories`` will
    be ignored; by default directories with leading ``.``, ``CVS``,
    and ``_darcs`` will be ignored.

    If ``show_ignored`` is true, then all the files that aren't
    included in package data are shown on stderr (for debugging
    purposes).

    Note patterns use wildcards, or can be exact paths (including
    leading ``./``), and all searching is case-insensitive.
    """
    out = {}
    # Breadth-first walk: each stack entry is
    # (directory, path prefix inside the package, dotted package name,
    #  whether files here are skipped when outside a package).
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print >> sys.stderr, (
                                "Directory %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                if (os.path.isfile(os.path.join(fn, '__init__.py'))
                    and not prefix):
                    # Subdirectory is itself a package: recurse into it under
                    # its own dotted name with an empty file prefix.
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    # Plain data directory: keep accumulating the path prefix.
                    stack.append((fn, prefix + name + '/', package, only_in_packages))
            elif package or not only_in_packages:
                # is a file
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print >> sys.stderr, (
                                "File %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix + name)
    return out
# Long description and version are pulled from the package itself so they
# are defined in exactly one place.
README = read('README.rst')
PACKAGE = "like_button"
VERSION = __import__(PACKAGE).__version__

setup(
    name='django-like-button',
    version=VERSION,
    description='Django App for adding a Facebook like button',
    maintainer='John Costa',
    maintainer_email='john.costa@gmil.com',
    url='https://github.com/johncosta/django-like-button',
    classifiers=[
        'Programming Language :: Python',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities'
    ],
    package_data = find_package_data(PACKAGE, only_in_packages=False),
    packages=find_packages(),
    long_description = README,
    setup_requires = [
        'versiontools >= 1.8.2',
    ],
)
| johncosta/django-like-button | setup.py | Python | bsd-3-clause | 4,721 | 0.003601 |
import os
from datetime import datetime, timezone

import pytz
class PrintingLogObserver(object):
    """Log observer that formats structured log events and writes one line
    per event to a file-like object."""

    def __init__(self, fp):
        # fp: any object with a write(str) method (file, stream, StringIO).
        self.fp = fp

    def __call__(self, event):
        """Format *event* (a dict) and write it to self.fp.

        Prefers the 'log_format' template (interpolated with the event itself);
        falls back to the plain 'message' field.
        """
        if event.get('log_format', None):
            message = event['log_format'].format(**event)
        else:
            message = event.get('message', '')
        pid = str(event.get('pid', os.getpid()))
        log_struct = {
            # Use the stdlib UTC timezone instead of the third-party pytz
            # dependency; datetime.timezone.utc is equivalent here.
            'time': datetime.fromtimestamp(event['log_time'], timezone.utc).time().replace(microsecond=0).isoformat(),
            'pid': pid,
            # Last component of the (callback) namespace identifies the source.
            'source': event.get('cb_namespace', event['log_namespace']).split('.')[-1],
            'message': message,
            # Pad source so the pid column lines up across log lines.
            'ws': max(0, 35 - len(pid))
        }
        self.fp.write('{time} [{source:<{ws}} {pid}] {message}\n'.format(**log_struct))
| MD-Studio/MDStudio | mdstudio/mdstudio/logging/impl/printing_observer.py | Python | apache-2.0 | 805 | 0.003727 |
#!/usr/bin/env python
#
# HPFeeds.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import sys
import os
import struct
import socket
import hashlib
import logging
import json
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
log = logging.getLogger("Thug")
class FeedUnpack(object):
    '''Incremental decoder for the hpfeeds wire protocol.

    Frames are: 4-byte big-endian total length (header included) followed by
    a 1-byte opcode and the payload. feed() appends raw bytes; iterating
    yields (opcode, payload) tuples until no complete frame remains.

    NOTE(review): uses the Python-2-only next()/buffer() APIs, and raising
    StopIteration to signal "no message" ties iteration to Python 2 semantics.
    '''
    def __init__(self):
        self.buf = bytearray()
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol; delegates to unpack().
        return self.unpack()
    def feed(self, data):
        '''Append raw bytes received from the socket to the buffer.'''
        self.buf.extend(data)
    def unpack(self):
        '''Pop and return one complete (opcode, payload) frame, or stop.'''
        if len(self.buf) < 5:
            # not even a full header yet
            raise StopIteration('No message')
        ml, opcode = struct.unpack('!iB', buffer(self.buf, 0, 5))
        if len(self.buf) < ml:
            # header present but payload still incomplete
            raise StopIteration('No message')
        data = bytearray(buffer(self.buf, 5, ml - 5))
        # drop the consumed frame from the buffer
        del self.buf[:ml]
        return opcode, data
class HPFeeds(object):
    '''Publisher that pushes Thug events, file samples and warnings to an
    hpfeeds broker. Broker host/port/ident/secret are read from the
    [hpfeeds] section of ../logging.conf.'''
    # Log-format modules this logger can forward (looked up in ThugLogging).
    formats = ('maec11', )
    # hpfeeds wire-protocol opcodes.
    OP_ERROR = 0
    OP_INFO = 1
    OP_AUTH = 2
    OP_PUBLISH = 3
    OP_SUBSCRIBE = 4
    def __init__(self, thug_version):
        # NOTE(review): thug_version is accepted but never stored or used.
        self.unpacker = FeedUnpack()
        self.opts = dict()
        self.url = ""
        self.__init_config()
    def __init_config(self):
        '''Load all [hpfeeds] options from logging.conf into self.opts.'''
        config = ConfigParser.ConfigParser()
        conf_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, 'logging.conf')
        config.read(conf_file)
        for option in config.options('hpfeeds'):
            self.opts[option] = str(config.get('hpfeeds', option))
    def set_url(self, url):
        '''Remember the URL currently being analyzed (reported with warnings).'''
        self.url = url
    def msg_hdr(self, op, data):
        # Frame header: total length (4 bytes, big-endian) + 1-byte opcode.
        return struct.pack('!iB', 5 + len(data), op) + data
    def msg_publish(self, chan, data):
        '''Build an OP_PUBLISH frame (ident and channel are length-prefixed).'''
        #if isinstance(data, str):
        #    data = data.encode('latin1')
        return self.msg_hdr(self.OP_PUBLISH,
                            struct.pack('!B', len(self.opts['ident'])) +
                            self.opts['ident'] +
                            struct.pack('!B', len(chan)) +
                            chan +
                            data)
    def msg_auth(self, rand):
        '''Build an OP_AUTH frame answering the server nonce with SHA1(nonce+secret).'''
        # NOTE(review): "hash" shadows the builtin of the same name.
        hash = hashlib.sha1(rand + self.opts['secret']).digest()
        return self.msg_hdr(self.OP_AUTH,
                            struct.pack('!B', len(self.opts['ident'])) +
                            self.opts['ident'] +
                            hash)
    def msg_send(self, msg):
        self.sockfd.send(msg)
    def get_data(self, host, port):
        '''Connect to the broker and return its banner data, or None on failure.'''
        self.sockfd.settimeout(3)
        try:
            self.sockfd.connect((host, port))
        except:
            # NOTE(review): bare except also swallows non-socket errors.
            log.warning('[HPFeeds] Unable to connect to broker')
            return None
        try:
            d = self.sockfd.recv(1024)
        except socket.timeout:
            log.warning('[HPFeeds] Timeout on banner')
            return None
        self.sockfd.settimeout(None)
        return d
    def publish_data(self, d, chan, pubdata):
        '''Answer the broker handshake, then publish pubdata on channel chan.'''
        published = False
        while d and not published:
            self.unpacker.feed(d)
            # Drain every complete frame currently buffered.
            for opcode, data in self.unpacker:
                if opcode == self.OP_INFO:
                    # OP_INFO payload: length-prefixed server name, then the
                    # auth nonce. buffer() is Python-2 only.
                    rest = buffer(data, 0)
                    name, rest = rest[1:1 + ord(rest[0])], buffer(rest, 1 + ord(rest[0]))
                    rand = str(rest)
                    self.msg_send(self.msg_auth(rand))
                    self.msg_send(self.msg_publish(chan, pubdata))
                    published = True
                    # short timeout so the trailing recv loop exits quickly
                    self.sockfd.settimeout(0.1)
                if opcode == self.OP_ERROR:
                    log.warning('[HPFeeds] Error message from server: {0}'.format(data))
            try:
                d = self.sockfd.recv(1024)
            except socket.timeout:
                break
    def __log_event(self, pubdata):
        '''Open a fresh broker connection and publish pubdata on thug.events.'''
        self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        data = self.get_data(self.opts['host'], int(self.opts['port']))
        if data is None:
            return
        self.publish_data(data, 'thug.events', pubdata)
        self.sockfd.close()
    def log_event(self, basedir):
        '''Publish the analysis report for basedir using the first available format.'''
        if log.ThugOpts.local:
            return
        m = None
        for module in self.formats:
            if module in log.ThugLogging.modules:
                p = log.ThugLogging.modules[module]
                # e.g. get_maec11_data(basedir)
                m = getattr(p, 'get_%s_data' % (module, ), None)
                if m:
                    break
        if m is None:
            return
        data = m(basedir)
        self.__log_event(data)
    def log_file(self, pubdata, url = None, params = None):
        '''Publish a captured file sample (JSON-encoded) on thug.files.'''
        # NOTE(review): url and params are accepted but unused.
        if log.ThugOpts.local:
            return
        self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        data = self.get_data(self.opts['host'], int(self.opts['port']))
        if data is None:
            return
        self.publish_data(data, 'thug.files', json.dumps(pubdata))
        self.sockfd.close()
    def log_warning(self, pubdata):
        '''Publish a warning (tagged with the current URL) on thug.warnings.'''
        if log.ThugOpts.local:
            return
        self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        data = self.get_data(self.opts['host'], int(self.opts['port']))
        if data is None:
            return
        self.publish_data(data, 'thug.warnings', json.dumps({'url': self.url, 'warning': pubdata}))
        self.sockfd.close()
if __name__ == '__main__':
    # Manual smoke test against the configured broker.
    # BUG FIX: HPFeeds.__init__ requires a thug_version argument; calling
    # HPFeeds() with no arguments raised TypeError.
    hpfeeds = HPFeeds('test')
    hpfeeds.log_event('Test foobar!')
| amohanta/thug | src/Logging/modules/HPFeeds.py | Python | gpl-2.0 | 6,225 | 0.008193 |
#This is a function containing an algorithmic model of the Scribner log rule,
# board-foot log volume tables. It outputs the Scribner log volume for an
# input log length and top diameter.
#
# Annotation: [v]=logvolume(L,TD)
# v = Scribner log volume
# L = log length
# TD = top diameter
import sys
# Scribner board-foot factors, multiplied by log length L in logvolume_2().
# volume_table_1: one factor per top diameter, index 0 -> TD 5 in, then
# TD 12..32 in (TD - 11). TODO(review): confirm against the published tables.
volume_table_1 = [1.07,4.9,6.043,7.14,8.88,10.0,11.528,13.29,14.99,17.499,18.99,20.88,23.51,25.218,28.677,31.249,34.22,36.376,38.04,41.06,44.376,45.975]
# volume_table_2: factors for TD 6..11 in, in three length bands
# (L<16, L<31, L<41) of six entries each; indexed as (TD-6) + 6*band.
volume_table_2 = [1.160,1.400,1.501,2.084,3.126,3.749 , 1.249,1.608,1.854,2.410,3.542,4.167 , 1.57,1.8,2.2,2.9,3.815,4.499]
def logvolume_2(L,TD):
    '''Return the Scribner board-foot volume for a log of length L (feet)
    and top diameter TD (inches); out-of-range inputs print an error and
    yield a volume of 0 (by zeroing L).'''
    L = L - (0.8333) #Account for 10 inch over cut
    if TD < 5:
        L = 0 # makes v = 0 in the output
        print "Top diameter reached:", TD
        TD = 11 # handles out-of-bounds errors
        print "Error! Top diameter minimum limit of 5 inches."
    elif TD >= 32:
        print 'Error! %3.1f inch top diameter exceeds the current 32.0 inch program capability.\n' %TD
        L = 0
        TD = 11
    elif L > 40:
        print "Log length reached:", L
        L = 0
        print 'Error! Maximum log length is 40 feet.'
    elif L < 1:
        # NOTE(review): the message claims a 16-foot minimum but the check
        # only rejects lengths below 1 foot (after the overcut deduction).
        print "Log length reached:", L
        L = 0
        print 'Error! Minimum log length is 16 feet.'
    # TD 6..11 uses the banded table; other diameters use volume_table_1.
    if (TD >= 6) & (TD <= 11):
        TD = TD - 6 # normalize TD with 6 for array indexing
        # Band offset: +0 for L<16, +6 for L<31, +12 for L<41.
        if L < 16:
            v = 10 * round((L * volume_table_2[TD]) / 10.0)
        elif L < 31:
            v = 10 * round((L * volume_table_2[TD + 6]) / 10.0)
        elif L < 41:
            v = 10 * round((L * volume_table_2[TD + 12]) / 10.0)
        else:
            v = 0
    else:
        if TD == 5:
            v = 10 * round((L * volume_table_1[0]) / 10.0)
        else:
            # TD >= 12 (or the error fallback TD=11 never reaches here)
            v = 10 * round((L * volume_table_1[TD - 11]) / 10.0)
    return v
def debug_logvolume():
    '''Interactively prompt for a length and top diameter and print the volume.'''
    print
    v = logvolume_2(input("length: "),input("topdia: "))
    print "volume is:", v
| dhazel/buck | bucking/logvolume_2.py | Python | gpl-2.0 | 2,031 | 0.031019 |
__author__ = 'Cjsheaf'
import csv
from threading import Lock
class ResultsWriter:
    """ This class is designed to take out-of-order result data from multiple threads and write
    them to an organized csv-format file.

    All data is written to disk at the very end via the "write_results" method, since there
    is no way to know how many results there will be ahead of time, and they will not arrive
    in any particular order.
    """
    def __init__(self, csv_filename):
        # Destination path for write_results().
        self.csv_filename = csv_filename
        # Maps entry name -> Entry.
        self.entries = {}

    def put_rmsd(self, entry_name, rmsd):
        """Record the RMSD for the named entry, creating the entry if needed."""
        if self.entries.get(entry_name) is None:
            self.entries[entry_name] = Entry(entry_name)
        # BUG FIX: previously assigned self.entries.rmsd (an attribute on the
        # dict itself) instead of storing the value on the named entry.
        self.entries[entry_name].rmsd = rmsd

    def put_compound_scores(self, entry_name, scores):
        """ Argument 'scores' should be a 9-item tuple or list:
        (name, rseq, mseq, rmsd_refine, e_conf, e_place, e_score1, e_score2, e_refine)
        """
        # BUG FIX: the original used "is not 9" (identity, not equality).
        if len(scores) != 9:
            raise ValueError(
                'Attempted to save results for a compound "{compound}" in entry "{entry}"'
                'with {num_scores} number of results. Expected 9 results.'.format(
                    # BUG FIX: scores(0) attempted to CALL the sequence.
                    compound=(scores[0] if len(scores) else '<none>'),
                    entry=entry_name,
                    num_scores=len(scores)
                )
            )
        if self.entries.get(entry_name) is None:
            self.entries[entry_name] = Entry(entry_name)
        # BUG FIX: indices 1..9 overran the 9-item sequence (IndexError) and
        # shifted every field by one; use 0..8 so scores[0] is the name.
        self.entries[entry_name].compounds.append(
            Compound(scores[0], scores[1], scores[2], scores[3], scores[4],
                     scores[5], scores[6], scores[7], scores[8])
        )

    def _sanity_check_entry(self):
        """Verify every entry has an RMSD and at least one fully-populated compound."""
        # BUG FIX: iterating the dict directly yields key strings; iterate the
        # Entry values instead.
        for e in self.entries.values():
            if e.rmsd is None:
                raise RuntimeError('Entry "{entry}" has no RMSD!'.format(entry=e.name))
            # BUG FIX: "len(...) is 0" compared identity; use a truth test.
            if not e.compounds:
                raise RuntimeError('Entry "{entry}" has no compounds!'.format(entry=e.name))
            for c in e.compounds:
                if c.mseq is None:
                    raise NotImplementedError

    def write_results(self):
        """Write all collected entries and their compounds to self.csv_filename."""
        # BUG FIX: the writer was built from the nonexistent self.csv_file, and
        # rows were passed as many positional args; csv.writer.writerow takes a
        # single sequence. The file is now also closed via a context manager.
        with open(self.csv_filename, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(['name', 'rmsd', 'compound', 'rseq', 'mseq', 'rmsd_refine',
                             'e_conf', 'e_place', 'e_score1', 'e_score2', 'e_refine'])
            for e in self.entries.values():
                writer.writerow([e.name, e.rmsd])
                for c in e.compounds:
                    # Two leading blanks indent compound rows under their entry.
                    writer.writerow(['', '', c.name, c.rseq, c.mseq, c.rmsd_refine,
                                     c.e_conf, c.e_place, c.e_score1, c.e_score2,
                                     c.e_refine])
class Entry:
    """A named result entry holding one RMSD value and a list of Compound results."""
    def __init__(self, name):
        self.name = name        # entry identifier
        self.rmsd = None        # filled in later (e.g. via ResultsWriter.put_rmsd)
        self.compounds = []     # list of Compound objects

    def add_compound(self, compound_name, compound):
        """Append *compound* to this entry.

        compound_name is retained for interface compatibility; the name is
        already carried on the Compound object itself.
        """
        # BUG FIX: self.compounds is a list, so indexing it with a string name
        # raised TypeError; append the compound instead.
        self.compounds.append(compound)
class Compound:
    """Record of per-compound docking results: identity, sequence numbers,
    refined RMSD and the energy scores produced by each scoring stage."""
    def __init__(self, name, rseq, mseq, rmsd_refine, e_conf, e_place,
                 e_score1, e_score2, e_refine):
        # Bind all fields in one tuple assignment, in declaration order.
        (self.name, self.rseq, self.mseq, self.rmsd_refine,
         self.e_conf, self.e_place, self.e_score1, self.e_score2,
         self.e_refine) = (name, rseq, mseq, rmsd_refine,
                           e_conf, e_place, e_score1, e_score2, e_refine)
| Cjsheaf/Variation-Discovery-Pipeline | Pipeline_Core/Results_Writer.py | Python | gpl-2.0 | 3,199 | 0.003439 |
# Interactive script: read marks until the user hits enter, then print the
# average. NOTE(review): eval() on raw user input executes arbitrary Python
# (security risk) and an invalid first entry (e.g. blank) raises; fine for a
# teaching exercise, never for untrusted input.
total = 0
n = 0
stop = 0
nextMark = input('Type in a mark: ')
while stop == 0:
    # convert the typed text to a number (eval accepts ints and floats)
    nextMark = eval(nextMark)
    total = total+nextMark
    n = n + 1
    nextMark = input('Hit enter to stop, or type in a mark: ')
    if nextMark == "":
        stop = 1
# n >= 1 here because the loop always processes the first mark before stopping.
print("You entered", n, 'marks. The average is:',total/n)
| MrColwell/PythonProfessionalLearning | PythonForTeachers/studentExercises/8_2_average.py | Python | mit | 339 | 0.0059 |
#!/usr/bin/env python
import subprocess as sp
from runtest import TestBase
XDIR='xxx'
YDIR='yyy'
class TestCase(TestBase):
    def __init__(self):
        # The triple-quoted string is the expected (reference) output of
        # "uftrace report --diff" and is compared after sort() normalizes it;
        # its exact spacing is significant.
        TestBase.__init__(self, 'diff', """
#
# uftrace diff
#  [0] base: xxx   (from uftrace record -d yyy -F main tests/t-diff 1 )
#  [1] diff: yyy   (from uftrace record -d xxx -F main tests/t-diff 0 )
#
                  Total time (diff)                    Self time (diff)                     Calls (diff)   Function
   ===================================   ===================================   ================================   ================================================
     1.075 us    1.048 us   -0.027 us      1.075 us    1.048 us   -0.027 us             1          1         +0   atoi
   158.971 us    0.118 us -158.853 us      1.437 us    0.118 us   -1.319 us             1          1         +0   bar
     1.235 ms    0.645 us   -1.235 ms      3.276 us    0.527 us   -2.749 us             1          1         +0   foo
     1.309 ms    3.975 us   -1.305 ms      2.601 us    2.282 us   -0.319 us             1          1         +0   main
     1.300 ms          -    -1.300 ms      1.300 ms          -    -1.300 ms             3          0         -3   usleep
""")

    def prerun(self, timeout):
        # Record the same test binary twice (with different arguments) into
        # two data directories so the diff has a base and a target.
        self.subcmd = 'record'
        self.option = '-d %s -F main' % XDIR
        self.exearg = 't-' + self.name + ' 0'
        record_cmd = self.runcmd()
        self.pr_debug('prerun command: ' + record_cmd)
        sp.call(record_cmd.split())

        self.option = '-d %s -F main' % YDIR
        self.exearg = 't-' + self.name + ' 1'
        record_cmd = self.runcmd()
        self.pr_debug('prerun command: ' + record_cmd)
        sp.call(record_cmd.split())
        return TestBase.TEST_SUCCESS

    def setup(self):
        # Compare YDIR against XDIR with the full, non-absolute diff policy.
        self.subcmd = 'report'
        self.option = '--diff-policy full,no-abs -s call,total'
        self.exearg = '-d %s --diff %s' % (YDIR, XDIR)

    def sort(self, output):
        """ This function post-processes output of the test to be compared .
        It ignores blank and comment (#) lines and remaining functions.  """
        result = []
        for ln in output.split('\n'):
            if ln.startswith('#') or ln.strip() == '':
                continue
            line = ln.split()
            if line[0] == 'Total':
                continue
            if line[0].startswith('='):
                continue
            # A report line consists of following data
            # [0]  [1]  [2]  [3]  [4]  [5]  [6]  [7]  [8]  [9]     [10]    [11]    [12]   [13]   [14]     [15]
            # tT/0 unit tT/1 unit tT/d unit tS/0 unit tS/1 unit tS/d unit  call/0 call/1 call/d  function
            if line[-1].startswith('__'):
                continue
            # keep only the calls columns and the function name
            result.append('%s %s %s %s' % (line[-4], line[-3], line[-2], line[-1]))

        return '\n'.join(result)
| namhyung/uftrace | tests/t159_report_diff_policy2.py | Python | gpl-2.0 | 2,877 | 0.004866 |
""" Initializer
Initialize application data.
Created by Lahiru Pathirage @ Mooniak<lpsandaruwan@gmail.com> on 19/12/2016
"""
from session import Base
from session import mysql_con_string
from sqlalchemy import create_engine
from utility import DBManager
def initialize():
    """Create all ORM-mapped tables (if they don't already exist) and
    refresh the application's font cache."""
    db_engine = create_engine(mysql_con_string)
    # checkfirst avoids errors when the tables are already present.
    Base.metadata.create_all(db_engine, checkfirst=True)
    DBManager().update_font_cache()
| fontman/fontman-server | utility/Initializer.py | Python | gpl-3.0 | 428 | 0 |
#!/usr/bin/env python
# coding=utf-8
import commands
import sys
from docopt import docopt
#from handler import LogFileClient
from sdutil.log_util import getLogger
from sdutil.date_util import *
reload(sys)
sys.setdefaultencoding('utf-8')
from elasticsearch import Elasticsearch
from pdb import *
import requests
import json
logger = getLogger(__name__, __file__)
"""
host like:"http://172.17.0.33:8081"
"""
def count_from_es(host,index,query_str,startTime,endTime,scroll=False):
    """Return the Elasticsearch count-API response for *query_str*
    restricted to documents whose @timestamp lies in [startTime, endTime].
    """
    logger.info('search_from_es startTime:%s,endTime:%s'%(startTime,endTime))
    # Convert the boundaries to epoch milliseconds; the end boundary is
    # pushed to the last millisecond of its second so the range is inclusive.
    start_ms = int(str2timestamp(startTime)) * 1000
    end_ms = int(str2timestamp(endTime)) * 1000 + 999
    time_filter = {"range": {"@timestamp": {"gte": start_ms, "lte": end_ms, "format": "epoch_millis"}}}
    query_body = {
        "query": {
            "filtered": {
                "query": {"query_string": {"query": query_str, "analyze_wildcard": 'true'}},
                "filter": {"bool": {"must": [time_filter], "must_not": []}},
            }
        }
    }
    logger.info('search_from_es,post_data:%s'%(query_body))
    client = Elasticsearch(host, timeout=120)
    return client.count(index=index, body=query_body)
def do_search(host,index,query_str,startTimeStamp,endTimeStamp,scroll,_source,time_step):
    """Run one Elasticsearch query and return the raw response dict.

    host, index     -- ES endpoint and index pattern
    query_str       -- Lucene query string
    startTimeStamp, endTimeStamp -- @timestamp range in epoch milliseconds
    scroll          -- page through all hits with the scan/scroll API when
                       True (needed for result sets beyond the 10000 cap)
    _source         -- optional list of _source fields to fetch
    time_step       -- unused here; kept for signature compatibility with
                       search_from_es()

    The returned dict always carries a 'hits' entry; in scroll mode the
    hits of every page are merged into it.
    """
    es = Elasticsearch(host, timeout=120)
    response = {}
    data_post_search = {"query":{"filtered":{"query":{"query_string":{"query":query_str,"analyze_wildcard":'true'}},"filter":{"bool":{"must":[{"range":{"@timestamp":{"gte":startTimeStamp,"lte":endTimeStamp,"format":"epoch_millis"}}}],"must_not":[]}}}}}
    logger.info('search_from_es,post_data:%s'%(data_post_search))
    if not scroll:
        # A single request suffices; ES caps a plain search at 10000 hits.
        if _source:
            response = es.search(index=index, body=data_post_search, size=10000, _source=_source)
        else:
            response = es.search(index=index, body=data_post_search, size=10000)
    else:
        page_size = 10000
        if _source:
            scan_resp = es.search(index=index, body=data_post_search, search_type="scan", scroll="5m", size=page_size, _source=_source)
        else:
            scan_resp = es.search(index=index, body=data_post_search, search_type="scan", scroll="5m", size=page_size)
        scrollId = scan_resp['_scroll_id']
        total = scan_resp['hits']['total']
        # Fetch every page and merge its hits into the accumulated response.
        # 'has_key' was replaced with the 'in' operator (has_key is removed
        # in Python 3) and '/' with '//' so the page count stays an integer
        # division on both Python 2 and 3.
        for page_num in range(total // page_size + 1):
            response_tmp = es.scroll(scroll_id=scrollId, scroll="5m")
            # Scroll ids may change between pages; always use the latest one.
            # (Scroll contexts are left to expire after their 5m timeout.)
            scrollId = response_tmp['_scroll_id']
            if 'hits' in response:
                _hits = response['hits']
                _hits['hits'] += response_tmp['hits']['hits']
                response['hits'] = _hits
            else:
                response['hits'] = response_tmp['hits']
    return response
def search_from_es(host,index,query_str,startTime,endTime,scroll=False,_source=None,time_step=0):
    """Search ES for *query_str* between *startTime* and *endTime*.

    When time_step (milliseconds) is positive the interval is queried in
    consecutive [_s1, _s2] windows and the hits of all windows are merged;
    otherwise a single do_search() call covers the whole range.

    NOTE: Python 2 only (uses print statements). The windowed path sleeps
    2s between windows and prints progress to stdout.
    """
    logger.info('search_from_es startTime:%s,endTime:%s'%(startTime,endTime))
    # Epoch milliseconds; end is padded to the last millisecond of its second.
    startTimeStamp = int(str2timestamp(startTime))*1000
    endTimeStamp = int(str2timestamp(endTime))*1000+999
    all_response={}
    # NOTE(review): timegap is computed but never used.
    timegap = endTimeStamp-startTimeStamp
    if time_step>0:
        _s1=startTimeStamp
        _s2=startTimeStamp+time_step
        run_time =0
        all_response = {}
        # time_count tracks how often each window boundary was visited
        # (printed for debugging only).
        time_count = {}
        # NOTE(review): a trailing partial window (when the range is not a
        # multiple of time_step) is not queried; confirm this is intended.
        while(_s2<=endTimeStamp):
            response_tmp = do_search(host,index,query_str,_s1,_s2,scroll,_source,time_step)
            #response_tmp = do_search(_s1,_s2)
            # Merge this window's hits into the accumulated response.
            if all_response.has_key('hits'):
                _hits = all_response['hits']
                _hits['hits']+=response_tmp['hits']['hits']
                all_response['hits'] = _hits
            else:
                all_response['hits'] = response_tmp['hits']
            run_time+=1
            # Slide the window forward by one step.
            _s1=_s1+time_step
            _s2 = _s2+time_step
            if time_count.has_key(_s1):
                time_count[_s1]+=1
            else:
                time_count[_s1]=1
            if time_count.has_key(_s2):
                time_count[_s2]+=1
            else:
                time_count[_s2]=1
            print '----run_time:',run_time,'_s1:',_s1,',_s2:',_s2,',len:',len(all_response['hits']['hits'])
            print '-s1--',time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(_s1/1000))
            print '-s2--',time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(_s2/1000))
            print time_count
            # Throttle between windows to avoid hammering the cluster.
            time.sleep(2)
    else:
        all_response = do_search(host,index,query_str,startTimeStamp,endTimeStamp,scroll,_source,time_step)
    return all_response
| zhaochl/python-utils | es/elasticsearch_util.py | Python | apache-2.0 | 4,771 | 0.032907 |
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from ocfnet.database import Model
from ocfnet.media.models import *
from ocfnet.user.models import *
try:
from config import DATABASE_URL
except:
from configdist import DATABASE_URL
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the Alembic context with just a database URL rather than a
    live Engine, so no DBAPI needs to be installed; calls to
    context.execute() simply emit the SQL to the script output.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Builds a real Engine from the alembic.ini section (overriding the URL
    with the application's DATABASE_URL) and runs the migrations over a
    live connection, which is closed afterwards.
    """
    engine_options = config.get_section(config.config_ini_section)
    engine_options['sqlalchemy.url'] = DATABASE_URL
    engine = engine_from_config(engine_options, poolclass=pool.NullPool)
    connection = engine.connect()
    context.configure(connection=connection,
                      target_metadata=target_metadata)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()
# Entry point: Alembic imports this module directly, so pick the migration
# mode based on how alembic was invoked (--sql => offline).
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
| oregoncountryfair/ocfnet | ocfnet/migrations/env.py | Python | mit | 2,236 | 0.001342 |
from sys import stdin, stdout
# Closed-form answer: "14" for n=1, "155" for n=2, and for n>=3 the digits
# "1575" padded with n-3 trailing zeros.
n = int(stdin.read())
answers = {1: "14", 2: "155"}
res = answers.get(n, "1575" + "0" * (n - 3))
stdout.write(res + "\n")
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <oss@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rabbitmq_user
short_description: Adds or removes users to RabbitMQ
description:
- Add or remove users to RabbitMQ and assign permissions
version_added: "1.1"
author: Chris Hoffman
options:
user:
description:
- Name of user to add
required: true
default: null
aliases: [username, name]
password:
description:
- Password of user to add.
- To change the password of an existing user, you must also specify
C(force=yes).
required: false
default: null
tags:
description:
- User tags specified as comma delimited
required: false
default: null
vhost:
description:
- vhost to apply access privileges.
required: false
default: /
node:
description:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
version_added: "1.2"
configure_priv:
description:
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
required: false
default: ^$
write_priv:
description:
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
required: false
default: ^$
read_priv:
description:
- Regular expression to restrict configure actions on a resource
for the specified vhost.
- By default all actions are restricted.
required: false
default: ^$
force:
description:
- Deletes and recreates the user.
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- Specify if user is to be added or removed
required: false
default: present
choices: [present, absent]
'''
EXAMPLES = '''
# Add user to server and assign full access control
- rabbitmq_user: user=joe
password=changeme
vhost=/
configure_priv=.*
read_priv=.*
write_priv=.*
state=present
'''
class RabbitMqUser(object):
    """Wraps rabbitmqctl to query and manage a single RabbitMQ user."""

    def __init__(self, module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node):
        self.module = module
        self.username = username
        self.password = password
        self.node = node
        if tags is None:
            self.tags = list()
        else:
            self.tags = tags.split(',')

        permissions = dict(
            vhost=vhost,
            configure_priv=configure_priv,
            write_priv=write_priv,
            read_priv=read_priv
        )
        self.permissions = permissions

        # State read back from the broker by get(); None until queried.
        self._tags = None
        self._permissions = None
        self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)

    def _exec(self, args, run_in_check_mode=False):
        """Run rabbitmqctl with *args* and return its stdout lines.

        In Ansible check mode only explicitly allowed (read-only)
        invocations are executed; all others are no-ops returning [].
        """
        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
            cmd = [self._rabbitmqctl, '-q', '-n', self.node]
            rc, out, err = self.module.run_command(cmd + args, check_rc=True)
            return out.splitlines()
        return list()

    def get(self):
        """Return True if the user exists; caches its tags and permissions."""
        users = self._exec(['list_users'], True)

        for user_tag in users:
            user, tags = user_tag.split('\t')

            if user == self.username:
                # Tags are printed as e.g. "[administrator, management]";
                # strip the brackets/spaces before splitting.
                for c in ['[', ']', ' ']:
                    tags = tags.replace(c, '')

                if tags != '':
                    self._tags = tags.split(',')
                else:
                    self._tags = list()

                self._permissions = self._get_permissions()
                return True
        return False

    def _get_permissions(self):
        """Return the user's permissions on the configured vhost (or {})."""
        perms_out = self._exec(['list_user_permissions', self.username], True)

        for perm in perms_out:
            vhost, configure_priv, write_priv, read_priv = perm.split('\t')
            if vhost == self.permissions['vhost']:
                return dict(vhost=vhost, configure_priv=configure_priv, write_priv=write_priv, read_priv=read_priv)
        return dict()

    def add(self):
        """Create the user, with the given password or none at all.

        Fix: the original had `else` without a colon (SyntaxError).
        """
        if self.password is not None:
            self._exec(['add_user', self.username, self.password])
        else:
            # rabbitmqctl add_user requires a password argument, so create
            # the user with an empty one and immediately clear it.
            self._exec(['add_user', self.username, ''])
            self._exec(['clear_password', self.username])

    def delete(self):
        self._exec(['delete_user', self.username])

    def set_tags(self):
        self._exec(['set_user_tags', self.username] + self.tags)

    def set_permissions(self):
        cmd = ['set_permissions']
        cmd.append('-p')
        cmd.append(self.permissions['vhost'])
        cmd.append(self.username)
        cmd.append(self.permissions['configure_priv'])
        cmd.append(self.permissions['write_priv'])
        cmd.append(self.permissions['read_priv'])
        self._exec(cmd)

    def has_tags_modifications(self):
        """True when the desired tag set differs from the one on the broker."""
        return set(self.tags) != set(self._tags)

    def has_permissions_modifications(self):
        """True when the desired permissions differ from those on the broker."""
        return self._permissions != self.permissions
def main():
    """Ansible entry point: bring the RabbitMQ user to the requested state."""
    arg_spec = dict(
        user=dict(required=True, aliases=['username', 'name']),
        password=dict(default=None),
        tags=dict(default=None),
        vhost=dict(default='/'),
        configure_priv=dict(default='^$'),
        write_priv=dict(default='^$'),
        read_priv=dict(default='^$'),
        force=dict(default='no', type='bool'),
        state=dict(default='present', choices=['present', 'absent']),
        node=dict(default='rabbit')
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        supports_check_mode=True
    )

    params = module.params
    username = params['user']
    state = params['state']
    force = params['force']

    rabbitmq_user = RabbitMqUser(module, username, params['password'],
                                 params['tags'], params['vhost'],
                                 params['configure_priv'], params['write_priv'],
                                 params['read_priv'], params['node'])

    changed = False
    if rabbitmq_user.get():
        if state == 'absent':
            # The user exists but should not: remove it.
            rabbitmq_user.delete()
            changed = True
        else:
            if force:
                # Recreate from scratch (the only way to reset a password).
                rabbitmq_user.delete()
                rabbitmq_user.add()
                rabbitmq_user.get()
                changed = True

            if rabbitmq_user.has_tags_modifications():
                rabbitmq_user.set_tags()
                changed = True

            if rabbitmq_user.has_permissions_modifications():
                rabbitmq_user.set_permissions()
                changed = True
    elif state == 'present':
        # The user is missing: create it and apply tags and permissions.
        rabbitmq_user.add()
        rabbitmq_user.set_tags()
        rabbitmq_user.set_permissions()
        changed = True

    module.exit_json(changed=changed, user=username, state=state)
# import module snippets
from ansible.module_utils.basic import *
main()
| andsens/ansible-modules-extras | messaging/rabbitmq_user.py | Python | gpl-3.0 | 7,797 | 0.001411 |
""" Test plugin for Artifactor """
import time
from artifactor import ArtifactorBasePlugin
class Test(ArtifactorBasePlugin):
    """Minimal example plugin that records test start times to a log file."""

    def plugin_initialize(self):
        # Subscribe to the test lifecycle hooks this plugin handles.
        self.register_plugin_hook("start_test", self.start_test)
        self.register_plugin_hook("finish_test", self.finish_test)

    def start_test(self, test_name, test_location, artifact_path):
        log_name = "{}-{}.log".format(artifact_path, self.ident)
        with open(log_name, "a+") as log_file:
            log_file.write(test_name + "\n")
            log_file.write(str(time.time()) + "\n")
        for _ in range(2):
            time.sleep(2)
            print("houh")

    def finish_test(self, test_name, artifact_path):
        print("finished")
| anurag03/integration_tests | artifactor/plugins/test.py | Python | gpl-2.0 | 705 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# acertmgr - various support functions
# Copyright (c) Markus Hauschild & David Klaftenegger, 2016.
# Copyright (c) Rudolf Mayerhofer, 2019.
# available under the ISC license, see LICENSE
import base64
import datetime
import io
import os
import re
import stat
import sys
import traceback
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, ec, padding
from cryptography.hazmat.primitives.asymmetric.utils import decode_dss_signature
from cryptography.utils import int_to_bytes
from cryptography.x509.oid import NameOID, ExtensionOID
try:
from cryptography.x509 import ocsp
except ImportError:
pass
try:
from cryptography.hazmat.primitives.asymmetric import ed25519, ed448
except ImportError:
pass
try:
from urllib.request import urlopen, Request # Python 3
except ImportError:
from urllib2 import urlopen, Request # Python 2
LOG_REPLACEMENTS = {}
class InvalidCertificateError(Exception):
    """Raised when a certificate fails a sanity check (e.g. not yet valid)."""
    pass
# @brief a simple, portable indent function
def indent(text, spaces=0):
    """Indent every line of *text* by *spaces* spaces, joined with os.linesep."""
    prefix = ' ' * spaces
    return os.linesep.join(prefix + line for line in text.splitlines())
# @brief wrapper for log output
def log(msg, exc=None, error=False, warning=False):
    """Print *msg* to stdout (stderr for errors/warnings), applying
    LOG_REPLACEMENTS substitutions first and optionally appending the
    formatted traceback of *exc*, indented under the message prefix.
    """
    if error:
        prefix = "Error: "
    elif warning:
        prefix = "Warning: "
    else:
        prefix = ""
    output = prefix + msg
    # Apply configured text substitutions (e.g. to redact sensitive values).
    for k, v in LOG_REPLACEMENTS.items():
        output = output.replace(k, v)
    if exc:
        _, exc_value, _ = sys.exc_info()
        if not getattr(exc, '__traceback__', None) and exc == exc_value:
            # Traceback handling on Python 2 is ugly, so we only output it if the exception is the current sys one
            formatted_exc = traceback.format_exc()
        else:
            formatted_exc = traceback.format_exception(type(exc), exc, getattr(exc, '__traceback__', None))
        # format_exception returns a list of lines; format_exc a string.
        exc_string = ''.join(formatted_exc) if isinstance(formatted_exc, list) else str(formatted_exc)
        output += os.linesep + indent(exc_string, len(prefix))
    if error or warning:
        sys.stderr.write(output + os.linesep)
        sys.stderr.flush() # force flush buffers after message was written for immediate display
    else:
        sys.stdout.write(output + os.linesep)
        sys.stdout.flush() # force flush buffers after message was written for immediate display
# @brief wrapper for downloading an url
def get_url(url, data=None, headers=None):
    """Open *url* (POSTing *data* when given) and return the response object."""
    request_headers = {} if headers is None else headers
    return urlopen(Request(url, data=data, headers=request_headers))
# @brief check whether existing certificate is still valid or expiring soon
# @param crt_file string containing the path to the certificate file
# @param ttl_days the minimum amount of days for which the certificate must be valid
# @return True if certificate is still valid for at least ttl_days, False otherwise
def is_cert_valid(cert, ttl_days):
    """Return True if *cert* remains valid for at least *ttl_days* more days.

    Raises InvalidCertificateError when the certificate's validity period
    has not even started yet.
    """
    now = datetime.datetime.now()
    if cert.not_valid_before > now:
        raise InvalidCertificateError("Certificate seems to be from the future")
    deadline = now + datetime.timedelta(days=ttl_days)
    return cert.not_valid_after >= deadline
# @brief create a certificate signing request
# @param names list of domain names the certificate should be valid for
# @param key the key to use with the certificate in pyopenssl format
# @param must_staple whether or not the certificate should include the OCSP must-staple flag
# @return the CSR in pyopenssl format
def new_cert_request(names, key, must_staple=False):
    """Build and self-sign a CSR for *names* (the first entry becomes the CN).

    names       -- list of domain names (str or bytes)
    key         -- private key used to sign the request
    must_staple -- add the OCSP must-staple TLS feature flag when supported
    """
    def _as_text(name):
        # Accept both str and bytes domain names.
        return name.decode('utf-8') if getattr(name, 'decode', None) else name

    builder = x509.CertificateSigningRequestBuilder()
    builder = builder.subject_name(
        x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, _as_text(names[0]))]))
    builder = builder.add_extension(
        x509.SubjectAlternativeName([x509.DNSName(_as_text(n)) for n in names]),
        critical=False)
    if must_staple:
        if getattr(x509, 'TLSFeature', None):
            builder = builder.add_extension(
                x509.TLSFeature(features=[x509.TLSFeatureType.status_request]), critical=False)
        else:
            log('OCSP must-staple ignored as current version of cryptography does not support the flag.', warning=True)
    return builder.sign(key, hashes.SHA256(), default_backend())
# @brief generate a new account key
# @param path path where the new key file should be written in PEM format (optional)
def new_account_key(path=None, key_algo=None, key_size=None):
    """Generate an ACME account key; thin alias for new_ssl_key()."""
    return new_ssl_key(path, key_algo, key_size)
# @brief generate a new ssl key
# @param path path where the new key file should be written in PEM format (optional)
def new_ssl_key(path=None, key_algo=None, key_size=None):
    """Generate a new private key and optionally write it to *path* as PEM.

    key_algo -- 'rsa' (default), 'ec', 'ed25519' or 'ed448'
    key_size -- RSA modulus bits (default 4096) or EC curve size
                (256/384/521, default 256); ignored for EdDSA keys
    Raises ValueError for unknown algorithms or unsupported EC sizes.
    """
    if not key_algo or key_algo.lower() == 'rsa':
        if not key_size:
            key_size = 4096
        key_format = serialization.PrivateFormat.TraditionalOpenSSL
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=key_size,
            backend=default_backend()
        )
    elif key_algo.lower() == 'ec':
        if not key_size or key_size == 256:
            key_curve = ec.SECP256R1
        elif key_size == 384:
            key_curve = ec.SECP384R1
        elif key_size == 521:
            key_curve = ec.SECP521R1
        else:
            raise ValueError("Unsupported EC curve size parameter: {}".format(key_size))
        key_format = serialization.PrivateFormat.PKCS8
        private_key = ec.generate_private_key(curve=key_curve, backend=default_backend())
    # Fix: the availability checks below previously tested a bare module-name
    # string (always truthy) instead of looking it up in sys.modules, so old
    # cryptography versions raised NameError here instead of falling through
    # to the ValueError. Now consistent with get_key_alg_and_jwk().
    elif key_algo.lower() == 'ed25519' and "cryptography.hazmat.primitives.asymmetric.ed25519" in sys.modules:
        key_format = serialization.PrivateFormat.PKCS8
        private_key = ed25519.Ed25519PrivateKey.generate()
    elif key_algo.lower() == 'ed448' and "cryptography.hazmat.primitives.asymmetric.ed448" in sys.modules:
        key_format = serialization.PrivateFormat.PKCS8
        private_key = ed448.Ed448PrivateKey.generate()
    else:
        raise ValueError("Unsupported key algorithm: {}".format(key_algo))
    if path is not None:
        pem = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=key_format,
            encryption_algorithm=serialization.NoEncryption(),
        )
        with io.open(path, 'wb') as pem_out:
            pem_out.write(pem)
        if hasattr(os, 'chmod'):
            try:
                # Restrict the key file to owner-read only (0400).
                os.chmod(path, int("0400", 8))
            except OSError:
                log('Could not set file permissions on {0}!'.format(path), warning=True)
        else:
            log('Keyfile permission handling unavailable on this platform', warning=True)
    return private_key
# @brief read a key from file
# @param path path to file
# @param key indicate whether we are loading a key
# @param csr indicate whether we are loading a csr
# @return the key in pyopenssl format
def read_pem_file(path, key=False, csr=False):
    """Load a PEM file as a private key (*key*), a CSR (*csr*), or as one
    or more certificates (the default)."""
    with io.open(path, 'r') as pem_file:
        contents = pem_file.read()
    if key:
        return serialization.load_pem_private_key(contents.encode('utf-8'), None, default_backend())
    if csr:
        return x509.load_pem_x509_csr(contents.encode('utf8'), default_backend())
    return convert_pem_str_to_cert(contents)
# @brief write cert data to PEM formatted file
def write_pem_file(crt, path, perms=None):
    """Write certificate(s) *crt* to *path* as PEM, optionally chmod-ing to
    *perms*. An existing read-only file is made writable first so the write
    cannot fail on a previously locked-down file.
    """
    can_chmod = hasattr(os, 'chmod')
    if can_chmod and os.path.exists(path):
        try:
            os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
        except OSError:
            log('Could not make file ({0}) writable'.format(path), warning=True)
    with io.open(path, "w") as out:
        out.write(convert_cert_to_pem_str(crt))
    if perms:
        if not can_chmod:
            log('PEM-File permission handling unavailable on this platform', warning=True)
        else:
            try:
                os.chmod(path, perms)
            except OSError:
                log('Could not set file permissions ({0}) on {1}!'.format(perms, path), warning=True)
# @brief download the issuer ca for a given certificate
# @param cert certificate data
# @returns ca certificate data
def download_issuer_ca(cert):
    """Fetch and return the issuer CA certificate referenced in *cert*'s
    Authority Information Access extension, or None when no CA-issuers URL
    is present or the download fails.
    """
    aia = cert.extensions.get_extension_for_oid(ExtensionOID.AUTHORITY_INFORMATION_ACCESS)
    ca_issuers = next((entry.access_location.value for entry in aia.value
                       if entry.access_method == x509.OID_CA_ISSUERS), None)
    if not ca_issuers:
        log("Could not determine issuer CA for given certificate: {}".format(cert), error=True)
        return None
    log("Downloading CA certificate from {}".format(ca_issuers))
    resp = get_url(ca_issuers)
    code = resp.getcode()
    if code >= 400:
        log("Could not download issuer CA (error {}) for given certificate: {}".format(code, cert), error=True)
        return None
    return x509.load_der_x509_certificate(resp.read(), default_backend())
# @brief determine all san domains on a given certificate
def get_cert_domains(cert):
    """Return the set of all domain names on *cert* (CN plus SAN entries)."""
    domains = {cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value}
    san = cert.extensions.get_extension_for_oid(ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
    if san:
        domains.update(entry.value for entry in san.value)
    return domains
# @brief determine certificate cn
def get_cert_cn(cert):
    """Return the certificate's common name formatted as 'CN=<value>'."""
    common_name = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
    return "CN={}".format(common_name)
# @brief determine certificate end of validity
def get_cert_valid_until(cert):
    """Return the datetime at which *cert* stops being valid."""
    valid_until = cert.not_valid_after
    return valid_until
# @brief convert certificate to PEM format
# @param cert certificate object or a list thereof
# @return the certificate in PEM format
def convert_cert_to_pem_str(cert):
    """Serialize one certificate (or a list of them) into a PEM string."""
    certs = cert if isinstance(cert, list) else [cert]
    return '\n'.join(c.public_bytes(serialization.Encoding.PEM).decode('utf8')
                     for c in certs)
# @brief load a PEM certificate from str
# @return a certificate object or a list of objects if multiple are in the string
def convert_pem_str_to_cert(certdata):
    """Parse PEM data; return a single certificate, or a list when the
    string contains several."""
    blocks = re.findall(r'(-----BEGIN CERTIFICATE-----\n[^\-]+\n-----END CERTIFICATE-----)',
                        certdata, re.DOTALL)
    parsed = [x509.load_pem_x509_certificate(block.encode('utf8'), default_backend())
              for block in blocks]
    return parsed[0] if len(parsed) == 1 else parsed
# @brief serialize cert/csr to DER bytes
def convert_cert_to_der_bytes(data):
    """Return the DER (binary) encoding of a certificate or CSR object."""
    return data.public_bytes(serialization.Encoding.DER)
# @brief load a DER certificate from str
def convert_der_bytes_to_cert(data):
    """Parse DER-encoded bytes into a certificate object."""
    return x509.load_der_x509_certificate(data, default_backend())
# @brief determine key signing algorithm and jwk data
# @return key algorithm, signature algorithm, key numbers as a dict
def get_key_alg_and_jwk(key):
    """Return (JWS signature algorithm name, public JWK dict) for *key*.

    Supports RSA (RS256), the NIST P-curves (ES256/ES384/ES512) and the
    EdDSA curves Ed25519/Ed448 when the installed cryptography version
    provides them. Raises ValueError for anything else.
    """
    if isinstance(key, rsa.RSAPrivateKey):
        # See https://tools.ietf.org/html/rfc7518#section-6.3
        numbers = key.public_key().public_numbers()
        return "RS256", {"kty": "RSA",
                         "e": bytes_to_base64url(int_to_bytes(numbers.e)),
                         "n": bytes_to_base64url(int_to_bytes(numbers.n))}
    elif isinstance(key, ec.EllipticCurvePrivateKey):
        # See https://tools.ietf.org/html/rfc7518#section-6.2
        numbers = key.public_key().public_numbers()
        if isinstance(numbers.curve, ec.SECP256R1):
            alg = 'ES256'
            crv = 'P-256'
        elif isinstance(numbers.curve, ec.SECP384R1):
            alg = 'ES384'
            crv = 'P-384'
        elif isinstance(numbers.curve, ec.SECP521R1):
            alg = 'ES512'
            crv = 'P-521'
        else:
            raise ValueError("Unsupported EC curve in key: {}".format(key))
        # Coordinates must be zero-padded to the curve's full octet length;
        # derive it from the bit count in the curve name (e.g. 'P-256').
        full_octets = (int(crv[2:]) + 7) // 8
        return alg, {"kty": "EC", "crv": crv,
                     "x": bytes_to_base64url(int_to_bytes(numbers.x, full_octets)),
                     "y": bytes_to_base64url(int_to_bytes(numbers.y, full_octets))}
    elif "cryptography.hazmat.primitives.asymmetric.ed25519" in sys.modules and isinstance(key,
                                                                                           ed25519.Ed25519PrivateKey):
        # See https://tools.ietf.org/html/rfc8037#appendix-A.2
        return "EdDSA", {"kty": "OKP", "crv": "Ed25519",
                         "x": bytes_to_base64url(key.public_key().public_bytes(encoding=serialization.Encoding.Raw,
                                                                               format=serialization.PublicFormat.Raw)
                                                 )}
    elif "cryptography.hazmat.primitives.asymmetric.ed448" in sys.modules and isinstance(key,
                                                                                         ed448.Ed448PrivateKey):
        return "EdDSA", {"kty": "OKP", "crv": "Ed448",
                         "x": bytes_to_base64url(key.public_key().public_bytes(encoding=serialization.Encoding.Raw,
                                                                               format=serialization.PublicFormat.Raw)
                                                 )}
    else:
        raise ValueError("Unsupported key: {}".format(key))
# @brief sign string with key
def signature_of_str(key, string):
    """Sign the UTF-8 encoding of *string* with *key* and return raw bytes
    in the format required by the key's JWS algorithm (RFC 7518/8037)."""
    alg, _ = get_key_alg_and_jwk(key)
    data = string.encode('utf8')
    if alg == 'RS256':
        return key.sign(data, padding.PKCS1v15(), hashes.SHA256())
    elif alg.startswith('ES'):
        # Each coordinate is padded to the curve's full octet length.
        full_octets = (int(alg[2:]) + 7) // 8
        if alg == 'ES256':
            der_sig = key.sign(data, ec.ECDSA(hashes.SHA256()))
        elif alg == 'ES384':
            der_sig = key.sign(data, ec.ECDSA(hashes.SHA384()))
        elif alg == 'ES512':
            der_sig = key.sign(data, ec.ECDSA(hashes.SHA512()))
        else:
            raise ValueError("Unsupported EC signature algorithm: {}".format(alg))
        # convert DER signature to RAW format (https://tools.ietf.org/html/rfc7518#section-3.4)
        r, s = decode_dss_signature(der_sig)
        return int_to_bytes(r, full_octets) + int_to_bytes(s, full_octets)
    elif alg == 'EdDSA':
        return key.sign(data)
    else:
        raise ValueError("Unsupported signature algorithm: {}".format(alg))
# @brief hash a string
def hash_of_str(string):
    """Return the SHA-256 digest (raw bytes) of *string*.

    Uses the standard library's hashlib instead of constructing a
    cryptography backend Hash object; the resulting digest is identical
    and the function no longer needs a backend.
    """
    import hashlib
    return hashlib.sha256(string.encode('utf8')).digest()
# @brief helper function to base64 encode for JSON objects
# @param b the byte-string to encode
# @return the encoded string
def bytes_to_base64url(b):
    """Base64url-encode *b* without padding, as required for JOSE/JWK."""
    encoded = base64.urlsafe_b64encode(b).decode('utf8')
    return encoded.rstrip("=")
# @brief check whether existing target file is still valid or source crt has been updated
# @param target string containing the path to the target file
# @param file string containing the path to the certificate file
# @return True if target file is at least as new as the certificate, False otherwise
def target_is_current(target, file):
    """Return True when *target* exists and is at least as new as *file*."""
    if not os.path.isfile(target):
        return False
    return os.path.getmtime(target) >= os.path.getmtime(file)
# @brief convert domain to idna representation (if applicable
def idna_convert(domain):
    """Return the IDNA (punycode) form of *domain*; pure-ASCII names pass
    through unchanged.

    A leading wildcard label ('*.') is preserved verbatim, since it cannot
    be IDNA-encoded. On any translation error the original name is
    returned after logging.
    """
    try:
        if any(ord(ch) >= 128 for ch in domain):
            if domain.startswith('*.'):
                return "*.{}".format(domain[2:].encode('idna').decode('ascii'))
            return domain.encode('idna').decode('ascii')
    except Exception as e:
        log("Unicode domain(s) found but IDNA names could not be translated due to error: {}".format(e), error=True)
    return domain
# @brief validate the OCSP status for a given certificate by the given issuer
def is_ocsp_valid(cert, issuer, hash_algo):
    """Return False only when an OCSP responder reports *cert* as revoked.

    Any problem (unknown hash algorithm, missing OCSP URL, network or
    parsing error) is logged and treated as "valid", so OCSP outages never
    block certificate handling.
    """
    if hash_algo == 'sha1':
        algorithm = hashes.SHA1
    elif hash_algo == 'sha224':
        algorithm = hashes.SHA224
    elif hash_algo == 'sha256':
        algorithm = hashes.SHA256
    elif hash_algo in ('sha384', 'sha385'):
        # Fix: the correct algorithm name is 'sha384'; the historical typo
        # 'sha385' is still accepted for backward compatibility.
        algorithm = hashes.SHA384
    elif hash_algo == 'sha512':
        algorithm = hashes.SHA512
    else:
        log("Invalid hash algorithm '{}' used for OCSP validation. Validation ignored.".format(hash_algo), warning=True)
        return True
    if isinstance(issuer, list):
        issuer = issuer[0]  # First certificate in the CA chain is the immediate issuer
    try:
        # Collect all OCSP responder URLs from the AIA extension.
        ocsp_urls = []
        aia = cert.extensions.get_extension_for_oid(ExtensionOID.AUTHORITY_INFORMATION_ACCESS)
        for data in aia.value:
            if data.access_method == x509.OID_OCSP:
                ocsp_urls.append(data.access_location.value)
        # This is a bit of a hack due to validation problems within cryptography (TODO: Check if this is still true)
        # Correct replacement: ocsprequest = ocsp.OCSPRequestBuilder().add_certificate(cert, issuer, algorithm).build()
        ocsprequest = ocsp.OCSPRequestBuilder((cert, issuer, algorithm)).build()
        ocsprequestdata = ocsprequest.public_bytes(serialization.Encoding.DER)
        for ocsp_url in ocsp_urls:
            response = get_url(ocsp_url,
                               ocsprequestdata,
                               {
                                   'Accept': 'application/ocsp-response',
                                   'Content-Type': 'application/ocsp-request',
                               })
            ocspresponsedata = response.read()
            ocspresponse = ocsp.load_der_ocsp_response(ocspresponsedata)
            if ocspresponse.response_status == ocsp.OCSPResponseStatus.SUCCESSFUL \
                    and ocspresponse.certificate_status == ocsp.OCSPCertStatus.REVOKED:
                return False
    except Exception as e:
        log("An exception occurred during OCSP validation (Validation will be ignored): {}".format(e), error=True)
    return True
| moepman/acertmgr | acertmgr/tools.py | Python | isc | 18,467 | 0.003466 |
#!/usr/bin/env python
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import daemon
import logging
import os
import sys
import extras
import fixtures
import testtools
from tests.base import iterate_timeout
# as of python-daemon 1.6 it doesn't bundle pidlockfile anymore
# instead it depends on lockfile-0.9.1 which uses pidfile.
pid_file_module = extras.try_imports(['daemon.pidlockfile', 'daemon.pidfile'])
def daemon_test(pidfile, flagfile):
    """Daemonize, wait (up to 30s) for *flagfile* to disappear, then exit 0."""
    lockfile = pid_file_module.TimeoutPIDLockFile(pidfile, 10)
    with daemon.DaemonContext(pidfile=lockfile):
        for _ in iterate_timeout(30, "flagfile to be removed"):
            if not os.path.exists(flagfile):
                break
        sys.exit(0)
class TestDaemon(testtools.TestCase):
    """Verifies that daemonization creates and removes its pid file."""
    log = logging.getLogger("zuul.test.daemon")
    def setUp(self):
        super(TestDaemon, self).setUp()
        # Per-test scratch directory (root overridable via ZUUL_TEST_ROOT).
        self.test_root = self.useFixture(fixtures.TempDir(
            rootdir=os.environ.get("ZUUL_TEST_ROOT"))).path
    def test_daemon(self):
        pidfile = os.path.join(self.test_root, "daemon.pid")
        flagfile = os.path.join(self.test_root, "daemon.flag")
        # The flag file keeps the daemon alive; removing it lets it exit.
        open(flagfile, 'w').close()
        if not os.fork():
            # Child process: drop inherited test cleanups before it
            # daemonizes, so they do not run twice.
            self._cleanups = []
            daemon_test(pidfile, flagfile)
        # Parent: the pid file appearing signals the daemon has started.
        for x in iterate_timeout(30, "daemon to start"):
            if os.path.exists(pidfile):
                break
        os.unlink(flagfile)
        # The pid file disappearing signals the daemon shut down cleanly.
        for x in iterate_timeout(30, "daemon to stop"):
            if not os.path.exists(pidfile):
                break
| wikimedia/integration-zuul | tests/test_daemon.py | Python | apache-2.0 | 2,085 | 0 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class PythonFrame(object):
    """Frame backend holding its data as plain Python/Spark objects.

    rdd    -- a pyspark.rdd.RDD carrying the frame's rows
    schema -- list of (column_name, dtype) tuples, or None if unknown
    """

    def __init__(self, rdd, schema=None):
        self.schema = schema
        self.rdd = rdd
| trustedanalytics/spark-tk | python/sparktk/frame/pyframe.py | Python | apache-2.0 | 926 | 0.001179 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
VERSION = (1, 0, 8, 'final')
__version__ = VERSION
def get_version():
version = '{}.{}'.format(VERSION[0], VERSION[1])
if VERSION[2]:
version = '{}.{}'.format(version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '{} pre-alpha'.format(version)
else:
if VERSION[3] != 'final':
version = '{} {}'.format(version, VERSION[3])
return version
| quiqueporta/django-admin-dialog | django_admin_dialog/__init__.py | Python | gpl-2.0 | 502 | 0 |
from flask import Flask
# Module-level application object for the package.
app = Flask(__name__)
# NOTE(review): deliberately imported at the bottom, after ``app`` exists --
# presumably ``views`` imports ``app`` back from this module to register its
# routes, and a top-of-file import would be circular.  Confirm in views.py.
import views
from django.db import models
class SpectralTemplate(models.Model):
    """A named spectral template backed by a data file on disk."""
    # Short human-readable identifier; also used as the display string.
    name = models.CharField(max_length=10)
    # Filesystem path to the template's data file.
    path = models.CharField(max_length=100)
    def __str__(self):
        return self.name
class PhotometricFilter(models.Model):
    """A photometric filter band with its calibration constants."""
    # Short band identifier; also used as the display string.
    name = models.CharField(max_length=10)
    # Filesystem path to the filter's data file.
    path = models.CharField(max_length=100)
    # cwl/width: presumably central wavelength and bandwidth of the filter --
    # units are not recorded in the model; confirm against the data files.
    cwl = models.FloatField()
    width = models.FloatField()
    # lambda_b / lambda_e: presumably the beginning/end wavelengths of the
    # band (same naming is used on VPHSetup below) -- confirm.
    lambda_b = models.FloatField()
    lambda_e = models.FloatField()
    # Vega reference magnitude and flux for this band (presumed from the
    # m/f prefixes -- confirm).
    mvega = models.FloatField()
    fvega = models.FloatField()
    def __str__(self):
        return self.name
class VPHSetup(models.Model):
    """A VPH (volume phase holographic) grating setup and its parameters."""
    # Short setup identifier; also used as the display string.
    name = models.CharField(max_length=10)
    # Spectral resolution / dispersion figures -- units not recorded in the
    # model; confirm against the instrument documentation.
    fwhm = models.FloatField()
    dispersion = models.FloatField()
    deltab = models.FloatField()
    # lambdac: presumably the central wavelength of the setup -- confirm.
    lambdac = models.FloatField()
    # Name of the photometric band this setup is associated with.
    relatedband = models.CharField(max_length=10)
    # Wavelength range covered (same naming as PhotometricFilter -- presumed
    # begin/end wavelengths; confirm).
    lambda_b = models.FloatField()
    lambda_e = models.FloatField()
    # Spectrograph configuration label.
    specconf = models.CharField(max_length=10)
    def __str__(self):
        return self.name
| sergiopasra/django-megaraetc | etc/models.py | Python | gpl-3.0 | 1,005 | 0.002985 |
game_type = 'input_output'
parameter_list = [['$x1','string'], ['$y0','string']]
tuple_list = [
['KnR_1-10_',[None,None]]
]
global_code_template = '''\
d #include <stdio.h>
x #include <stdio.h>
dx #define MAXLINE 1000 /* maximum input line length */
dx
dx int max; /* maximum length seen so far */
dx char line[MAXLINE]; /* current input line */
dx char longest[MAXLINE]; /* longest line saved here */
dx
dx int my_getline(void);
dx void copy(void);
dx
dx /* my_getline: specialized version */
dx int my_getline(void)
dx {
dx int c, i;
dx extern char line[];
dx
dx for (i = 0; i < MAXLINE - 1
dx && (c=getchar()) != EOF && c != '\\n'; ++i)
dx line[i] = c;
dx if (c == '\\n') {
dx line[i] = c;
dx ++i;
dx }
dx line[i] = '\\0';
dx return i;
dx }
dx
dx /* copy: specialized version */
dx void copy(void)
dx {
dx int i;
dx extern char line[], longest[];
dx
dx i = 0;
dx while ((longest[i] = line[i]) != '\\0')
dx ++i;
dx }
dx
dx /* print longest input line; specialized version */
'''
main_code_template = '''\
dx int len;
dx extern int max;
dx extern char longest[];
dx
dx max = 0;
dx while ((len = my_getline()) > 0)
dx if (len > max) {
dx max = len;
dx copy();
dx }
dx if (max > 0) /* there was a line */
dx printf("%s", longest);
'''
argv_template = ''
stdin_template = '''
a
$x1
abc
'''
stdout_template = '''\
$y0
'''
| stryder199/RyarkAssignments | Assignment2/ttt/archive/_old/KnR/KnR_1-10.py | Python | mit | 1,389 | 0.009359 |
import json
import pytest
from yelp_beans.logic.secret import get_secret
def test_get_secret_file(tmpdir, database):
    """get_secret reads a value from client_secrets.json in the cwd."""
    password = 'password'
    with tmpdir.as_cwd():
        secrets_path = tmpdir.join('client_secrets.json').strpath
        with open(secrets_path, 'w') as handle:
            json.dump({'secret': password}, handle)
        assert get_secret('secret') == password
def test_get_secret_file_no_exist(tmpdir, database):
    """get_secret raises IOError when no client_secrets.json is present."""
    with tmpdir.as_cwd():
        with pytest.raises(IOError):
            get_secret('secret')
| Yelp/beans | api/tests/logic/secret_test.py | Python | mit | 573 | 0 |
# Copyright (c) 2013-2014 Lingpeng Kong
# All Rights Reserved.
#
# This file is part of TweeboParser 1.0.
#
# TweeboParser 1.0 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TweeboParser 1.0 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TweeboParser 1.0. If not, see <http://www.gnu.org/licenses/>.
# Lingpeng Kong, lingpenk@cs.cmu.edu
# Oct 12, 2013
# The Brown Clustering usage for Dependency Parsing can be read from Koo et al (ACL 08)
# http://people.csail.mit.edu/maestro/papers/koo08acl.pdf
# Oct 27, 2013
# Add case-sensitive choice
# Jan 4, 2014
# Add 4 bits, 6 bits and all bits.
# May 24, 2014
# Add codecs to support utf-8
import sys
import codecs
def usage():
    """Print the command-line usage summary for this script."""
    messages = [
        "Usage: AugumentBrownClusteringFeature.py [Brown_Clustering_Dictionary] "
        "[Input_Conll_File] [Y/N(case-sensitive)] > [Output_file]",
        "Example: AugumentBrownClusteringFeature.py paths input.txt > output.txt",
        "The program will add two kind of Strings at the end, the first one is the first 4 "
        "bit of the Brown Cluster label and the second one is the whole Brown Cluster label.",
    ]
    for message in messages:
        print(message)
def load_brown_dict(path):
    """Read a Brown clustering 'paths' file into a {word: bitstring} dict.

    Each non-empty line is tab-separated with the cluster bitstring in
    column 0 and the word in column 1; extra columns are ignored.
    The file handle is closed deterministically (the original leaked it).
    """
    brown_dict = dict()
    with open(path, "r") as brown_file:
        for line in brown_file:
            line = line.strip()
            if line == "":
                continue
            bl = line.split("\t")
            brown_dict[bl[1]] = bl[0]
    return brown_dict


def augment_line(line, brown_dict, case_sensitive):
    """Append the 4-bit, 6-bit and full Brown cluster labels to a CoNLL row.

    ``line`` is a stripped, tab-separated CoNLL token line whose second
    column is the word form.  Words missing from ``brown_dict`` get the
    label 'OOV'.  Returns the augmented tab-joined line.
    """
    cvlist = line.split("\t")
    if case_sensitive:
        token = cvlist[1].strip()
    else:
        token = cvlist[1].lower().strip()
    brown = brown_dict.get(token, 'OOV')
    # Prefixes of the cluster path: coarser clusterings (Koo et al., ACL 08).
    b4 = brown[:4] if len(brown) >= 4 else brown
    b6 = brown[:6] if len(brown) >= 6 else brown
    cvlist.extend([b4, b6, brown])
    return "\t".join(cvlist)


if __name__ == "__main__":
    if len(sys.argv) != 4:
        usage()
        sys.exit(2)

    # Force UTF-8 output so non-ASCII tokens survive redirection.
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
    sys.stderr = codecs.getwriter('utf-8')(sys.stderr)

    brown_dict = load_brown_dict(sys.argv[1].strip())
    #print brown_dict['upstage/downstage']

    inputf = sys.argv[2].strip()
    # Anything other than an explicit "N" keeps the original casing,
    # matching the original behaviour.
    case_sensitive = sys.argv[3] != "N"
    for line in codecs.open(inputf, "r", "utf-8"):
        line = line.strip()
        if line == "":
            # Preserve sentence boundaries (blank lines) in the output.
            sys.stdout.write("\n")
            continue
        print(augment_line(line, brown_dict, case_sensitive))
| ikekonglp/TweeboParser | scripts/AugumentBrownClusteringFeature46.py | Python | gpl-3.0 | 2,802 | 0.003212 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the postcode_api app.

    Adds a composite index over (postcode_index, uprn) on the address
    table, presumably to speed lookups filtering on both columns.
    """

    dependencies = [
        ('postcode_api', '0010_auto_20150601_1513'),
    ]

    operations = [
        migrations.AlterIndexTogether(
            name='address',
            index_together=set([('postcode_index', 'uprn')]),
        ),
    ]
| ministryofjustice/postcodeinfo | postcodeinfo/apps/postcode_api/migrations/0011_auto_20150702_1812.py | Python | mit | 394 | 0 |
import sys
import os.path
import logging
import ply.yacc
from rightarrow.annotations import *
from rightarrow.lexer import Lexer
logger = logging.getLogger(__name__)
class Parser(object):
    """LALR parser for the rightarrow type-annotation language, built on PLY.

    PLY discovers the grammar by reflecting over the ``p_*`` methods and
    the ``tokens``/``precedence`` class attributes, so their names and the
    docstrings holding the grammar productions are load-bearing.
    """

    tokens = Lexer.tokens

    def __init__(self, debug=False, lexer_class=None):
        self.debug = debug
        self.lexer_class = lexer_class or Lexer # Crufty but works around statefulness in PLY

    def parse(self, string, lexer = None):
        """Tokenize ``string`` with the configured lexer and parse it."""
        lexer = lexer or self.lexer_class()
        return self.parse_token_stream(lexer.tokenize(string))

    def parse_token_stream(self, token_iterator, start_symbol='ty'):
        """Build a parser for ``start_symbol`` and run it over the tokens."""

        # Since PLY has some crufty aspects and dumps files, we try to keep them local
        # However, we need to derive the name of the output Python file :-/
        output_directory = os.path.dirname(__file__)
        try:
            module_name = os.path.splitext(os.path.split(__file__)[1])[0]
        except Exception:  # narrowed from a bare except; __file__ may be unset (e.g. frozen app)
            module_name = __name__

        parsing_table_module = '_'.join([module_name, start_symbol, 'parsetab'])

        # And we regenerate the parse table every time; it doesn't actually take that long!
        new_parser = ply.yacc.yacc(module=self,
                                   debug=self.debug,
                                   tabmodule = parsing_table_module,
                                   outputdir = output_directory,
                                   write_tables=0,
                                   start = start_symbol,
                                   errorlog = logger)

        return new_parser.parse(lexer = IteratorToTokenStream(token_iterator))

    # ===================== PLY Parser specification =====================

    # '->' is right-associative, so "a -> b -> c" parses as "a -> (b -> c)";
    # '|' (union) binds tighter than '->'.
    precedence = [
        ('right', 'ARROW'),
        ('left', '|'),
    ]

    def p_error(self, t):
        # NOTE(review): assumes the custom Lexer attaches ``col`` to tokens;
        # stock PLY tokens only carry ``lexpos`` -- confirm in rightarrow.lexer.
        raise Exception('Parse error at %s:%s near token %s (%s)' % (t.lineno, t.col, t.value, t.type))

    def p_empty(self, p):
        'empty :'
        pass

    def p_ty_parens(self, p):
        "ty : '(' ty ')'"
        p[0] = p[2]

    def p_ty_var(self, p):
        "ty : TYVAR"
        p[0] = Variable(p[1])

    def p_ty_union(self, p):
        "ty : ty '|' ty"
        p[0] = Union([p[1], p[3]])

    def p_ty_bare(self, p):
        "ty : bare_arg_ty"
        p[0] = p[1]

    def p_ty_funty_bare(self, p):
        "ty : ty ARROW ty"
        p[0] = Function(arg_types=[p[1]], return_type=p[3])

    def p_ty_funty_complex(self, p):
        "ty : '(' maybe_arg_types ')' ARROW ty"
        argument_types=p[2]
        return_type=p[5]

        # Check here whether too many kwarg or vararg types are present
        # Each item in the list uses the dictionary encoding of tagged variants
        arg_types = [argty['arg_type'] for argty in argument_types if 'arg_type' in argty]
        vararg_types = [argty['vararg_type'] for argty in argument_types if 'vararg_type' in argty]
        kwarg_types = [argty['kwarg_type'] for argty in argument_types if 'kwarg_type' in argty]

        if len(vararg_types) > 1:
            raise Exception('Argument list with multiple vararg types: %s' % argument_types)

        if len(kwarg_types) > 1:
            raise Exception('Argument list with multiple kwarg types: %s' % argument_types)

        # All the arguments that are not special
        p[0] = Function(arg_types=arg_types,
                        vararg_type=vararg_types[0] if len(vararg_types) > 0 else None,
                        kwarg_type=kwarg_types[0] if len(kwarg_types) > 0 else None,
                        kwonly_arg_types=None,
                        return_type=return_type)

    # Because a bare function type is equivalent to a single argument in parens, it is not
    # parsed by this rule
    def p_maybe_arg_types(self, p):
        '''
        maybe_arg_types : arg_types ',' arg_ty
                        | empty
        '''
        p[0] = [] if len(p) == 2 else p[1] + [p[3]]

    # Executive decision is this: kwargs and varargs get to be elements of this list ANYWHERE
    # and we check later, to avoid any parsing issues with commas
    def p_arg_types_single(self, p):
        '''
        arg_types : arg_types ',' arg_ty
                  | arg_ty
        '''
        p[0] = [p[1]] if len(p) == 2 else p[1] + [p[3]]

    def p_arg_ty_normal(self, p):
        "arg_ty : ty"
        p[0] = { 'arg_type' : p[1] }

    def p_arg_ty_vararg(self, p):
        "arg_ty : '*' ty"
        p[0] = { 'vararg_type' : p[2] }

    def p_arg_ty_kwarg(self, p):
        "arg_ty : KWARG ty"
        p[0] = { 'kwarg_type' : p[2] }

    # Special types that never require parenthesis
    def p_bare_arg_ty(self, p):
        """
        bare_arg_ty : identifier_ty
                    | dict_ty
                    | list_ty
                    | object_ty
                    | any_ty
        """
        p[0] = p[1]

    def p_identifier_ty(self, p):
        "identifier_ty : ID"
        p[0] = NamedType(p[1])

    def p_list_ty(self, p):
        "list_ty : '[' ty ']'"
        p[0] = List(elem_ty=p[2])

    def p_dict_ty(self, p):
        "dict_ty : '{' ty ':' ty '}'"
        p[0] = Dict(key_ty=p[2], value_ty=p[4])

    def p_any_ty(self, p):
        "any_ty : ANY"
        p[0] = Any()

    def p_object_ty(self, p):
        """
        object_ty : OBJECT '(' ID ')'
                  | OBJECT '(' ID ',' obj_fields ')'
        """
        field_types = {} if len(p) == 5 else p[5]
        p[0] = Object(p[3], **field_types)

    def p_obj_fields(self, p):
        """
        obj_fields : obj_fields ',' obj_field
                   | obj_field
        """
        p[0] = dict([p[1]] if len(p) == 2 else p[1] + [p[3]]) # Note: no checking for dupe fields at the moment

    def p_obj_field(self, p):
        "obj_field : ID ':' ty"
        p[0] = (p[1], p[3])
class IteratorToTokenStream(object):
    """Adapt a token iterator to the pull-style ``token()`` API PLY expects."""

    def __init__(self, iterator):
        self.iterator = iterator

    def token(self):
        """Return the next token, or None once the stream is exhausted."""
        try:
            # The builtin next() works on both Python 2 and Python 3
            # iterators, unlike the Python-2-only ``.next()`` method the
            # original called.
            return next(self.iterator)
        except StopIteration:
            return None
if __name__ == '__main__':
    # Read a type expression from stdin, parse it, and echo the result.
    logging.basicConfig()
    parser = Parser(debug=True)
    # Parenthesized form is a valid statement on Python 2 and a call on
    # Python 3; the original bare ``print`` statement is a Py3 SyntaxError.
    print(parser.parse(sys.stdin.read()))
| kennknowles/python-rightarrow | rightarrow/parser.py | Python | apache-2.0 | 6,194 | 0.008072 |