code (string, 3–1.05M) | repo_name (string, 5–104) | path (string, 4–251) | language (1 class) | license (15 classes) | size (int64, 3–1.05M)
---|---|---|---|---|---
__source__ = 'https://leetcode.com/problems/populating-next-right-pointers-in-each-node-ii/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/populating-next-right-pointers-in-each-node-ii.py
# Time: O(n)
# Space: O(1)
# BFS
#
# Description: Leetcode # 117. Populating Next Right Pointers in Each Node II
#
# Follow up for problem "Populating Next Right Pointers in Each Node".
#
# What if the given tree could be any binary tree? Would your previous solution still work?
#
# Note:
#
# You may only use constant extra space.
# For example,
# Given the following binary tree,
#          1
#        /  \
#       2    3
#      / \    \
#     4   5    7
# After calling your function, the tree should look like:
#          1 -> NULL
#        /  \
#       2 -> 3 -> NULL
#      / \    \
#     4-> 5 -> 7 -> NULL
#
# Companies
# Microsoft Bloomberg Facebook
# Related Topics
# Tree Depth-first Search
# Similar Questions
# Populating Next Right Pointers in Each Node
#
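# Approach used by both solutions below (summary, not from the original
# comments): walk the current level through the next pointers built on the
# previous pass, and while doing so stitch the children together with a
# trailing `prev` pointer; the first child encountered (`next_head`) becomes
# the entry point for the next level. Only a few pointers are kept, hence the
# O(1) extra space.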
import unittest
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
self.next = None
    def __repr__(self):
        # `self is None` can never be true inside a bound method; terminate
        # the chain when there is no next node so the output ends with "Nil".
        if self.next is None:
            return "{} -> Nil".format(self.val)
        return "{} -> {}".format(self.val, repr(self.next))
# 84ms 18.10%
class Solution:
# @param root, a tree node
# @return nothing
def connect(self, root):
head = root
while head:
prev, cur, next_head = None, head, None
while cur:
if next_head is None:
if cur.left:
next_head= cur.left
elif cur.right:
next_head = cur.right
if cur.left:
if prev:
prev.next = cur.left
prev = cur.left
if cur.right:
if prev:
prev.next = cur.right
prev = cur.right
cur = cur.next
head = next_head
class SolutionOther:
# @param root, a tree node
# @return nothing
def connect(self, root):
curr = root
while curr:
firstNodeInNextLevel = None
prev = None
while curr:
if not firstNodeInNextLevel:
firstNodeInNextLevel = curr.left if curr.left else curr.right
if curr.left:
if prev:
prev.next = curr.left
prev = curr.left
if curr.right:
if prev:
prev.next = curr.right
prev = curr.right
curr = curr.next
curr = firstNodeInNextLevel
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
#create Tree
root, root.left, root.right = TreeNode(1), TreeNode(2), TreeNode(3)
root.left.left, root.left.right, root.right.right = TreeNode(4), TreeNode(5), TreeNode(7)
#test
test = SolutionOther()
test.connect(root)
Solution().connect(root)
print root
print root.left
print root.left.left
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
# 1ms 86.28%
class Solution {
//hint to have pre point at root.left
public void connect(TreeLinkNode root) {
TreeLinkNode dummyHead = new TreeLinkNode(0);
TreeLinkNode pre = dummyHead;
while (root != null) {
if (root.left != null) {
pre.next = root.left;
pre = pre.next;
}
if (root.right != null) {
pre.next = root.right;
pre = pre.next;
}
root = root.next;
if (root == null) {
pre = dummyHead;
root = dummyHead.next;
dummyHead.next = null;
}
}
}
}
/**
* Definition for binary tree with next pointer.
* public class TreeLinkNode {
* int val;
* TreeLinkNode left, right, next;
* TreeLinkNode(int x) { val = x; }
* }
*/
# 0ms 100%
class Solution {
public void connect(TreeLinkNode root) {
while(root != null){
TreeLinkNode tempChild = new TreeLinkNode(0);
TreeLinkNode currentChild = tempChild;
while(root!=null){
if(root.left != null) { currentChild.next = root.left; currentChild = currentChild.next;}
if(root.right != null) { currentChild.next = root.right; currentChild = currentChild.next;}
root = root.next;
}
root = tempChild.next;
}
}
}
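# Note (added summary): both Java versions use the same dummy-head trick as the
# Python code above -- a throwaway node collects the children of the level
# currently being walked via next pointers, so no queue or recursion is needed.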
''' | JulyKikuAkita/PythonPrac | cs15211/PopulatingNextRightPointersinEachNodeII.py | Python | apache-2.0 | 4,795 |
import mechanize
br = mechanize.Browser()
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
# Follow refresh 0, but don't hang on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
#br.set_debug_http(True)
#br.set_debug_redirects(True)
#br.set_debug_responses(True)
import cookielib
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]
r = br.open('http://google.com')
html = r.read()
# Show the html title
print br.title()
# Show the available forms
for f in br.forms():
print f
# Select the first (index zero) form
br.select_form(nr=0)
# Let's search
br.form['q']='data science'
r = br.submit()
html = r.read()
from bs4 import BeautifulSoup
soup = BeautifulSoup(html)
nres = soup.find('div',attrs={'id':'resultStats'}).text
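# At this point `nres` holds the "About N results" text -- assuming Google's
# markup still exposes a div with id 'resultStats'; printing it is a quick
# sanity check, e.g.:
#   print nres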
wlinks = [ w for w in br.links(url_regex='wikipedia') ]
# Looking at some results in link format
for l in wlinks:
print l.url
w0 = wlinks[0]
r = br.open(w0.url)
print br.title()
print br.geturl()
html = r.read()
soup = BeautifulSoup(html)
a_li = soup.select('.interlanguage-link')
print ('\n'.join([ li.a['lang']+" "+li.text for li in a_li])).encode('utf-8')
| exedre/webscraping-course-2014 | esempi/mechanize1.py | Python | gpl-2.0 | 1,386 |
###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
import unittest
from diffcalc.gdasupport.scannable.parameter import \
DiffractionCalculatorParameter
from test.diffcalc.gdasupport.scannable.mockdiffcalc import \
MockParameterManager
class TestDiffractionCalculatorParameter(object):
def setup_method(self):
self.dcp = DiffractionCalculatorParameter('dcp', 'betain',
MockParameterManager())
def testAsynchronousMoveToAndGetPosition(self):
self.dcp.asynchronousMoveTo(12.3)
assert self.dcp.getPosition() == [12.3,]
def testIsBusy(self):
assert not self.dcp.isBusy()
| DiamondLightSource/diffcalc | test/diffcalc/gdasupport/scannable/test_parameter.py | Python | gpl-3.0 | 1,337 |
#!/usr/bin/python2.7
import sys
import csv
import yaml
import codecs
TO_BE_TRANSLATED_MARK = "***TO BE TRANSLATED***"
def collect(result, node, prefix=None):
for key,value in node.items():
new_prefix = (key if prefix == None else prefix + "." + key)
if isinstance(value, dict):
collect(result, value, new_prefix)
else:
result[new_prefix] = value
def collect_old_csv(filename):
result = {}
reader = csv.reader(open(filename))
for row in reader:
if TO_BE_TRANSLATED_MARK not in row[1]:
result[row[0]] = row[1].decode("utf-8")
return result
def flatten(namespace=None,old_csv=None):
namespace = "" if namespace == None else namespace + "."
en_src = yaml.load(open("%sen.yml" % namespace))
ja_src = yaml.load(open("%sja.yml" % namespace))
en = {}
collect(en, en_src["en"])
ja = {}
collect(ja, ja_src["ja"])
ja_old = collect_old_csv(old_csv) if old_csv else {}
writer = csv.writer(sys.stdout)
for key,value in sorted(en.items()):
val = TO_BE_TRANSLATED_MARK + value
if key in ja: val = ja[key]
elif key in ja_old: val = ja_old[key]
writer.writerow([key, val.encode("UTF-8")])
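# Each row written above is "<dotted.key>,<value>"; keys missing from both
# ja.yml and the old CSV keep the ***TO BE TRANSLATED*** prefix so they are
# easy to spot in the output.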
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: yaml2csv.py namespace('server'|'client') [old-translated-csv-file]"
sys.exit(1)
flatten(sys.argv[1], None if len(sys.argv) < 3 else sys.argv[2])
| shimarin/discourse-ja-translation | yaml2csv.py | Python | gpl-2.0 | 1,474 |
#~ import sensors
import time
import json  # json.load() is needed in createPoppyCreature when FULL_POPPY_HUMANOID is False
HAS_REAL_ROBOT = True
HAS_RAZOR = False
#############
# PARAMATERS
#############
### Robot config ###
FULL_POPPY_HUMANOID = True
robot_configuration_file = "/home/poppy/poppy.json" #Needed only if FULL_POPPY_HUMANOID is False
###
#TODO: when we have several functions of that type, create a utils script
def createPoppyCreature():
poppy = None
if FULL_POPPY_HUMANOID:
from poppy_humanoid import PoppyHumanoid
try:
poppy = PoppyHumanoid()
except Exception,e:
print "could not create poppy object"
print e
else:
import pypot.robot
try:
with open(robot_configuration_file) as f:
poppy_config = json.load(f)
poppy = pypot.robot.from_config(poppy_config)
poppy.start_sync()
except Exception,e:
print "could not create poppy object"
print e
return poppy
#############
## MAIN
#############
razor = None
if HAS_RAZOR:
from sensors import razor
razor = razor.Razor()
razor.start()
if HAS_REAL_ROBOT:
poppy = createPoppyCreature()
for m in poppy.motors:
m.compliant = False
#~ m.torque_limit = 80
time.sleep(0.5)
for m in poppy.motors:
m.goto_position(0.0, 1., wait=False)
#~ time.sleep(2)
else:
poppy = None
#~ import Walker4 as Walker
import Walker
walker = Walker.Walker(poppy, razor)
if walker.init():
walker.startWalk()
#~ for i in range(0,4):
while not walker.mustStopWalk():
walker.oneStep()
walker.stopWalk()
walker.clean()
if HAS_REAL_ROBOT:
time.sleep(0.5)
for m in poppy.motors:
m.compliant = True
time.sleep(0.5)
if not FULL_POPPY_HUMANOID:
poppy.stop_sync()
poppy.close()
if HAS_RAZOR:
razor.stop()
| HumaRobotics/poppy-walk | main.py | Python | gpl-2.0 | 1,936 |
"""
Module contains class needed for adding and compressing data from directory.
"""
import os
import os.path as op
import re
class VersionsProcessor(object):
def __init__(self, directory_processor):
self._directory_processor = directory_processor
def setDirectoryProcessor(self, directory_processor):
self._directory_processor = directory_processor
def _natural_sort_key(self, (_, directory)):
return [int(s) if s.isdigit() else s
for s in re.split(r'(\d+)', directory)]
def runSimulation(self, directory):
"""Processes directory with versions as subdirectories.
Args:
directory: directory with versions.
Returns:
pairs (version directory, compressed data representing the version)
Raises:
ChunkerException in case of errors during communication.
OSError
IOError
"""
all_files = [(op.join(directory, file_name), file_name)
for file_name in os.listdir(directory)]
all_dirs = sorted([(x, y) for (x, y) in all_files if op.isdir(x)],
key=self._natural_sort_key)
logger = self._directory_processor.getLogger()
for full_path, version_dir in all_dirs:
if logger:
before_blocks = logger.getTotalBlocks()
before_dups = logger.getDuplicates()
processed = self._directory_processor.processDirectory(full_path)
if logger:
new_blocks = logger.getTotalBlocks() - before_blocks
duplicates = logger.getDuplicates() - before_dups
percent = "{0:.2f}%".format(float(duplicates) * 100 /
(duplicates + new_blocks))
yield (version_dir + " " + percent +
" duplicates", processed)
else:
yield (version_dir, processed)
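# Example usage (sketch only -- `directory_processor` stands for whatever object
# exposes the processDirectory()/getLogger() interface assumed by the class above):
#
#   processor = VersionsProcessor(directory_processor)
#   for version_dir, compressed in processor.runSimulation("/path/to/versions"):
#       store(version_dir, compressed)   # hypothetical sink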
| zpp-2014-15/deltacompression | deltacompression/backend/versions_processor.py | Python | mit | 1,968 |
#!/usr/bin/env python3
import sys
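# Part two of the 2017 day-17 "spinlock" puzzle: value 0 never moves from
# index 0, so instead of materialising a 50,000,000-element buffer, main()
# only records the value most recently inserted at index 1 (at_1).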
def main(args):
#cnt = 2017
cnt = 50000000
n = 3
n = 345
size = 0
buf = [0]
pos = 0
at_1 = None
for i in range(cnt):
pos = (pos + n) % (i+1)
if pos == 0:
at_1 = i+1
pos += 1
print(at_1)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| msullivan/advent-of-code | 2017/17b.py | Python | mit | 358 |
from parseJSONdata import parseData
# Edit Here
negativeFileName = 'md_neg_'
positiveFileName = 'md_pos_'
neutralFileName = 'md_neu_'
numNegative = 20
numPositive = 1
numNeutral = 3
parseData(negativeFileName, positiveFileName, neutralFileName, numNegative, numPositive, numNeutral)
#!/bin/env python
# -*- coding: utf-8 -*-
"""
Server: 4 processes listening on 4 ports.
Client: 8 processes, each sending 40,000 echo requests.
real 0m19.643s
user 0m24.450s
sys 0m6.231s
QPS = 320,000 requests / ~20 s = ~16,000
"""
import socket
import time
from threading import Thread
from multiprocessing import Process
import sys
sys.path.append('../../src/util/')
from pys_define import *
from pys_package import *
from pys_common import *
from pys_raw_socket import *
process_cnt =1
total_req_cnt =1
server_port_cnt =1
req_echo ={u'cmd':u'echo',u'data':u'hello,world!'}
req_login ={'cmd':'login','name':u'zhangsan'.encode(u'UTF-8'),u'pwd':u'123456'}
req_uplocation ={u'cmd':u'uplocation',u'uid':1024,'x':'100.101','y':'200.202'}
reqs={u'echo':req_echo,u'login':req_login,u'uplocation':req_uplocation}
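# Wire format, as implied by Run()/Entry() below: a request dict is serialised
# with SerializeFilter(payload, sessionkey) and framed by PackPackage(); a
# response starts with a PYS_HEAD_LEN-byte header whose body length is read via
# GetBodyLenByHeader(), and the body is decoded with DeserializeFilter().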
def Run(sock,sessionkey,cmd,showlog):
req =reqs[cmd]
data =SerializeFilter(req,sessionkey)
data =PackPackage(data)
n =0
while n<total_req_cnt:
n +=1
sock.sendall(data)
if showlog:
print 'sent: n=%d,len(data)=%d,req=%s'%(n,len(data),req)
header =recvall(sock,PYS_HEAD_LEN)
if len(header) <1:
print 'peer closed this session when recv header!'
break
bodyLen =GetBodyLenByHeader(header)
recvdata =recvall(sock,bodyLen)
if len(recvdata) <1:
print 'peer closed this session when recv body!'
break
rsp =DeserializeFilter(recvdata,sessionkey)
if showlog:
print 'recved: n=%d,len(data)=%d,rsp=%s'%(n,PYS_HEAD_LEN+len(recvdata),rsp)
def Entry(id,showlog):
# echo/login/uplocation
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1',8600+id%server_port_cnt))
initsession =GenInitSessionData()
sessionkey =initsession['sessionkey']
data =PackInitSession(initsession)
data =PackPackage(data)
sock.sendall(data)
# print 'sent InitSession: len(data)=%d'%(len(data))
Run(sock,sessionkey,'login',showlog)
print 'login finished.'
time.sleep(16)
Run(sock,sessionkey,'uplocation',showlog)
print 'uplocation finished.'
time.sleep(16)
print 'ready to close.'
sock.close()
if __name__ == '__main__':
showlog =1
process_cnt =1
total_req_cnt =1
server_port_cnt =1
    # multiple processes
l = [ Process(target = Entry, args=(i,showlog)) for i in xrange(0,process_cnt) ]
    # multiple threads
#l = [ Thread(target = Run) for i in xrange(0,8) ]
for i in l:
i.start()
for i in l:
i.join()
| dungeonsnd/test-code | dev_examples/pyserver/test/client/client_procedure.py | Python | gpl-3.0 | 2,598 |
#!/usr/bin/python2
import unittest
from ipapython.dn import *
def expected_class(klass, component):
if klass is AVA:
if component == 'self':
return AVA
elif klass is RDN:
if component == 'self':
return RDN
elif component == 'AVA':
return AVA
elif klass is DN:
if component == 'self':
return DN
elif component == 'AVA':
return AVA
elif component == 'RDN':
return RDN
raise ValueError("class %s with component '%s' unknown" % (klass.__name__, component))
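# In plain terms (summary of the mapping above): indexing an RDN yields AVA
# objects, indexing a DN yields RDN objects (whose elements are AVAs), and each
# class maps to itself for 'self'; assertExpectedClass() in the test cases
# below relies on exactly this.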
class TestAVA(unittest.TestCase):
def setUp(self):
self.attr1 = 'cn'
self.value1 = 'Bob'
self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
self.ava1 = AVA(self.attr1, self.value1)
self.attr2 = 'ou'
self.value2 = 'People'
self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
self.ava2 = AVA(self.attr2, self.value2)
self.attr3 = 'c'
self.value3 = 'US'
self.str_ava3 = '%s=%s' % (self.attr3, self.value3)
self.ava3 = AVA(self.attr3, self.value3)
def assertExpectedClass(self, klass, obj, component):
self.assertIs(obj.__class__, expected_class(klass, component))
def test_create(self):
# Create with attr,value pair
ava1 = AVA(self.attr1, self.value1)
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1, self.ava1)
# Create with "attr=value" string
ava1 = AVA(self.str_ava1)
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1, self.ava1)
# Create with tuple (attr, value)
ava1 = AVA((self.attr1, self.value1))
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1, self.ava1)
# Create with list [attr, value]
ava1 = AVA([self.attr1, self.value1])
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1, self.ava1)
# Create with no args should fail
with self.assertRaises(TypeError):
AVA()
# Create with more than 3 args should fail
with self.assertRaises(TypeError):
AVA(self.attr1, self.value1, self.attr1, self.attr1)
# Create with 1 arg which is not string should fail
with self.assertRaises(TypeError):
AVA(1)
# Create with malformed AVA string should fail
with self.assertRaises(ValueError):
AVA("cn")
# Create with non-string parameters, should convert
ava1 = AVA(1, self.value1)
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1.attr, u'1')
ava1 = AVA((1, self.value1))
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1.attr, u'1')
ava1 = AVA(self.attr1, 1)
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1.value, u'1')
ava1 = AVA((self.attr1, 1))
self.assertExpectedClass(AVA, ava1, 'self')
self.assertEqual(ava1.value, u'1')
def test_indexing(self):
ava1 = AVA(self.ava1)
self.assertEqual(ava1[self.attr1], self.value1)
self.assertEqual(ava1[0], self.attr1)
self.assertEqual(ava1[1], self.value1)
with self.assertRaises(KeyError):
ava1['foo']
with self.assertRaises(KeyError):
ava1[3]
def test_properties(self):
ava1 = AVA(self.ava1)
self.assertEqual(ava1.attr, self.attr1)
self.assertIsInstance(ava1.attr, unicode)
self.assertEqual(ava1.value, self.value1)
self.assertIsInstance(ava1.value, unicode)
def test_str(self):
ava1 = AVA(self.ava1)
self.assertEqual(str(ava1), self.str_ava1)
self.assertIsInstance(str(ava1), str)
def test_cmp(self):
# Equality
ava1 = AVA(self.attr1, self.value1)
self.assertTrue(ava1 == self.ava1)
self.assertFalse(ava1 != self.ava1)
self.assertTrue(ava1 == self.str_ava1)
self.assertFalse(ava1 != self.str_ava1)
result = cmp(ava1, self.ava1)
self.assertEqual(result, 0)
# Upper case attr should still be equal
ava1 = AVA(self.attr1.upper(), self.value1)
self.assertFalse(ava1.attr == self.attr1)
self.assertTrue(ava1.value == self.value1)
self.assertTrue(ava1 == self.ava1)
self.assertFalse(ava1 != self.ava1)
result = cmp(ava1, self.ava1)
self.assertEqual(result, 0)
# Upper case value should still be equal
ava1 = AVA(self.attr1, self.value1.upper())
self.assertTrue(ava1.attr == self.attr1)
self.assertFalse(ava1.value == self.value1)
self.assertTrue(ava1 == self.ava1)
self.assertFalse(ava1 != self.ava1)
result = cmp(ava1, self.ava1)
self.assertEqual(result, 0)
# Make ava1's attr greater
with self.assertRaises(AttributeError):
ava1.attr = self.attr1 + "1"
ava1 = AVA(self.attr1 + "1", self.value1.upper())
self.assertFalse(ava1 == self.ava1)
self.assertTrue(ava1 != self.ava1)
result = cmp(ava1, self.ava1)
self.assertEqual(result, 1)
result = cmp(self.ava1, ava1)
self.assertEqual(result, -1)
# Reset ava1's attr, should be equal again
with self.assertRaises(AttributeError):
ava1.attr = self.attr1
ava1 = AVA(self.attr1, self.value1.upper())
result = cmp(ava1, self.ava1)
self.assertEqual(result, 0)
# Make ava1's value greater
        # attr will be equal, so this tests the secondary comparison component
with self.assertRaises(AttributeError):
ava1.value = self.value1 + "1"
ava1 = AVA(self.attr1, self.value1 + "1")
result = cmp(ava1, self.ava1)
self.assertEqual(result, 1)
result = cmp(self.ava1, ava1)
self.assertEqual(result, -1)
def test_hashing(self):
# create AVA's that are equal but differ in case
ava1 = AVA((self.attr1.lower(), self.value1.upper()))
ava2 = AVA((self.attr1.upper(), self.value1.lower()))
# AVAs that are equal should hash to the same value.
self.assertEqual(ava1, ava2)
self.assertEqual(hash(ava1), hash(ava2))
# Different AVA objects with the same value should
# map to 1 common key and 1 member in a set. The key and
# member are based on the object's value.
ava1_a = AVA(self.ava1)
ava1_b = AVA(self.ava1)
ava2_a = AVA(self.ava2)
ava2_b = AVA(self.ava2)
ava3_a = AVA(self.ava3)
ava3_b = AVA(self.ava3)
self.assertEqual(ava1_a, ava1_b)
self.assertEqual(ava2_a, ava2_b)
self.assertEqual(ava3_a, ava3_b)
d = dict()
s = set()
d[ava1_a] = str(ava1_a)
d[ava1_b] = str(ava1_b)
d[ava2_a] = str(ava2_a)
d[ava2_b] = str(ava2_b)
s.add(ava1_a)
s.add(ava1_b)
s.add(ava2_a)
s.add(ava2_b)
self.assertEqual(len(d), 2)
self.assertEqual(len(s), 2)
self.assertEqual(sorted(d), sorted([ava1_a, ava2_a]))
self.assertEqual(sorted(s), sorted([ava1_a, ava2_a]))
self.assertTrue(ava1_a in d)
self.assertTrue(ava1_b in d)
self.assertTrue(ava2_a in d)
self.assertTrue(ava2_b in d)
self.assertFalse(ava3_a in d)
self.assertFalse(ava3_b in d)
self.assertTrue(ava1_a in s)
self.assertTrue(ava1_b in s)
self.assertTrue(ava2_a in s)
self.assertTrue(ava2_b in s)
self.assertFalse(ava3_a in s)
self.assertFalse(ava3_b in s)
class TestRDN(unittest.TestCase):
def setUp(self):
# ava1 must sort before ava2
self.attr1 = 'cn'
self.value1 = 'Bob'
self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
self.ava1 = AVA(self.attr1, self.value1)
self.str_rdn1 = '%s=%s' % (self.attr1, self.value1)
self.rdn1 = RDN((self.attr1, self.value1))
self.attr2 = 'ou'
self.value2 = 'people'
self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
self.ava2 = AVA(self.attr2, self.value2)
self.str_rdn2 = '%s=%s' % (self.attr2, self.value2)
self.rdn2 = RDN((self.attr2, self.value2))
self.str_ava3 = '%s=%s+%s=%s' % (self.attr1, self.value1, self.attr2, self.value2)
self.str_rdn3 = '%s=%s+%s=%s' % (self.attr1, self.value1, self.attr2, self.value2)
self.rdn3 = RDN(self.ava1, self.ava2)
def assertExpectedClass(self, klass, obj, component):
self.assertIs(obj.__class__, expected_class(klass, component))
def test_create(self):
# Create with single attr,value pair
rdn1 = RDN((self.attr1, self.value1))
self.assertEqual(len(rdn1), 1)
self.assertEqual(rdn1, self.rdn1)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
self.assertEqual(rdn1[0], self.ava1)
# Create with multiple attr,value pairs
rdn3 = RDN((self.attr1, self.value1), (self.attr2, self.value2))
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with multiple attr,value pairs passed as lists
rdn3 = RDN([self.attr1, self.value1], [self.attr2, self.value2])
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with multiple attr,value pairs but reverse
# constructor parameter ordering. RDN canonical ordering
# should remain the same
rdn3 = RDN((self.attr2, self.value2), (self.attr1, self.value1))
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with single AVA object
rdn1 = RDN(self.ava1)
self.assertEqual(len(rdn1), 1)
self.assertEqual(rdn1, self.rdn1)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
self.assertEqual(rdn1[0], self.ava1)
# Create with multiple AVA objects
rdn3 = RDN(self.ava1, self.ava2)
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with multiple AVA objects but reverse constructor
# parameter ordering. RDN canonical ordering should remain
# the same
rdn3 = RDN(self.ava2, self.ava1)
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
# Create with single string with 1 AVA
rdn1 = RDN(self.str_rdn1)
self.assertEqual(len(rdn1), 1)
self.assertEqual(rdn1, self.rdn1)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
self.assertEqual(rdn1[0], self.ava1)
# Create with single string with 2 AVA's
rdn3 = RDN(self.str_rdn3)
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[1], self.ava2)
def test_properties(self):
rdn1 = RDN(self.rdn1)
rdn2 = RDN(self.rdn2)
rdn3 = RDN(self.rdn3)
self.assertEqual(rdn1.attr, self.attr1)
self.assertIsInstance(rdn1.attr, unicode)
self.assertEqual(rdn1.value, self.value1)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn2.attr, self.attr2)
self.assertIsInstance(rdn2.attr, unicode)
self.assertEqual(rdn2.value, self.value2)
self.assertIsInstance(rdn2.value, unicode)
self.assertEqual(rdn3.attr, self.attr1)
self.assertIsInstance(rdn3.attr, unicode)
self.assertEqual(rdn3.value, self.value1)
self.assertIsInstance(rdn3.value, unicode)
def test_str(self):
rdn1 = RDN(self.rdn1)
rdn2 = RDN(self.rdn2)
rdn3 = RDN(self.rdn3)
self.assertEqual(str(rdn1), self.str_rdn1)
self.assertIsInstance(str(rdn1), str)
self.assertEqual(str(rdn2), self.str_rdn2)
self.assertIsInstance(str(rdn2), str)
self.assertEqual(str(rdn3), self.str_rdn3)
self.assertIsInstance(str(rdn3), str)
def test_cmp(self):
# Equality
rdn1 = RDN((self.attr1, self.value1))
self.assertTrue(rdn1 == self.rdn1)
self.assertFalse(rdn1 != self.rdn1)
self.assertTrue(rdn1 == self.str_rdn1)
self.assertFalse(rdn1 != self.str_rdn1)
result = cmp(rdn1, self.rdn1)
self.assertEqual(result, 0)
# Make rdn1's attr greater
rdn1 = RDN((self.attr1 + "1", self.value1))
self.assertFalse(rdn1 == self.rdn1)
self.assertTrue(rdn1 != self.rdn1)
result = cmp(rdn1, self.rdn1)
self.assertEqual(result, 1)
result = cmp(self.rdn1, rdn1)
self.assertEqual(result, -1)
# Reset rdn1's attr, should be equal again
rdn1 = RDN((self.attr1, self.value1))
result = cmp(rdn1, self.rdn1)
self.assertEqual(result, 0)
# Make rdn1's value greater
        # attr will be equal, so this tests the secondary comparison component
rdn1 = RDN((self.attr1, self.value1 + "1"))
result = cmp(rdn1, self.rdn1)
self.assertEqual(result, 1)
result = cmp(self.rdn1, rdn1)
self.assertEqual(result, -1)
# Make sure rdn's with more ava's are greater
result = cmp(self.rdn1, self.rdn3)
self.assertEqual(result, -1)
result = cmp(self.rdn3, self.rdn1)
self.assertEqual(result, 1)
def test_indexing(self):
rdn1 = RDN(self.rdn1)
rdn2 = RDN(self.rdn2)
rdn3 = RDN(self.rdn3)
self.assertEqual(rdn1[0], self.ava1)
self.assertEqual(rdn1[self.ava1.attr], self.ava1.value)
with self.assertRaises(KeyError):
rdn1['foo']
self.assertEqual(rdn2[0], self.ava2)
self.assertEqual(rdn2[self.ava2.attr], self.ava2.value)
with self.assertRaises(KeyError):
rdn2['foo']
self.assertEqual(rdn3[0], self.ava1)
self.assertEqual(rdn3[self.ava1.attr], self.ava1.value)
self.assertEqual(rdn3[1], self.ava2)
self.assertEqual(rdn3[self.ava2.attr], self.ava2.value)
with self.assertRaises(KeyError):
rdn3['foo']
self.assertEqual(rdn1.attr, self.attr1)
self.assertEqual(rdn1.value, self.value1)
with self.assertRaises(TypeError):
rdn3[1.0]
# Slices
self.assertEqual(rdn3[0:1], [self.ava1])
self.assertEqual(rdn3[:], [self.ava1, self.ava2])
def test_assignments(self):
rdn = RDN((self.attr1, self.value1))
with self.assertRaises(TypeError):
rdn[0] = self.ava2
def test_iter(self):
rdn1 = RDN(self.rdn1)
rdn2 = RDN(self.rdn2)
rdn3 = RDN(self.rdn3)
self.assertEqual(len(rdn1), 1)
self.assertEqual(rdn1[:], [self.ava1])
for i, ava in enumerate(rdn1):
if i == 0:
self.assertEqual(ava, self.ava1)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(rdn1)))
self.assertEqual(len(rdn2), 1)
self.assertEqual(rdn2[:], [self.ava2])
for i, ava in enumerate(rdn2):
if i == 0:
self.assertEqual(ava, self.ava2)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(rdn2)))
self.assertEqual(len(rdn3), 2)
self.assertEqual(rdn3[:], [self.ava1, self.ava2])
for i, ava in enumerate(rdn3):
if i == 0:
self.assertEqual(ava, self.ava1)
elif i == 1:
self.assertEqual(ava, self.ava2)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(rdn3)))
def test_concat(self):
rdn1 = RDN((self.attr1, self.value1))
rdn2 = RDN((self.attr2, self.value2))
        # in-place addition
rdn1 += rdn2
self.assertEqual(rdn1, self.rdn3)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
rdn1 = RDN((self.attr1, self.value1))
rdn1 += self.ava2
self.assertEqual(rdn1, self.rdn3)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
rdn1 = RDN((self.attr1, self.value1))
rdn1 += self.str_ava2
self.assertEqual(rdn1, self.rdn3)
self.assertExpectedClass(RDN, rdn1, 'self')
for i in range(0, len(rdn1)):
self.assertExpectedClass(RDN, rdn1[i], 'AVA')
# concatenation
rdn1 = RDN((self.attr1, self.value1))
rdn3 = rdn1 + rdn2
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
rdn3 = rdn1 + self.ava2
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
rdn3 = rdn1 + self.str_ava2
self.assertEqual(rdn3, self.rdn3)
self.assertExpectedClass(RDN, rdn3, 'self')
for i in range(0, len(rdn3)):
self.assertExpectedClass(RDN, rdn3[i], 'AVA')
def test_hashing(self):
# create RDN's that are equal but differ in case
rdn1 = RDN((self.attr1.lower(), self.value1.upper()))
rdn2 = RDN((self.attr1.upper(), self.value1.lower()))
# RDNs that are equal should hash to the same value.
self.assertEqual(rdn1, rdn2)
self.assertEqual(hash(rdn1), hash(rdn2))
class TestDN(unittest.TestCase):
def setUp(self):
# ava1 must sort before ava2
self.attr1 = 'cn'
self.value1 = 'Bob'
self.str_ava1 = '%s=%s' % (self.attr1, self.value1)
self.ava1 = AVA(self.attr1, self.value1)
self.str_rdn1 = '%s=%s' % (self.attr1, self.value1)
self.rdn1 = RDN((self.attr1, self.value1))
self.attr2 = 'ou'
self.value2 = 'people'
self.str_ava2 = '%s=%s' % (self.attr2, self.value2)
self.ava2 = AVA(self.attr2, self.value2)
self.str_rdn2 = '%s=%s' % (self.attr2, self.value2)
self.rdn2 = RDN((self.attr2, self.value2))
self.str_dn1 = self.str_rdn1
self.dn1 = DN(self.rdn1)
self.str_dn2 = self.str_rdn2
self.dn2 = DN(self.rdn2)
self.str_dn3 = '%s,%s' % (self.str_rdn1, self.str_rdn2)
self.dn3 = DN(self.rdn1, self.rdn2)
self.base_rdn1 = RDN(('dc', 'redhat'))
self.base_rdn2 = RDN(('dc', 'com'))
self.base_dn = DN(self.base_rdn1, self.base_rdn2)
self.container_rdn1 = RDN(('cn', 'sudorules'))
self.container_rdn2 = RDN(('cn', 'sudo'))
self.container_dn = DN(self.container_rdn1, self.container_rdn2)
self.base_container_dn = DN((self.attr1, self.value1),
self.container_dn, self.base_dn)
def assertExpectedClass(self, klass, obj, component):
self.assertIs(obj.__class__, expected_class(klass, component))
def test_create(self):
# Create with single attr,value pair
dn1 = DN((self.attr1, self.value1))
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Create with single attr,value pair passed as a tuple
dn1 = DN((self.attr1, self.value1))
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Creation with multiple attr,value string pairs should fail
with self.assertRaises(ValueError):
dn1 = DN(self.attr1, self.value1, self.attr2, self.value2)
# Create with multiple attr,value pairs passed as tuples & lists
dn1 = DN((self.attr1, self.value1), [self.attr2, self.value2])
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with multiple attr,value pairs passed as tuple and RDN
dn1 = DN((self.attr1, self.value1), RDN((self.attr2, self.value2)))
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with multiple attr,value pairs but reverse
# constructor parameter ordering. RDN ordering should also be
        # reversed because DNs are an ordered sequence of RDNs
dn1 = DN((self.attr2, self.value2), (self.attr1, self.value1))
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn2)
self.assertEqual(dn1[1], self.rdn1)
# Create with single RDN object
dn1 = DN(self.rdn1)
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Create with multiple RDN objects, assure ordering is preserved.
dn1 = DN(self.rdn1, self.rdn2)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with multiple RDN objects in different order, assure
# ordering is preserved.
dn1 = DN(self.rdn2, self.rdn1)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn2)
self.assertEqual(dn1[1], self.rdn1)
# Create with single string with 1 RDN
dn1 = DN(self.str_rdn1)
self.assertEqual(len(dn1), 1)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
# Create with single string with 2 RDN's
dn1 = DN(self.str_dn3)
self.assertEqual(len(dn1), 2)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
self.assertIsInstance(dn1[i].attr, unicode)
self.assertIsInstance(dn1[i].value, unicode)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[1], self.rdn2)
# Create with RDN, and 2 DN's (e.g. attr + container + base)
dn1 = DN((self.attr1, self.value1), self.container_dn, self.base_dn)
self.assertEqual(len(dn1), 5)
dn_str = ','.join([str(self.rdn1),
str(self.container_rdn1), str(self.container_rdn2),
str(self.base_rdn1), str(self.base_rdn2)])
self.assertEqual(str(dn1), dn_str)
def test_str(self):
dn1 = DN(self.dn1)
dn2 = DN(self.dn2)
dn3 = DN(self.dn3)
self.assertEqual(str(dn1), self.str_dn1)
self.assertIsInstance(str(dn1), str)
self.assertEqual(str(dn2), self.str_dn2)
self.assertIsInstance(str(dn2), str)
self.assertEqual(str(dn3), self.str_dn3)
self.assertIsInstance(str(dn3), str)
def test_cmp(self):
# Equality
dn1 = DN((self.attr1, self.value1))
self.assertTrue(dn1 == self.dn1)
self.assertFalse(dn1 != self.dn1)
self.assertTrue(dn1 == self.str_dn1)
self.assertFalse(dn1 != self.str_dn1)
result = cmp(dn1, self.dn1)
self.assertEqual(result, 0)
# Make dn1's attr greater
with self.assertRaises(AttributeError):
dn1[0].attr = self.attr1 + "1"
dn1 = DN((self.attr1 + "1", self.value1))
self.assertFalse(dn1 == self.dn1)
self.assertTrue(dn1 != self.dn1)
result = cmp(dn1, self.dn1)
self.assertEqual(result, 1)
result = cmp(self.dn1, dn1)
self.assertEqual(result, -1)
# Reset dn1's attr, should be equal again
with self.assertRaises(AttributeError):
dn1[0].attr = self.attr1
dn1 = DN((self.attr1, self.value1))
result = cmp(dn1, self.dn1)
self.assertEqual(result, 0)
# Make dn1's value greater
        # attr will be equal, so this tests the secondary comparison component
with self.assertRaises(AttributeError):
dn1[0].value = self.value1 + "1"
dn1 = DN((self.attr1, self.value1 + "1"))
result = cmp(dn1, self.dn1)
self.assertEqual(result, 1)
result = cmp(self.dn1, dn1)
self.assertEqual(result, -1)
# Make sure dn's with more rdn's are greater
result = cmp(self.dn1, self.dn3)
self.assertEqual(result, -1)
result = cmp(self.dn3, self.dn1)
self.assertEqual(result, 1)
# Test startswith, endswith
container_dn = DN(self.container_dn)
base_container_dn = DN(self.base_container_dn)
self.assertTrue(base_container_dn.startswith(self.rdn1))
self.assertTrue(base_container_dn.startswith(self.dn1))
self.assertTrue(base_container_dn.startswith(self.dn1 + container_dn))
self.assertFalse(base_container_dn.startswith(self.dn2))
self.assertFalse(base_container_dn.startswith(self.rdn2))
self.assertTrue(base_container_dn.startswith((self.dn1)))
self.assertTrue(base_container_dn.startswith((self.rdn1)))
self.assertFalse(base_container_dn.startswith((self.rdn2)))
self.assertTrue(base_container_dn.startswith((self.rdn2, self.rdn1)))
self.assertTrue(base_container_dn.startswith((self.dn1, self.dn2)))
self.assertTrue(base_container_dn.endswith(self.base_dn))
self.assertTrue(base_container_dn.endswith(container_dn + self.base_dn))
self.assertFalse(base_container_dn.endswith(DN(self.base_rdn1)))
self.assertTrue(base_container_dn.endswith(DN(self.base_rdn2)))
self.assertTrue(base_container_dn.endswith((DN(self.base_rdn1), DN(self.base_rdn2))))
# Test "in" membership
self.assertTrue(self.container_rdn1 in container_dn)
self.assertTrue(container_dn in container_dn)
self.assertFalse(self.base_rdn1 in container_dn)
self.assertTrue(self.container_rdn1 in base_container_dn)
self.assertTrue(container_dn in base_container_dn)
self.assertTrue(container_dn + self.base_dn in
base_container_dn)
self.assertTrue(self.dn1 + container_dn + self.base_dn in
base_container_dn)
self.assertTrue(self.dn1 + container_dn + self.base_dn ==
base_container_dn)
self.assertFalse(self.container_rdn1 in self.base_dn)
def test_indexing(self):
dn1 = DN(self.dn1)
dn2 = DN(self.dn2)
dn3 = DN(self.dn3)
self.assertEqual(dn1[0], self.rdn1)
self.assertEqual(dn1[self.rdn1.attr], self.rdn1.value)
with self.assertRaises(KeyError):
dn1['foo']
self.assertEqual(dn2[0], self.rdn2)
self.assertEqual(dn2[self.rdn2.attr], self.rdn2.value)
with self.assertRaises(KeyError):
dn2['foo']
self.assertEqual(dn3[0], self.rdn1)
self.assertEqual(dn3[self.rdn1.attr], self.rdn1.value)
self.assertEqual(dn3[1], self.rdn2)
self.assertEqual(dn3[self.rdn2.attr], self.rdn2.value)
with self.assertRaises(KeyError):
dn3['foo']
with self.assertRaises(TypeError):
dn3[1.0]
def test_assignments(self):
dn = dn2 = DN('t=0,t=1,t=2,t=3,t=4,t=5,t=6,t=7,t=8,t=9')
with self.assertRaises(TypeError):
dn[0] = RDN('t=a')
with self.assertRaises(TypeError):
dn[0:1] = [RDN('t=a'), RDN('t=b')]
def test_iter(self):
dn1 = DN(self.dn1)
dn2 = DN(self.dn2)
dn3 = DN(self.dn3)
self.assertEqual(len(dn1), 1)
self.assertEqual(dn1[:], self.rdn1)
for i, ava in enumerate(dn1):
if i == 0:
self.assertEqual(ava, self.rdn1)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(self.rdn1)))
self.assertEqual(len(dn2), 1)
self.assertEqual(dn2[:], self.rdn2)
for i, ava in enumerate(dn2):
if i == 0:
self.assertEqual(ava, self.rdn2)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(self.rdn2)))
self.assertEqual(len(dn3), 2)
self.assertEqual(dn3[:], DN(self.rdn1, self.rdn2))
for i, ava in enumerate(dn3):
if i == 0:
self.assertEqual(ava, self.rdn1)
elif i == 1:
self.assertEqual(ava, self.rdn2)
else:
self.fail("got iteration index %d, but len=%d" % (i, len(dn3)))
def test_concat(self):
dn1 = DN((self.attr1, self.value1))
dn2 = DN([self.attr2, self.value2])
        # in-place addition
dn1 += dn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn1 += self.rdn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn1 += self.dn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn1 += self.str_dn2
self.assertEqual(dn1, self.dn3)
self.assertExpectedClass(DN, dn1, 'self')
for i in range(0, len(dn1)):
self.assertExpectedClass(DN, dn1[i], 'RDN')
for j in range(0, len(dn1[i])):
self.assertExpectedClass(DN, dn1[i][j], 'AVA')
# concatenation
dn1 = DN((self.attr1, self.value1))
dn3 = dn1 + dn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
dn1 = DN((self.attr1, self.value1))
dn3 = dn1 + self.rdn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
dn3 = dn1 + self.str_rdn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
self.assertExpectedClass(DN, dn3[i][0], 'AVA')
dn3 = dn1 + self.str_dn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
dn3 = dn1 + self.dn2
self.assertEqual(dn3, self.dn3)
self.assertExpectedClass(DN, dn3, 'self')
self.assertExpectedClass(DN, dn3, 'self')
for i in range(0, len(dn3)):
self.assertExpectedClass(DN, dn3[i], 'RDN')
for j in range(0, len(dn3[i])):
self.assertExpectedClass(DN, dn3[i][j], 'AVA')
def test_find(self):
# -10 -9 -8 -7 -6 -5 -4 -3 -2 -1
dn = DN('t=0,t=1,cn=bob,t=3,t=4,t=5,cn=bob,t=7,t=8,t=9')
pat = DN('cn=bob')
# forward
self.assertEqual(dn.find(pat), 2)
self.assertEqual(dn.find(pat, 1), 2)
self.assertEqual(dn.find(pat, 1, 3), 2)
self.assertEqual(dn.find(pat, 2, 3), 2)
self.assertEqual(dn.find(pat, 6), 6)
self.assertEqual(dn.find(pat, 7), -1)
self.assertEqual(dn.find(pat, 1, 2), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.index(pat, 7), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.index(pat, 1, 2), -1)
# reverse
self.assertEqual(dn.rfind(pat), 6)
self.assertEqual(dn.rfind(pat, -4), 6)
self.assertEqual(dn.rfind(pat, 6), 6)
self.assertEqual(dn.rfind(pat, 6, 8), 6)
self.assertEqual(dn.rfind(pat, 6, 8), 6)
self.assertEqual(dn.rfind(pat, -8), 6)
self.assertEqual(dn.rfind(pat, -8, -4), 6)
self.assertEqual(dn.rfind(pat, -8, -5), 2)
self.assertEqual(dn.rfind(pat, 7), -1)
self.assertEqual(dn.rfind(pat, -3), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.rindex(pat, 7), -1)
with self.assertRaises(ValueError):
self.assertEqual(dn.rindex(pat, -3), -1)
def test_replace(self):
# pylint: disable=no-member
dn = DN('t=0,t=1,t=2,t=3,t=4,t=5,t=6,t=7,t=8,t=9')
with self.assertRaises(AttributeError):
dn.replace
def test_hashing(self):
# create DN's that are equal but differ in case
dn1 = DN((self.attr1.lower(), self.value1.upper()))
dn2 = DN((self.attr1.upper(), self.value1.lower()))
# DNs that are equal should hash to the same value.
self.assertEqual(dn1, dn2)
# Good, everyone's equal, now verify their hash values
self.assertEqual(hash(dn1), hash(dn2))
# Different DN objects with the same value should
# map to 1 common key and 1 member in a set. The key and
# member are based on the object's value.
dn1_a = DN(self.dn1)
dn1_b = DN(self.dn1)
dn2_a = DN(self.dn2)
dn2_b = DN(self.dn2)
dn3_a = DN(self.dn3)
dn3_b = DN(self.dn3)
self.assertEqual(dn1_a, dn1_b)
self.assertEqual(dn2_a, dn2_b)
self.assertEqual(dn3_a, dn3_b)
d = dict()
s = set()
d[dn1_a] = str(dn1_a)
d[dn1_b] = str(dn1_b)
d[dn2_a] = str(dn2_a)
d[dn2_b] = str(dn2_b)
s.add(dn1_a)
s.add(dn1_b)
s.add(dn2_a)
s.add(dn2_b)
self.assertEqual(len(d), 2)
self.assertEqual(len(s), 2)
self.assertEqual(sorted(d), sorted([dn1_a, dn2_a]))
self.assertEqual(sorted(s), sorted([dn1_a, dn2_a]))
self.assertTrue(dn1_a in d)
self.assertTrue(dn1_b in d)
self.assertTrue(dn2_a in d)
self.assertTrue(dn2_b in d)
self.assertFalse(dn3_a in d)
self.assertFalse(dn3_b in d)
self.assertTrue(dn1_a in s)
self.assertTrue(dn1_b in s)
self.assertTrue(dn2_a in s)
self.assertTrue(dn2_b in s)
self.assertFalse(dn3_a in s)
self.assertFalse(dn3_b in s)
class TestEscapes(unittest.TestCase):
def setUp(self):
self.privilege = 'R,W privilege'
self.dn_str_hex_escape = 'cn=R\\2cW privilege,cn=privileges,cn=pbac,dc=idm,dc=lab,dc=bos,dc=redhat,dc=com'
self.dn_str_backslash_escape = 'cn=R\\,W privilege,cn=privileges,cn=pbac,dc=idm,dc=lab,dc=bos,dc=redhat,dc=com'
def test_escape(self):
dn = DN(self.dn_str_hex_escape)
self.assertEqual(dn['cn'], self.privilege)
self.assertEqual(dn[0].value, self.privilege)
dn = DN(self.dn_str_backslash_escape)
self.assertEqual(dn['cn'], self.privilege)
self.assertEqual(dn[0].value, self.privilege)
class TestInternationalization(unittest.TestCase):
def setUp(self):
# Hello in Arabic
self.arabic_hello_utf8 = '\xd9\x85\xd9\x83\xd9\x8a\xd9\x84' + \
'\xd8\xb9\x20\xd9\x85\xd8\xa7\xd9' + \
'\x84\xd9\x91\xd8\xb3\xd9\x84\xd8\xa7'
self.arabic_hello_unicode = self.arabic_hello_utf8.decode('utf-8')
def test_i18n(self):
self.assertEqual(self.arabic_hello_utf8,
self.arabic_hello_unicode.encode('utf-8'))
# AVA's
# test attr i18n
ava1 = AVA(self.arabic_hello_unicode, 'foo')
self.assertIsInstance(ava1.attr, unicode)
self.assertIsInstance(ava1.value, unicode)
self.assertEqual(ava1.attr, self.arabic_hello_unicode)
self.assertEqual(str(ava1), self.arabic_hello_utf8+'=foo')
ava1 = AVA(self.arabic_hello_utf8, 'foo')
self.assertIsInstance(ava1.attr, unicode)
self.assertIsInstance(ava1.value, unicode)
self.assertEqual(ava1.attr, self.arabic_hello_unicode)
self.assertEqual(str(ava1), self.arabic_hello_utf8+'=foo')
# test value i18n
ava1 = AVA('cn', self.arabic_hello_unicode)
self.assertIsInstance(ava1.attr, unicode)
self.assertIsInstance(ava1.value, unicode)
self.assertEqual(ava1.value, self.arabic_hello_unicode)
self.assertEqual(str(ava1), 'cn='+self.arabic_hello_utf8)
ava1 = AVA('cn', self.arabic_hello_utf8)
self.assertIsInstance(ava1.attr, unicode)
self.assertIsInstance(ava1.value, unicode)
self.assertEqual(ava1.value, self.arabic_hello_unicode)
self.assertEqual(str(ava1), 'cn='+self.arabic_hello_utf8)
# RDN's
# test attr i18n
rdn1 = RDN((self.arabic_hello_unicode, 'foo'))
self.assertIsInstance(rdn1.attr, unicode)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn1.attr, self.arabic_hello_unicode)
self.assertEqual(str(rdn1), self.arabic_hello_utf8+'=foo')
rdn1 = RDN((self.arabic_hello_utf8, 'foo'))
self.assertIsInstance(rdn1.attr, unicode)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn1.attr, self.arabic_hello_unicode)
self.assertEqual(str(rdn1), self.arabic_hello_utf8+'=foo')
# test value i18n
rdn1 = RDN(('cn', self.arabic_hello_unicode))
self.assertIsInstance(rdn1.attr, unicode)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn1.value, self.arabic_hello_unicode)
self.assertEqual(str(rdn1), 'cn='+self.arabic_hello_utf8)
rdn1 = RDN(('cn', self.arabic_hello_utf8))
self.assertIsInstance(rdn1.attr, unicode)
self.assertIsInstance(rdn1.value, unicode)
self.assertEqual(rdn1.value, self.arabic_hello_unicode)
self.assertEqual(str(rdn1), 'cn='+self.arabic_hello_utf8)
# DN's
# test attr i18n
dn1 = DN((self.arabic_hello_unicode, 'foo'))
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0].attr, self.arabic_hello_unicode)
self.assertEqual(str(dn1), self.arabic_hello_utf8+'=foo')
dn1 = DN((self.arabic_hello_utf8, 'foo'))
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0].attr, self.arabic_hello_unicode)
self.assertEqual(str(dn1), self.arabic_hello_utf8+'=foo')
# test value i18n
dn1 = DN(('cn', self.arabic_hello_unicode))
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0].value, self.arabic_hello_unicode)
self.assertEqual(str(dn1), 'cn='+self.arabic_hello_utf8)
dn1 = DN(('cn', self.arabic_hello_utf8))
self.assertIsInstance(dn1[0].attr, unicode)
self.assertIsInstance(dn1[0].value, unicode)
self.assertEqual(dn1[0].value, self.arabic_hello_unicode)
self.assertEqual(str(dn1), 'cn='+self.arabic_hello_utf8)
if __name__ == '__main__':
unittest.main()
| msimacek/freeipa | ipatests/test_ipapython/test_dn.py | Python | gpl-3.0 | 45,314 |
"""
The actual record-keeping part of Cirdan.
"""
from __future__ import absolute_import
import inspect
import itertools
import re
from collections import OrderedDict
class Resource:
def __init__(self, cls):
self.title = cls.__name__
self.path = "???"
self.description = None
self.methods = []
self.secret = False
def safe_title(self):
assert(hasattr(self, "title"))
        return re.sub(r"[^A-Za-z0-9_\-]", "", self.title.strip().replace(" ", "_"))
def __str__(self):
return "%s - %s" % (self.title, self.path)
class RouteMethod:
def __init__(self, func):
self.verb = METHODS_TO_VERBS[func.__name__]
self.title = self.verb
self.description = None
self.parameters = []
self.return_statuses = []
self.content_type = None
self.requires_permission = None
self.secret = False
self.example_request = None
self.example_response = None
def __str__(self):
return "%s: %s" % (self.verb, str(self.title))
class Parameter:
def __init__(self, name, description, required):
self.name = name
self.description = description
self.required = required
def __str__(self):
return "%s: %s (required = %s" % (self.name, self.description, repr(self.required))
class ReturnStatus:
def __init__(self, status_code, description):
self.status_code = status_code
self.description = description
# this is an ordered dict because I am a lazy, lazy man
METHODS_TO_VERBS = OrderedDict([
("on_post", "POST"),
("on_get", "GET"),
("on_put", "PUT"),
("on_delete", "DELETE"),
("on_patch", "PATCH")
])
class Registry:
def __init__(self):
self.api_to_resources = {}
self.api_meta = {}
self.resources = {}
self.route_methods = {}
def get(self, item):
if inspect.isclass(item):
if id(item) not in self.resources:
self.resources[id(item)] = Resource(item)
return self.resources[id(item)]
else:
if id(item) not in self.route_methods:
self.route_methods[id(item)] = RouteMethod(item)
return self.route_methods[id(item)]
def knows_about(self, api):
return api in self.api_to_resources
def bind_api(self, api, resource):
if api not in self.api_to_resources:
self.api_to_resources[api] = []
self.api_to_resources[api].append(resource)
def set_api_meta(self, api, **kwargs):
self.api_meta[api] = kwargs
def dump(self, api):
for resource in self.api_to_resources[api]:
print(resource)
for method in resource.methods:
print("\t" + str(method))
registry = Registry()
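# Typical flow (sketch; `SomeResource` and `api` are hypothetical stand-ins for
# a resource class and the framework API object that Cirdan documents):
#
#   res = registry.get(SomeResource)   # creates and caches a Resource record
#   registry.bind_api(api, res)        # remembers which API the resource is on
#   registry.set_api_meta(api, title="My API")
#   registry.dump(api)                 # prints resources and their methods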
| forana/python-cirdan | cirdan/registry.py | Python | mit | 2,823 |
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
"""
Organism Creators module:
This module contains the classes used to create organisms for the initial
population.
1. RandomOrganismCreator: creates random organisms
2. FileOrganismCreator: creates organisms by reading their structures from
files
"""
from gasp.general import Organism, Cell
from pymatgen.core.lattice import Lattice
from pymatgen.core.composition import Composition
from fractions import Fraction
import warnings
import os
import math
import numpy as np
class RandomOrganismCreator(object):
"""
Creates random organisms for the initial population.
"""
def __init__(self, random_org_parameters, composition_space, constraints):
"""
Makes a RandomOrganismCreator, and sets default parameter values if
necessary.
Args:
random_org_parameters: the parameters for generating random
organisms
composition_space: the CompositionSpace of the search
constraints: the Constraints of the search
"""
self.name = 'random organism creator'
# defaults
#
# number of random organisms to make (only used for epa searches)
self.default_number = 28
# max number of atoms
if composition_space.objective_function == 'epa':
# make sure we can sample cells with two formula units
target_number = constraints.min_num_atoms + 6
num_formulas = target_number/composition_space.endpoints[
0].num_atoms
if num_formulas < 2:
min_of_max = int(2*composition_space.endpoints[0].num_atoms)
else:
min_of_max = int(round(
num_formulas)*composition_space.endpoints[0].num_atoms)
else:
min_of_max = constraints.min_num_atoms + 6
self.default_max_num_atoms = min(min_of_max, constraints.max_num_atoms)
# allow structure with compositions at the endpoints (for pd searches)
self.default_allow_endpoints = True
# volume scaling behavior
# default volumes per atom of elemental ground state structures
# computed from structures on materials project (materialsproject.org)
self.all_default_vpas = {'H': 13.89, 'He': 15.79, 'Li': 20.12,
'Be': 7.94, 'B': 7.25, 'C': 10.58,
'N': 42.73, 'O': 13.46, 'F': 16.00,
'Ne': 19.93, 'Na': 37.12, 'Mg': 23.04,
'Al': 16.47, 'Si': 20.44, 'P': 23.93,
'S': 36.03, 'Cl': 34.90, 'Ar': 44.87,
'K': 73.51, 'Ca': 42.42, 'Sc': 24.64,
'Ti': 17.11, 'V': 13.41, 'Cr': 11.57,
'Mn': 11.04, 'Fe': 11.55, 'Co': 10.92,
'Ni': 10.79, 'Cu': 11.82, 'Zn': 15.56,
'Ga': 20.34, 'Ge': 23.92, 'As': 22.45,
'Se': 38.13, 'Br': 37.53, 'Kr': 65.09,
'Rb': 90.44, 'Sr': 54.88, 'Y': 32.85,
'Zr': 23.50, 'Nb': 18.31, 'Mo': 15.89,
'Tc': 14.59, 'Ru': 13.94, 'Rh': 14.25,
'Pd': 15.45, 'Ag': 18.00, 'Cd': 23.28,
'In': 27.56, 'Sn': 36.70, 'Sb': 31.78,
'Te': 35.03, 'I': 50.34, 'Xe': 83.51,
'Cs': 116.17, 'Ba': 63.64, 'Hf': 22.50,
'Ta': 18.25, 'W': 16.19, 'Re': 15.06,
'Os': 14.36, 'Ir': 14.55, 'Pt': 15.72,
'Au': 18.14, 'Hg': 31.45, 'Tl': 31.13,
'Pb': 32.30, 'Bi': 36.60, 'La': 37.15,
'Ce': 26.30, 'Pr': 36.47, 'Nd': 35.44,
'Pm': 34.58, 'Sm': 33.88, 'Eu': 46.28,
'Gd': 33.33, 'Tb': 32.09, 'Dy': 31.57,
'Ho': 31.45, 'Er': 30.90, 'Tm': 30.30,
'Yb': 40.45, 'Lu': 29.43, 'Ac': 45.52,
'Th': 32.03, 'Pa': 25.21, 'U': 19.98,
'Np': 18.43, 'Pu': 18.34}
self.default_vpas = self.get_default_vpas(composition_space)
# set to defaults
if random_org_parameters in (None, 'default'):
self.number = self.default_number
self.max_num_atoms = self.default_max_num_atoms
self.allow_endpoints = self.default_allow_endpoints
self.vpas = self.default_vpas
# parse the parameters and set to defaults if necessary
else:
# the number to make
if 'number' not in random_org_parameters:
self.number = self.default_number
elif random_org_parameters['number'] in (None, 'default'):
self.number = self.default_number
else:
self.number = random_org_parameters['number']
# the max number of atoms
if 'max_num_atoms' not in random_org_parameters:
self.max_num_atoms = self.default_max_num_atoms
elif random_org_parameters['max_num_atoms'] in (None, 'default'):
self.max_num_atoms = self.default_max_num_atoms
elif random_org_parameters['max_num_atoms'] > \
constraints.max_num_atoms:
print('The value passed to the "max_num_atoms" keyword in the '
'InitialPopulation block may not exceed the value passed'
' to the "max_num_atoms" keyword in the Constraints '
'block.')
print('Quitting...')
quit()
elif random_org_parameters['max_num_atoms'] < \
constraints.min_num_atoms:
print('The value passed to the "max_num_atoms" keyword in the '
'InitialPopulation block may not be smaller than the '
'value passed to the "min_num_atoms" keyword in the '
'Constraints block.')
print('Quitting...')
quit()
else:
self.max_num_atoms = random_org_parameters['max_num_atoms']
# allowing composition space endpoints (only used for pd searches)
if 'allow_endpoints' not in random_org_parameters:
self.allow_endpoints = self.default_allow_endpoints
elif random_org_parameters['allow_endpoints'] in (None, 'default'):
self.allow_endpoints = self.default_allow_endpoints
else:
self.allow_endpoints = random_org_parameters['allow_endpoints']
# volume scaling
self.vpas = self.default_vpas
if 'volumes_per_atom' not in random_org_parameters:
pass
elif random_org_parameters['volumes_per_atom'] in (None,
'default'):
pass
else:
# replace the specified volumes per atom with the given values
for symbol in random_org_parameters['volumes_per_atom']:
self.vpas[symbol] = random_org_parameters[
'volumes_per_atom'][symbol]
self.num_made = 0 # number added to initial population
self.is_successes_based = True # it's based on number added
self.is_finished = False
def get_default_vpas(self, composition_space):
"""
Returns a dictionary containing the default volumes per atom for all
the elements in the composition space.
Args:
composition_space: the CompositionSpace of the search
"""
default_vpas = {}
for element in composition_space.get_all_elements():
default_vpas[element.symbol] = self.all_default_vpas[
element.symbol]
return default_vpas
def create_organism(self, id_generator, composition_space, constraints,
random):
"""
Creates a random organism for the initial population.
Returns a random organism, or None if an error was encountered during
volume scaling.
        Note: for phase diagram searches, this will not create structures
with compositions equivalent to the endpoints of the composition
space. Reference structures at those compositions should be
provided with the FileOrganismCreator.
Args:
id_generator: the IDGenerator used to assign id numbers to all
organisms
composition_space: the CompositionSpace of the search
constraints: the Constraints of the search
random: a copy of Python's built in PRNG
"""
# make a random lattice
random_lattice = self.make_random_lattice(constraints, random)
# get a list of species for the random organism
species = self.get_species_list(composition_space, constraints, random)
if species is None: # could happen for pd searches...
return None
# for each specie, generate a set of random fractional coordinates
random_coordinates = []
for _ in range(len(species)):
random_coordinates.append([random.random(), random.random(),
random.random()])
# make a random cell
random_cell = Cell(random_lattice, species, random_coordinates)
# optionally scale the volume of the random structure
if not self.scale_volume(random_cell):
return None # sometimes pymatgen's scaling algorithm crashes
# make the random organism
random_org = Organism(random_cell, id_generator, self.name,
composition_space)
print('Random organism creator making organism {} '.format(
random_org.id))
return random_org
def make_random_lattice(self, constraints, random):
"""
Returns a random lattice that satisfies the constraints on maximum and
minimum lengths and angles.
Args:
constraints: the Constraints of the search
random: a copy of Python's built in PRNG
"""
# make three random lattice vectors that satisfy the length constraints
a = constraints.min_lattice_length + random.random()*(
constraints.max_lattice_length - constraints.min_lattice_length)
b = constraints.min_lattice_length + random.random()*(
constraints.max_lattice_length - constraints.min_lattice_length)
c = constraints.min_lattice_length + random.random()*(
constraints.max_lattice_length - constraints.min_lattice_length)
# make three random lattice angles that satisfy the angle constraints
alpha = constraints.min_lattice_angle + random.random()*(
constraints.max_lattice_angle - constraints.min_lattice_angle)
beta = constraints.min_lattice_angle + random.random()*(
constraints.max_lattice_angle - constraints.min_lattice_angle)
gamma = constraints.min_lattice_angle + random.random()*(
constraints.max_lattice_angle - constraints.min_lattice_angle)
# build the random lattice
return Lattice.from_parameters(a, b, c, alpha, beta, gamma)
def get_species_list(self, composition_space, constraints, random):
"""
Returns a list containing the species in the random organism.
Args:
composition_space: the CompositionSpace of the search
constraints: the Constraints of the search
random: a copy of Python's built in PRNG
"""
if composition_space.objective_function == 'epa':
return self.get_epa_species_list(composition_space, constraints,
random)
elif composition_space.objective_function == 'pd':
return self.get_pd_species_list(composition_space, constraints,
random)
def get_epa_species_list(self, composition_space, constraints, random):
"""
Returns a list containing the species in the random organism.
Precondition: the composition space contains only one endpoint
(it's a fixed-composition search)
Args:
composition_space: the CompositionSpace of the search
constraints: the Constraints of the search
random: a copy of Python's built in PRNG
Description:
1. Computes the minimum and maximum number of formula units from
the minimum (constraints.min_num_atoms) and maximum
(self.max_num_atoms) number of atoms and the number of atoms
per formula unit.
2. Gets a random number of formula units within the range allowed
by the minimum and maximum number of formula units.
3. Computes the number of atoms of each species from the random
number of formula units.
"""
# get random number of formula units and resulting number of atoms
reduced_formula = composition_space.endpoints[0].reduced_composition
num_atoms_in_formula = reduced_formula.num_atoms
max_num_formulas = int(math.floor(
self.max_num_atoms/num_atoms_in_formula))
min_num_formulas = int(math.ceil(
constraints.min_num_atoms/num_atoms_in_formula))
        # round up to the next formula unit if necessary (so the range is valid)
if max_num_formulas < min_num_formulas:
max_num_formulas += 1
random_num_formulas = random.randint(min_num_formulas,
max_num_formulas)
# add the right number of each specie
species = []
for specie in reduced_formula:
for _ in range(random_num_formulas*int(reduced_formula[specie])):
species.append(specie)
return species
def get_pd_species_list(self, composition_space, constraints, random):
"""
Returns a list containing the species in the random organism.
Precondition: the composition space contains multiple endpoints
            (it's a phase diagram search)
Args:
composition_space: the CompositionSpace of the search
constraints: the Constraints of the search
random: a copy of Python's built in PRNG
Description:
1. Gets a random fraction of each composition space endpoint such
that the fractions sum to 1.
2. Computes the fraction of each specie from the fraction of each
endpoint and the amount of each specie within each endpoint.
3. Approximates the fraction of each specie as a rational number
with a maximum possible denominator of self.max_num_atoms.
4. Takes the product of the denominators of all the species'
rational fractions, and then multiplies each specie's rational
fraction by this product to obtain the number of atoms of that
species.
5. Checks if the total number of atoms exceeds self.max_num_atoms.
If so, reduce the amount of each atom with a multiplicative
factor.
6. Reduces the resulting composition (i.e., find the smallest
number of atoms needed to describe the composition).
7. Optionally increases the number of atoms (w/o changing the
composition) such that the min num atoms constraint is
satisfied if possible.
8. Checks that the resulting number of atoms satisfies the maximum
(self.max_num_atoms) number of atoms constraint, and optionally
checks that the resulting composition is not equivalent to one
of the endpoint compositions.
"""
        # get random fractions for each endpoint that sum to 1 (i.e., a
        # random location in the composition space)
fracs = self.get_random_endpoint_fractions(composition_space, random)
composition_space.endpoints.sort()
endpoint_fracs = {}
for i in range(len(fracs)):
endpoint_fracs[composition_space.endpoints[i]] = fracs[i]
# compute amount of each element from amount of each endpoint
all_elements = composition_space.get_all_elements()
element_amounts = {}
for element in all_elements:
element_amounts[element] = 0
for formula in endpoint_fracs:
for element in formula:
element_amounts[element] += endpoint_fracs[
formula]*formula[element]
# normalize the amounts of the elements
amounts_sum = 0
for element in element_amounts:
amounts_sum += element_amounts[element]
for element in element_amounts:
element_amounts[element] = element_amounts[element]/amounts_sum
# approximate the decimal amount of each element as a fraction
# (rational number)
rational_amounts = {}
for element in element_amounts:
rational_amounts[element] = Fraction(
element_amounts[element]).limit_denominator(
self.max_num_atoms)
# multiply the denominators together, then multiply each fraction
# by this result to get the number of atoms of each element
denom_product = 1.0
for element in rational_amounts:
denom_product *= rational_amounts[element].denominator
for element in rational_amounts:
element_amounts[element] = round(float(
denom_product)*rational_amounts[element])
# see how many total atoms we have
num_atoms = 0
for element in element_amounts:
num_atoms += element_amounts[element]
# reduce the number of atoms of each element if needed
if num_atoms > self.max_num_atoms:
numerator = random.randint(
int(round(0.5*(constraints.min_num_atoms +
self.max_num_atoms))), self.max_num_atoms)
factor = numerator/num_atoms
for element in element_amounts:
element_amounts[element] = round(
factor*element_amounts[element])
# make a Composition object from the amounts of each element
random_composition = Composition(element_amounts)
random_composition = random_composition.reduced_composition
# possibly increase the number of atoms by a random (allowed) amount
min_multiple = int(
math.ceil(constraints.min_num_atoms/random_composition.num_atoms))
max_multiple = int(
math.floor(self.max_num_atoms/random_composition.num_atoms))
if max_multiple > min_multiple:
random_multiple = random.randint(min_multiple, max_multiple)
bigger_composition = {}
for element in random_composition:
bigger_composition[element] = \
random_multiple*random_composition[element]
random_composition = Composition(bigger_composition)
# check the max number of atoms constraints (should be ok)
if int(random_composition.num_atoms) > self.max_num_atoms:
return None
# check the composition - only allow endpoints if specified
if not self.allow_endpoints:
for endpoint in composition_space.endpoints:
if endpoint.almost_equals(
random_composition.reduced_composition):
return None
# save the element objects
species = []
for specie in random_composition:
for _ in range(int(random_composition[specie])):
species.append(specie)
return species
def get_random_endpoint_fractions(self, composition_space, random):
"""
Uniformly samples the composition space. Returns a list containing the
fractions of each endpoint composition (that sum to 1).
Args:
composition_space: the CompositionSpace of the search
random: a copy of Python's built-in PRNG
Description:
1. Computes vectors that span the normalized composition space
(e.g., the triangular facet for a ternary system) by
subtracting the first composition fraction unit vector from the
others.
2. Takes a random linear combination of these vectors by
multiplying each one by a uniform random number and then taking
their sum.
3. Adds the first composition unit vector to the result from step 2
to obtain a vector with random fractions of each endpoint
composition.
4. Checks that the vector from step 3 lies in the portion of the
                plane that corresponds to normalized amounts. This is done by
                checking that the amount of the first endpoint composition is
non-negative. If it's negative, calls itself recursively until
a valid solution is found.
"""
# compute the vectors corresponding to the needed binary edges of the
        # phase diagram (w.r.t. the first endpoint of the composition space)
num_endpoints = len(composition_space.endpoints)
        binary_edges = []
for i in range(1, num_endpoints):
edge = [-1]
for j in range(1, num_endpoints):
if j == i:
edge.append(1)
else:
edge.append(0)
            binary_edges.append(np.array(edge))
# take a linear combination of the edge vectors, where the weight of
# each vector is drawn from a uniform distribution
        weighted_average = random.random()*binary_edges[0]
        for i in range(1, len(binary_edges)):
            weighted_average = np.add(weighted_average,
                                      random.random()*binary_edges[i])
# add the first unit vector to the weighted average of the edge
# vectors to obtain the fractions of each endpoint
endpoint_fracs = weighted_average.tolist()
endpoint_fracs[0] = endpoint_fracs[0] + 1
# check that the computed fraction of the first endpoint is not less
# than zero. If it is, try again.
if endpoint_fracs[0] < 0:
return self.get_random_endpoint_fractions(composition_space,
random)
else:
return endpoint_fracs
def scale_volume(self, random_cell):
"""
        Scales the volume of the random cell according to the values in
self.vpas.
Returns a boolean indicating whether volume scaling was completed
without errors.
Args:
random_cell: the random Cell whose volume to possibly scale
"""
# compute the volume to scale to
composition = random_cell.composition
total_volume = 0
for specie in composition:
total_volume += composition[specie]*self.vpas[specie.symbol]
# scale the volume
with warnings.catch_warnings():
warnings.simplefilter('ignore')
random_cell.scale_lattice(total_volume)
if str(random_cell.lattice.a) == 'nan' or \
random_cell.lattice.a > 100:
return False
else:
return True
def update_status(self):
        """
        Increments num_made, and if necessary, updates is_finished.
        """
self.num_made = self.num_made + 1
print('Organisms left for {}: {} '.format(
self.name, self.number - self.num_made))
if self.num_made == self.number:
self.is_finished = True
class FileOrganismCreator(object):
"""
Creates organisms from files (poscar or cif) for the initial population.
"""
def __init__(self, path_to_folder):
"""
Makes a FileOrganismCreator.
Args:
path_to_folder: the path to the folder containing the files from
which to make organisms
Precondition: the folder exists and contains files
"""
self.name = 'file organism creator'
self.path_to_folder = path_to_folder
self.files = [f for f in os.listdir(self.path_to_folder) if
os.path.isfile(os.path.join(self.path_to_folder, f))]
self.number = len(self.files)
self.num_made = 0 # number of attempts (usually number of files given)
self.is_successes_based = False # it's based on number attempted
self.is_finished = False
def create_organism(self, id_generator, composition_space, constraints,
random):
"""
Creates an organism for the initial population from a poscar or cif
file.
Returns an organism, or None if one could not be created.
Args:
id_generator: the IDGenerator used to assign id numbers to all
organisms
composition_space: the CompositionSpace of the search
constraints: the Constraints of the search
random: a copy of Python's built in PRNG
TODO: the last three arguments are never actually used in this method,
but I included them so the method has the same arguments as
RandomOrganismCreator.create_organism() to allow the
create_organism method to be called on both RandomOrganismCreator
and FileOrganismCreator without having to know in advance which one
it is. Maybe there's a better way to deal with this...
"""
if self.files[self.num_made - 1].endswith('.cif') or self.files[
self.num_made - 1].startswith('POSCAR'):
try:
new_cell = Cell.from_file(
str(self.path_to_folder) + '/' + str(
self.files[self.num_made - 1]))
new_org = Organism(new_cell, id_generator, self.name,
composition_space)
print('Making organism {} from file: {} '.format(
new_org.id, self.files[self.num_made - 1]))
self.update_status()
return new_org
except:
print('Error reading structure from file: {} '.format(
self.files[self.num_made - 1]))
self.update_status()
return None
else:
print('File {} has invalid extension - file must end with .cif or '
'begin with POSCAR '.format(self.files[self.num_made - 1]))
self.update_status()
return None
def get_cells(self):
"""
Creates cells from the files and puts them in a list.
Returns the list of Cell objects.
        Used for checking if all the composition space endpoints are included
for phase diagram searches.
"""
file_cells = []
for cell_file in self.files:
if cell_file.endswith('.cif') or cell_file.startswith(
'POSCAR'):
try:
new_cell = Cell.from_file(
str(self.path_to_folder) + "/" + str(cell_file))
file_cells.append(new_cell)
except:
pass
return file_cells
def update_status(self):
"""
Increments num_made, and if necessary, updates is_finished.
"""
self.num_made = self.num_made + 1
print('Organisms left for {}: {} '.format(
self.name, self.number - self.num_made))
if self.num_made == len(self.files):
self.is_finished = True
| henniggroup/GASP-python | gasp/organism_creators.py | Python | mit | 28,520 |
## INFO ########################################################################
## ##
## plastey ##
## ======= ##
## ##
## Oculus Rift + Leap Motion + Python 3 + C + Blender + Arch Linux ##
## Version: 0.2.0.980 (20150510) ##
## File: hud.py ##
## ##
## For more information about the project, visit ##
## <http://plastey.kibu.hu>. ##
## Copyright (C) 2015 Peter Varo, Kitchen Budapest ##
## ##
## This program is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License as published by the ##
## Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. ##
## See the GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program, most likely a file in the root directory, ##
## called 'LICENSE'. If not, see <http://www.gnu.org/licenses>. ##
## ##
######################################################################## INFO ##
# Import python modules
from collections import deque
#------------------------------------------------------------------------------#
class Text:
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, text_first_object,
text_other_object,
time_getter,
interval):
self._text_first = text_first_object
self._text_other = text_other_object
self._get_time = time_getter
self._interval = interval
self._last_time = time_getter()
self._messages = deque()
self._still_empty = True
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _update(self):
# Write the changed and constructed messages to display
messages = iter(self._messages)
try:
self._text_first.text = next(messages)
self._text_other.text = '\n'.join(messages)
except StopIteration:
self._text_first.text = self._text_other.text = ''
# Update timer
self._last_time = self._get_time()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def clear(self):
self._messages = deque()
self._update()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def update(self):
# If there are any messages left
if len(self._messages):
# If interval passed
if (self._last_time + self._interval) <= self._get_time():
# Remove oldest item
self._messages.pop()
# Update display
self._update()
            # If deque just became empty
elif not self._still_empty:
# Switch state flag and update display
self._still_empty = True
self._update()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def write(self, message):
# Add new message and update display
self._messages.appendleft(message)
self._update()
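#------------------------------------------------------------------------------#
# Example usage (hypothetical text objects and timer, shown only as an
# illustration):
#
#     hud = Text(first_line_object, other_lines_object, time.time, 2.0)
#     hud.write('hello')   # newest message is shown on the first line
#     hud.write('world')   # older messages move to the "other" text block
#     hud.update()         # call periodically; drops the oldest message
#                          # once every 2.0 seconds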
| kitchenbudapest/vr | hud.py | Python | gpl-3.0 | 4,413 |
from kivy.vector import Vector
from cobiv.modules.core.entity import Entity
class GestureManager(Entity):
__touches = []
strategies = {}
current_strategy = None
strategy_candidates = []
stroke_list = {}
last_tick = None
first_tick = None
stroke_error_margin = 20
stroke_tick_time = 0.1
stroke_identify_timeout = 0.3
last_touches = {}
def __init__(self):
super(GestureManager, self).__init__()
def build_yaml_config(self, config):
return super(GestureManager, self).build_yaml_config(config)
def ready(self):
for gesture_strategy in self.lookups('Gesture'):
touch_count = gesture_strategy.required_touch_count()
            if touch_count not in self.strategies:
self.strategies[touch_count] = []
self.strategies[touch_count].append(gesture_strategy)
def on_touch_down(self, touch):
self.__touches.append(touch)
nb_touch = len(self.__touches)
if nb_touch >= 2: # and nb_touch in self.strategies:
self.strategy_candidates = []
if nb_touch in self.strategies:
for strategy in self.strategies[nb_touch]:
strategy.initialize(self.__touches)
self.strategy_candidates.append(strategy)
self.current_strategy = None
self.last_tick = touch.time_update
self.first_tick = touch.time_update
self.stroke_list = {}
self.last_touches = {}
for t in self.__touches:
# print("init touch",t.uid)
self.last_touches[t.uid] = t.pos
self.stroke_list[t.uid] = [Vector(0, 0)]
else:
self.last_touches = {}
def on_touch_up(self, touch):
nb_touch = len(self.__touches)
if nb_touch >= 2 and nb_touch in self.strategies:
self.update_last_touch(touch)
self.stroke_list[touch.uid][-1] = self.round_vector(self.stroke_list[touch.uid][-1].normalize())
if nb_touch >= 2 and nb_touch in self.strategies:
if self.current_strategy is not None:
self.current_strategy.finalize(self.__touches, self.stroke_list)
self.current_strategy = None
self.__touches.remove(touch)
def on_touch_move(self, touch):
        if touch.uid not in self.last_touches:
return
# print(self.stroke_list)
self.update_last_touch(touch)
if touch.time_update - self.last_tick > self.stroke_tick_time:
self.add_stroke(touch)
self.process_or_validate_strategies(touch)
def add_stroke(self, touch):
do_new_stroke = False
for t in self.__touches:
self.last_touches[t.uid] = t.pos
# check if current stroke is not null nor identical to previous
v = self.stroke_list[touch.uid][-1]
if v.length() > 0:
if len(self.stroke_list[touch.uid]) > 1:
do_new_stroke = do_new_stroke or (v - self.stroke_list[touch.uid][-2]).length() > 0
else:
do_new_stroke = True
if do_new_stroke:
for t in self.__touches:
self.stroke_list[t.uid].append(Vector(0, 0))
self.last_tick = touch.time_update
def process_or_validate_strategies(self, touch):
if self.current_strategy is not None:
self.current_strategy.process(self.__touches, self.stroke_list)
else:
if touch.time_update - self.first_tick > self.stroke_identify_timeout:
self.strategy_candidates = [c for c in self.strategy_candidates if
c.validate(self.__touches, self.stroke_list)]
if len(self.strategy_candidates) == 1:
self.current_strategy = self.strategy_candidates[0]
self.current_strategy.process(self.__touches, self.stroke_list)
def get_touch_count(self):
return len(self.__touches)
def round_vector(self, v):
def sign(x):
return (x > 0) - (x < 0)
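        # Example (hypothetical strokes): Vector(0.95, 0.3).normalize() snaps
        # to Vector(1, 0), while a roughly diagonal drag such as
        # Vector(0.7, 0.7).normalize() snaps to Vector(1, 1), so strokes are
        # quantized to the 8 principal directions.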
if abs(v.y) <= 0.38:
return Vector(sign(v.x), 0)
elif abs(v.x) <= 0.38:
return Vector(0, sign(v.y))
else:
return Vector(sign(v.x), sign(v.y))
def update_last_touch(self, touch):
tx, ty = self.last_touches[touch.uid]
v = Vector(touch.x - tx, touch.y - ty)
v1 = v
if 0 < v.length() < self.stroke_error_margin:
v = Vector(0, 0)
if v.length() > 0:
self.stroke_list[touch.uid][-1] = self.round_vector(v.normalize())
| gokudomatic/cobiv | cobiv/modules/core/gestures/gesture_manager.py | Python | mit | 4,701 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# serialize.py
#
# Copyright 2013 tusharmakkar08 <tusharmakkar08@tusharmakkar08-Satellite-C660>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Importing modules
import cPickle
_end = '_end_'
# Implementation of Trie using Dictionaries in python
def make_trie(words):
root=dict()
for i in words :
cd=root
for letter in i:
cd=cd.setdefault(letter,{})
cd=cd.setdefault(_end,_end)
return root
def in_trie(trie,word):
cd=trie
for letter in word:
if letter in cd:
cd=cd[letter]
else:
return False
else:
if _end in cd:
return True
else:
return False
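# Example (hypothetical word list):
# t = make_trie(['cat', 'car'])
# in_trie(t, 'cat') -> True
# in_trie(t, 'ca') -> False, since only complete words carry the _end marker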
try:
    # Getting data back from secondary file, which avoids making the trie again and again
newt=open("result.txt",'r')
print "Fetching from secondary memory ... "
TR=cPickle.load(newt)
newt.close()
except IOError:
# Opening Data File
print "Making of trie"
List =open("20million.txt").readlines()
inp=[]
# Code to remove \n from the file
for i in List:
k=i.strip()
if k!='':
inp.append(k.lower())
TR=make_trie(inp)
# Dumping data into file
newres=open("result.txt",'w')
cPickle.dump(TR,newres)
newres.close()
while 1:
r=raw_input("Enter string or -1 to exit\n")
if r=="-1":
break
else:
kit=r.lower()
print in_trie(TR,kit)
| tusharmakkar08/TRIE_Data_Structure | serialize.py | Python | mit | 1,972 |
#!/usr/bin/env python
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# gst-python
# Copyright (C) 2005 Andy Wingo <wingo@pobox.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
# A test more of gst-plugins than of gst-python.
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import pygst
pygst.require('0.10')
import gst
import fvumeter
def clamp(x, min, max):
if x < min:
return min
elif x > max:
return max
return x
class Window(gtk.Dialog):
def __init__(self):
gtk.Dialog.__init__(self, 'Volume Level')
self.prepare_ui()
def prepare_ui(self):
self.set_default_size(200,60)
self.set_title('Volume Level')
self.connect('delete-event', lambda *x: gtk.main_quit())
self.vus = []
self.vus.append(fvumeter.FVUMeter())
self.vus.append(fvumeter.FVUMeter())
self.vbox.add(self.vus[0])
self.vbox.add(self.vus[1])
self.vus[0].show()
self.vus[1].show()
def error(self, message, secondary=None):
m = gtk.MessageDialog(self,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR,
gtk.BUTTONS_OK,
message)
if secondary:
m.format_secondary_text(secondary)
m.run()
def on_message(self, bus, message):
if message.structure.get_name() == 'level':
s = message.structure
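            # A 'level' message carries per-channel lists, e.g. (hypothetical
            # stereo input) s['peak'] == [-12.3, -11.8], which are clamped
            # below to the meter's -90..0 dB range.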
for i in range(0, len(s['peak'])):
self.vus[i].freeze_notify()
decay = clamp(s['decay'][i], -90.0, 0.0)
peak = clamp(s['peak'][i], -90.0, 0.0)
if peak > decay:
print "ERROR: peak bigger than decay!"
self.vus[i].set_property('decay', decay)
self.vus[i].set_property('peak', peak)
return True
def run(self):
try:
self.set_sensitive(False)
s = 'alsasrc ! level message=true ! fakesink'
pipeline = gst.parse_launch(s)
self.set_sensitive(True)
pipeline.get_bus().add_signal_watch()
i = pipeline.get_bus().connect('message::element', self.on_message)
pipeline.set_state(gst.STATE_PLAYING)
gtk.Dialog.run(self)
pipeline.get_bus().disconnect(i)
pipeline.get_bus().remove_signal_watch()
pipeline.set_state(gst.STATE_NULL)
except gobject.GError, e:
self.set_sensitive(True)
self.error('Could not create pipeline', e.__str__)
if __name__ == '__main__':
w = Window()
w.show_all()
w.run()
| freedesktop-unofficial-mirror/gstreamer__gst-python | old_examples/vumeter.py | Python | lgpl-2.1 | 3,454 |
#!/usr/bin/env python
#
# Copyright 2021 Malte Lenhart
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import pmt
import time
# this test tests message strobe and message debug blocks against each other
# similar tests contained in message_debug class
# this tests the periodic message output and the input port to change the message
class qa_message_strobe(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001_t(self):
test_str = "test_msg"
new_msg = "new_msg"
message_period_ms = 100
msg_strobe = blocks.message_strobe(
pmt.intern(test_str), message_period_ms)
msg_debug = blocks.message_debug()
self.tb.msg_connect(msg_strobe, "strobe", msg_debug, "store")
self.tb.start()
self.assertAlmostEqual(msg_debug.num_messages(),
0, delta=2) # 1st call, expect 0
time.sleep(1) # floor(1000/100) = 10
self.assertAlmostEqual(msg_debug.num_messages(),
                               10, delta=3) # 2nd call, expect ~10
        time.sleep(1) # floor(2000/100) = 20
        self.assertAlmostEqual(msg_debug.num_messages(),
                               20, delta=3) # 3rd call, expect ~20
# change test message
msg_strobe.to_basic_block()._post(pmt.intern("set_msg"), pmt.intern(new_msg))
time.sleep(1)
self.tb.stop()
self.tb.wait()
# check data
# first received message matches initial test message
self.assertAlmostEqual(pmt.to_python(msg_debug.get_message(
0)), test_str, "mismatch initial test string")
# last message matches changed test message
no_msgs = msg_debug.num_messages()
self.assertAlmostEqual(pmt.to_python(msg_debug.get_message(
no_msgs - 1)), new_msg, "failed to update string")
if __name__ == '__main__':
gr_unittest.run(qa_message_strobe)
| dl1ksv/gnuradio | gr-blocks/python/blocks/qa_message_strobe.py | Python | gpl-3.0 | 2,087 |
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
from PWGJE.EMCALJetTasks.Tracks.analysis.base.SpectrumFitter import MinBiasFitter
from PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics import GraphicsObject
from PWGJE.EMCALJetTasks.Tracks.analysis.base.DataCollection import DataCollection, Datapoint
class TriggerTurnonCurve:
def __init__(self, name, emcaldata, minbiasdata, fitmin = 15):
self.__name = name
self.__mbfitter = MinBiasFitter("mbfitter", minbiasdata)
self.__values = self.__Create(emcaldata)
def __Create(self, emcaldata):
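        # Example (hypothetical numbers): if the EMCAL-triggered yield in a
        # bin above 15 GeV/c is 1.2e-3 and the min. bias fit gives a binned
        # mean of 4.0e-6, the stored turn-on value for that bin is 300, with
        # the statistical error scaled by the same min. bias denominator.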
        result = DataCollection("turnonCurve%s" %(self.__name))
for mybin in range(1, emcaldata.GetXaxis().GetNbins()+1):
minval = emcaldata.GetXaxis().GetBinLowEdge(mybin)
if minval < 15:
continue
maxval = emcaldata.GetXaxis().GetBinUpEdge(mybin)
binnedMb = self.__mbfitter.CalculateBinMean(minval, maxval)
statError = emcaldata.GetBinError(mybin)/binnedMb
datapoint = Datapoint(emcaldata.GetXaxis().GetBinCenter(mybin), emcaldata.GetBinContent(mybin)/binnedMb, emcaldata.GetXaxis().GetBinWidth(mybin)/2.)
datapoint.AddErrorSource("stat", statError, statError)
result.AddDataPoint(datapoint)
        return result
def GetPoints(self):
return self.__values.MakeErrorGraphForSource("stat")
def GetName(self):
return self.__name
def MakeGraphicsObject(self, style):
return GraphicsObject(self.GetPoints(), style)
def WriteData(self, name):
self.GetPoints().Write("turnonCurve%s" %(name))
| ppribeli/AliPhysics | PWGJE/EMCALJetTasks/Tracks/analysis/util/TriggerTurnonCurve.py | Python | bsd-3-clause | 2,682 |
''' config module: various singleton configuration modules for PyHeron '''
__all__ = ['system_config']
| lewiskan/heron | heron/common/src/python/config/__init__.py | Python | apache-2.0 | 103 |
# -*- coding: utf-8 -*-
import os
import shutil
import unittest
import six
import pytest
import eyed3
from eyed3 import main, id3, core, compat
from . import DATA_D, RedirectStdStreams
def testPluginOption():
for arg in ["--help", "-h"]:
# When help is requested and no plugin is specified, use default
with RedirectStdStreams() as out:
try:
args, _, config = main.parseCommandLine([arg])
except SystemExit as ex:
assert ex.code == 0
out.stdout.seek(0)
sout = out.stdout.read()
assert sout.find("Plugin options:\n Classic eyeD3") != -1
# When help is requested and all default plugin names are specified
for plugin_name in ["classic"]:
for args in [["--plugin=%s" % plugin_name, "--help"]]:
with RedirectStdStreams() as out:
try:
args, _, config = main.parseCommandLine(args)
except SystemExit as ex:
assert ex.code == 0
out.stdout.seek(0)
sout = out.stdout.read()
assert sout.find("Plugin options:\n Classic eyeD3") != -1
@unittest.skipIf(not os.path.exists(DATA_D), "test requires data files")
def testReadEmptyMp3():
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine([os.path.join(DATA_D,
"test.mp3")])
retval = main.main(args, config)
assert retval == 0
assert out.stderr.read().find("No ID3 v1.x/v2.x tag found") != -1
class TestDefaultPlugin(unittest.TestCase):
def __init__(self, name):
super(TestDefaultPlugin, self).__init__(name)
self.orig_test_file = "%s/test.mp3" % DATA_D
self.test_file = "/tmp/test.mp3"
@unittest.skipIf(not os.path.exists(DATA_D), "test requires data files")
def setUp(self):
shutil.copy(self.orig_test_file, self.test_file)
def tearDown(self):
# TODO: could remove the tag and compare audio file to original
os.remove(self.test_file)
def _addVersionOpt(self, version, opts):
if version == id3.ID3_DEFAULT_VERSION:
return
if version[0] == 1:
opts.append("--to-v1.1")
elif version[:2] == (2, 3):
opts.append("--to-v2.3")
elif version[:2] == (2, 4):
opts.append("--to-v2.4")
else:
assert not("Unhandled version")
def testNewTagArtist(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["-a", "The Cramps", self.test_file],
["--artist=The Cramps", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert retval == 0
af = eyed3.load(self.test_file)
assert af is not None
assert af.tag is not None
assert af.tag.artist == u"The Cramps"
def testNewTagComposer(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["--composer=H.R.", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert retval == 0
af = eyed3.load(self.test_file)
assert af is not None
assert af.tag is not None
assert af.tag.composer == u"H.R."
def testNewTagAlbum(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["-A", "Psychedelic Jungle", self.test_file],
["--album=Psychedelic Jungle", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.album == u"Psychedelic Jungle")
def testNewTagAlbumArtist(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["-b", "Various Artists", self.test_file],
["--album-artist=Various Artists", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert af is not None
assert af.tag is not None
assert af.tag.album_artist == u"Various Artists"
def testNewTagTitle(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["-t", "Green Door", self.test_file],
["--title=Green Door", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.title == u"Green Door")
def testNewTagTrackNum(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["-n", "14", self.test_file],
["--track=14", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.track_num[0] == 14)
def testNewTagTrackNumInvalid(self):
for opts in [ ["-n", "abc", self.test_file],
["--track=-14", self.test_file]
]:
with RedirectStdStreams() as out:
try:
args, _, config = main.parseCommandLine(opts)
except SystemExit as ex:
assert ex.code != 0
else:
assert not("Should not have gotten here")
def testNewTagTrackTotal(self, version=id3.ID3_DEFAULT_VERSION):
if version[0] == 1:
# No support for this in v1.x
return
for opts in [ ["-N", "14", self.test_file],
["--track-total=14", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.track_num[1] == 14)
def testNewTagGenre(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["-G", "Rock", self.test_file],
["--genre=Rock", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.genre.name == "Rock")
assert (af.tag.genre.id == 17)
def testNewTagYear(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["-Y", "1981", self.test_file],
["--release-year=1981", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
if version == id3.ID3_V2_3:
assert (af.tag.original_release_date.year == 1981)
else:
assert (af.tag.release_date.year == 1981)
def testNewTagReleaseDate(self, version=id3.ID3_DEFAULT_VERSION):
for date in ["1981", "1981-03-06", "1981-03"]:
orig_date = core.Date.parse(date)
for opts in [ ["--release-date=%s" % str(date), self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.release_date == orig_date)
def testNewTagOrigRelease(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["--orig-release-date=1981", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.original_release_date.year == 1981)
def testNewTagRecordingDate(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["--recording-date=1993-10-30", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.recording_date.year == 1993)
assert (af.tag.recording_date.month == 10)
assert (af.tag.recording_date.day == 30)
def testNewTagEncodingDate(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["--encoding-date=2012-10-23T20:22", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.encoding_date.year == 2012)
assert (af.tag.encoding_date.month == 10)
assert (af.tag.encoding_date.day == 23)
assert (af.tag.encoding_date.hour == 20)
assert (af.tag.encoding_date.minute == 22)
def testNewTagTaggingDate(self, version=id3.ID3_DEFAULT_VERSION):
for opts in [ ["--tagging-date=2012-10-23T20:22", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.tagging_date.year == 2012)
assert (af.tag.tagging_date.month == 10)
assert (af.tag.tagging_date.day == 23)
assert (af.tag.tagging_date.hour == 20)
assert (af.tag.tagging_date.minute == 22)
def testNewTagPlayCount(self):
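        # Note: '--play-count=+N' increments the existing count, so the
        # expected values below accumulate (0, then +1 -> 1, then +5 -> 6)
        # before the absolute values 7 and 10000 are set.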
for expected, opts in [ (0, ["--play-count=0", self.test_file]),
(1, ["--play-count=+1", self.test_file]),
(6, ["--play-count=+5", self.test_file]),
(7, ["--play-count=7", self.test_file]),
(10000, ["--play-count=10000", self.test_file]),
]:
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.play_count == expected)
def testNewTagPlayCountInvalid(self):
for expected, opts in [ (0, ["--play-count=", self.test_file]),
(0, ["--play-count=-24", self.test_file]),
(0, ["--play-count=+", self.test_file]),
(0, ["--play-count=abc", self.test_file]),
(0, ["--play-count=False", self.test_file]),
]:
with RedirectStdStreams() as out:
try:
args, _, config = main.parseCommandLine(opts)
except SystemExit as ex:
assert ex.code != 0
else:
assert not("Should not have gotten here")
def testNewTagBpm(self):
for expected, opts in [ (1, ["--bpm=1", self.test_file]),
(180, ["--bpm=180", self.test_file]),
(117, ["--bpm", "116.7", self.test_file]),
(116, ["--bpm", "116.4", self.test_file]),
]:
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.bpm == expected)
def testNewTagBpmInvalid(self):
for expected, opts in [ (0, ["--bpm=", self.test_file]),
(0, ["--bpm=-24", self.test_file]),
(0, ["--bpm=+", self.test_file]),
(0, ["--bpm=abc", self.test_file]),
(0, ["--bpm", "=180", self.test_file]),
]:
with RedirectStdStreams() as out:
try:
args, _, config = main.parseCommandLine(opts)
except SystemExit as ex:
assert ex.code != 0
else:
assert not("Should not have gotten here")
def testNewTagPublisher(self):
for expected, opts in [
("SST", ["--publisher", "SST", self.test_file]),
("Dischord", ["--publisher=Dischord", self.test_file]),
]:
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.publisher == expected)
def testUniqueFileId_1(self):
with RedirectStdStreams() as out:
assert out
args, _, config = main.parseCommandLine(["--unique-file-id", "Travis:Me",
self.test_file])
retval = main.main(args, config)
assert retval == 0
af = eyed3.load(self.test_file)
assert len(af.tag.unique_file_ids) == 1
assert af.tag.unique_file_ids.get("Travis").uniq_id == b"Me"
def testUniqueFileId_dup(self):
with RedirectStdStreams() as out:
assert out
args, _, config = \
main.parseCommandLine(["--unique-file-id", "Travis:Me",
"--unique-file-id=Travis:Me",
self.test_file])
retval = main.main(args, config)
assert retval == 0
af = eyed3.load(self.test_file)
assert len(af.tag.unique_file_ids) == 1
assert af.tag.unique_file_ids.get("Travis").uniq_id == b"Me"
def testUniqueFileId_N(self):
# Add 3
with RedirectStdStreams() as out:
assert out
args, _, config = \
main.parseCommandLine(["--unique-file-id", "Travis:Me",
"--unique-file-id=Engine:Kid",
"--unique-file-id", "Owner:Kid",
self.test_file])
retval = main.main(args, config)
assert retval == 0
af = eyed3.load(self.test_file)
assert len(af.tag.unique_file_ids) == 3
assert af.tag.unique_file_ids.get("Travis").uniq_id == b"Me"
assert af.tag.unique_file_ids.get("Engine").uniq_id == b"Kid"
assert af.tag.unique_file_ids.get(b"Owner").uniq_id == b"Kid"
# Remove 2
with RedirectStdStreams() as out:
assert out
args, _, config = \
main.parseCommandLine(["--unique-file-id", "Travis:",
"--unique-file-id=Engine:",
"--unique-file-id", "Owner:Kid",
self.test_file])
retval = main.main(args, config)
assert retval == 0
af = eyed3.load(self.test_file)
assert len(af.tag.unique_file_ids) == 1
# Remove not found ID
with RedirectStdStreams() as out:
args, _, config = \
main.parseCommandLine(["--unique-file-id", "Travis:",
self.test_file])
retval = main.main(args, config)
assert retval == 0
sout = out.stdout.read()
assert "Unique file ID 'Travis' not found" in sout
af = eyed3.load(self.test_file)
assert len(af.tag.unique_file_ids) == 1
# TODO:
# --text-frame, --user-text-frame
# --url-frame, --user-user-frame
# --add-image, --remove-image, --remove-all-images, --write-images
# etc.
# --rename, --force-update, -1, -2, --exclude
def testNewTagSimpleComment(self, version=id3.ID3_DEFAULT_VERSION):
if version[0] == 1:
# No support for this in v1.x
return
for opts in [ ["-c", "Starlette", self.test_file],
["--comment=Starlette", self.test_file] ]:
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
assert (af.tag.comments[0].text == "Starlette")
assert (af.tag.comments[0].description == "")
def testAddRemoveComment(self, version=id3.ID3_DEFAULT_VERSION):
if version[0] == 1:
# No support for this in v1.x
return
comment = u"Why can't I be you?"
for i, (c, d, l) in enumerate([(comment, u"c0", None),
(comment, u"c1", None),
(comment, u"c2", 'eng'),
(u"¿Por qué no puedo ser tú ?", u"c2",
'esp'),
]):
darg = u":{}".format(d) if d else ""
larg = u":{}".format(l) if l else ""
opts = [u"--add-comment={c}{darg}{larg}".format(**locals()),
self.test_file]
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
tag_comment = af.tag.comments.get(d or u"",
lang=compat.b(l if l else "eng"))
assert (tag_comment.text == c)
assert (tag_comment.description == d or u"")
assert (tag_comment.lang == compat.b(l if l else "eng"))
for d, l in [(u"c0", None),
(u"c1", None),
(u"c2", "eng"),
(u"c2", "esp"),
]:
larg = u":{}".format(l) if l else ""
opts = [u"--remove-comment={d}{larg}".format(**locals()),
self.test_file]
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
tag_comment = af.tag.comments.get(d,
lang=compat.b(l if l else "eng"))
assert tag_comment is None
assert (len(af.tag.comments) == 0)
def testRemoveAllComments(self, version=id3.ID3_DEFAULT_VERSION):
if version[0] == 1:
# No support for this in v1.x
return
comment = u"Why can't I be you?"
for i, (c, d, l) in enumerate([(comment, u"c0", None),
(comment, u"c1", None),
(comment, u"c2", 'eng'),
(u"¿Por qué no puedo ser tú ?", u"c2",
'esp'),
(comment, u"c4", "ger"),
(comment, u"c4", "rus"),
(comment, u"c5", "rus"),
]):
darg = u":{}".format(d) if d else ""
larg = u":{}".format(l) if l else ""
opts = [u"--add-comment={c}{darg}{larg}".format(**locals()),
self.test_file]
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
tag_comment = af.tag.comments.get(d or u"",
lang=compat.b(l if l else "eng"))
assert (tag_comment.text == c)
assert (tag_comment.description == d or u"")
assert (tag_comment.lang == compat.b(l if l else "eng"))
opts = [u"--remove-all-comments", self.test_file]
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (len(af.tag.comments) == 0)
def testAddRemoveLyrics(self, version=id3.ID3_DEFAULT_VERSION):
if version[0] == 1:
# No support for this in v1.x
return
comment = u"Why can't I be you?"
for i, (c, d, l) in enumerate([(comment, u"c0", None),
(comment, u"c1", None),
(comment, u"c2", 'eng'),
(u"¿Por qué no puedo ser tú ?", u"c2",
'esp'),
]):
darg = u":{}".format(d) if d else ""
larg = u":{}".format(l) if l else ""
opts = [u"--add-comment={c}{darg}{larg}".format(**locals()),
self.test_file]
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
assert (af is not None)
assert (af.tag is not None)
tag_comment = af.tag.comments.get(d or u"",
lang=compat.b(l if l else "eng"))
assert (tag_comment.text == c)
assert (tag_comment.description == d or u"")
assert (tag_comment.lang == compat.b(l if l else "eng"))
for d, l in [(u"c0", None),
(u"c1", None),
(u"c2", "eng"),
(u"c2", "esp"),
]:
larg = u":{}".format(l) if l else ""
opts = [u"--remove-comment={d}{larg}".format(**locals()),
self.test_file]
self._addVersionOpt(version, opts)
with RedirectStdStreams() as out:
args, _, config = main.parseCommandLine(opts)
retval = main.main(args, config)
assert (retval == 0)
af = eyed3.load(self.test_file)
tag_comment = af.tag.comments.get(d,
lang=compat.b(l if l else "eng"))
assert tag_comment is None
assert (len(af.tag.comments) == 0)
def testNewTagAll(self, version=id3.ID3_DEFAULT_VERSION):
self.testNewTagArtist(version)
self.testNewTagAlbum(version)
self.testNewTagTitle(version)
self.testNewTagTrackNum(version)
self.testNewTagTrackTotal(version)
self.testNewTagGenre(version)
self.testNewTagYear(version)
self.testNewTagSimpleComment(version)
af = eyed3.load(self.test_file)
assert (af.tag.artist == u"The Cramps")
assert (af.tag.album == u"Psychedelic Jungle")
assert (af.tag.title == u"Green Door")
assert (af.tag.track_num == (14, 14 if version[0] != 1 else None))
assert ((af.tag.genre.name, af.tag.genre.id) == ("Rock", 17))
if version == id3.ID3_V2_3:
assert (af.tag.original_release_date.year == 1981)
else:
assert (af.tag.release_date.year == 1981)
if version[0] != 1:
assert (af.tag.comments[0].text == "Starlette")
assert (af.tag.comments[0].description == "")
assert (af.tag.version == version)
def testNewTagAllVersion1(self):
self.testNewTagAll(version=id3.ID3_V1_1)
def testNewTagAllVersion2_3(self):
self.testNewTagAll(version=id3.ID3_V2_3)
def testNewTagAllVersion2_4(self):
self.testNewTagAll(version=id3.ID3_V2_4)
## XXX: newer pytest test below.
def test_lyrics(audiofile, tmpdir, eyeD3):
lyrics_files = []
for i in range(1, 4):
lfile = tmpdir / "lryics{:d}".format(i)
lfile.write_text((six.u(str(i)) * (100 * i)), "utf8")
lyrics_files.append(lfile)
audiofile = eyeD3(audiofile,
["--add-lyrics", "{}".format(lyrics_files[0]),
"--add-lyrics", "{}:desc".format(lyrics_files[1]),
"--add-lyrics", "{}:foo:en".format(lyrics_files[1]),
"--add-lyrics", "{}:foo:es".format(lyrics_files[2]),
"--add-lyrics", "{}:foo:de".format(lyrics_files[0]),
])
assert len(audiofile.tag.lyrics) == 5
assert audiofile.tag.lyrics.get(u"").text == ("1" * 100)
assert audiofile.tag.lyrics.get(u"desc").text == ("2" * 200)
assert audiofile.tag.lyrics.get(u"foo", "en").text == ("2" * 200)
assert audiofile.tag.lyrics.get(u"foo", "es").text == ("3" * 300)
assert audiofile.tag.lyrics.get(u"foo", "de").text == ("1" * 100)
audiofile = eyeD3(audiofile, ["--remove-lyrics", "foo:xxx"])
assert len(audiofile.tag.lyrics) == 5
audiofile = eyeD3(audiofile, ["--remove-lyrics", "foo:es"])
assert len(audiofile.tag.lyrics) == 4
audiofile = eyeD3(audiofile, ["--remove-lyrics", "desc"])
assert len(audiofile.tag.lyrics) == 3
audiofile = eyeD3(audiofile, ["--remove-all-lyrics"])
assert len(audiofile.tag.lyrics) == 0
eyeD3(audiofile, ["--add-lyrics", "eminem.txt"], expected_retval=2)
@pytest.mark.coveragewhore
def test_all(audiofile, image, eyeD3):
audiofile = eyeD3(audiofile,
["--artist", "Cibo Matto",
"--album-artist", "Cibo Matto",
"--album", "Viva! La Woman",
"--title", "Apple",
"--track=1", "--track-total=11",
"--disc-num=1", "--disc-total=1",
"--genre", "Pop",
"--release-date=1996-01-16",
"--orig-release-date=1996-01-16",
"--recording-date=1995-01-16",
"--encoding-date=1999-01-16",
"--tagging-date=1999-01-16",
"--comment", "From Japan",
"--publisher=\'Warner Brothers\'",
"--play-count=666",
"--bpm=99",
"--unique-file-id", "mishmash:777abc",
"--add-comment", "Trip Hop",
"--add-comment", "Quirky:Mood",
"--add-comment", "Kimyōna:Mood:jp",
"--add-comment", "Test:XXX",
"--add-popularity", "travis@ppbox.com:212:999",
"--fs-encoding=latin1",
"--no-config",
"--add-object", "{}:image/gif".format(image),
"--composer", "Cibo Matto",
])
def test_removeTag_v1(audiofile, eyeD3):
assert audiofile.tag is None
audiofile = eyeD3(audiofile, ["-1", "-a", "Government Issue"])
assert audiofile.tag.version == id3.ID3_V1_0
audiofile = eyeD3(audiofile, ["--remove-v1"])
assert audiofile.tag is None
def test_removeTag_v2(audiofile, eyeD3):
assert audiofile.tag is None
audiofile = eyeD3(audiofile, ["-2", "-a", "Integrity"])
assert audiofile.tag.version == id3.ID3_V2_4
audiofile = eyeD3(audiofile, ["--remove-v2"])
assert audiofile.tag is None
def test_removeTagWithBoth_v1(audiofile, eyeD3):
audiofile = eyeD3(eyeD3(audiofile, ["-1", "-a", "Face Value"]),
["-2", "-a", "Poison Idea"])
v1_view = eyeD3(audiofile, ["-1"], reload_version=id3.ID3_V1)
v2_view = eyeD3(audiofile, ["-2"], reload_version=id3.ID3_V2)
assert audiofile.tag.version == id3.ID3_V2_4
assert v1_view.tag.version == id3.ID3_V1_0
assert v2_view.tag.version == id3.ID3_V2_4
audiofile = eyeD3(audiofile, ["--remove-v1"])
assert audiofile.tag.version == id3.ID3_V2_4
assert eyeD3(audiofile, ["-1"], reload_version=id3.ID3_V1).tag is None
v2_tag = eyeD3(audiofile, ["-2"], reload_version=id3.ID3_V2).tag
assert v2_tag is not None
assert v2_tag.artist == "Poison Idea"
def test_removeTagWithBoth_v2(audiofile, eyeD3):
audiofile = eyeD3(eyeD3(audiofile, ["-1", "-a", "Face Value"]),
["-2", "-a", "Poison Idea"])
v1_view = eyeD3(audiofile, ["-1"], reload_version=id3.ID3_V1)
v2_view = eyeD3(audiofile, ["-2"], reload_version=id3.ID3_V2)
assert audiofile.tag.version == id3.ID3_V2_4
assert v1_view.tag.version == id3.ID3_V1_0
assert v2_view.tag.version == id3.ID3_V2_4
audiofile = eyeD3(audiofile, ["--remove-v2"])
assert audiofile.tag.version == id3.ID3_V1_0
assert eyeD3(audiofile, ["-2"], reload_version=id3.ID3_V2).tag is None
v1_tag = eyeD3(audiofile, ["-1"], reload_version=id3.ID3_V1).tag
assert v1_tag is not None and v1_tag.artist == "Face Value"
def test_removeTagWithBoth_v2_withConvert(audiofile, eyeD3):
audiofile = eyeD3(eyeD3(audiofile, ["-1", "-a", "Face Value"]),
["-2", "-a", "Poison Idea"])
v1_view = eyeD3(audiofile, ["-1"], reload_version=id3.ID3_V1)
v2_view = eyeD3(audiofile, ["-2"], reload_version=id3.ID3_V2)
assert audiofile.tag.version == id3.ID3_V2_4
assert v1_view.tag.version == id3.ID3_V1_0
assert v2_view.tag.version == id3.ID3_V2_4
audiofile = eyeD3(audiofile, ["--remove-v2", "--to-v1"])
assert audiofile.tag.version == id3.ID3_V1_0
assert eyeD3(audiofile, ["-2"], reload_version=id3.ID3_V2).tag is None
v1_tag = eyeD3(audiofile, ["-1"], reload_version=id3.ID3_V1).tag
assert v1_tag is not None and v1_tag.artist == "Face Value"
def test_removeTagWithBoth_v1_withConvert(audiofile, eyeD3):
audiofile = eyeD3(eyeD3(audiofile, ["-1", "-a", "Face Value"]),
["-2", "-a", "Poison Idea"])
v1_view = eyeD3(audiofile, ["-1"], reload_version=id3.ID3_V1)
v2_view = eyeD3(audiofile, ["-2"], reload_version=id3.ID3_V2)
assert audiofile.tag.version == id3.ID3_V2_4
assert v1_view.tag.version == id3.ID3_V1_0
assert v2_view.tag.version == id3.ID3_V2_4
audiofile = eyeD3(audiofile, ["--remove-v1", "--to-v2.3"])
assert audiofile.tag.version == id3.ID3_V2_3
assert eyeD3(audiofile, ["-1"], reload_version=id3.ID3_V1).tag is None
v2_tag = eyeD3(audiofile, ["-2"], reload_version=id3.ID3_V2).tag
assert v2_tag is not None and v2_tag.artist == "Poison Idea"
| gaetano-guerriero/eyeD3-debian | src/test/test_classic_plugin.py | Python | gpl-3.0 | 34,165 |
from tree import *
if __name__ == '__main__':
root_node = Node(
5,
left=Node(3, left=Node(1, right=Node(2))),
right=Node(7, left=Node(6), right=Node(8))
)
tree = Tree(root_node)
print(tree)
print(tree.mirror())
| y-usuzumi/survive-the-course | random_questions/二叉树镜像/python/main.py | Python | bsd-3-clause | 257 |
'''
Sample custom functions plugin for formula XML Element, Attribute creation functions
>>> note that this function has been renamed xfi:create-element and moved to FunctionXfi.py <<<
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
from arelle import XPathContext, XbrlUtil
from arelle.ModelValue import qname, QName
from arelle.ModelInstanceObject import ModelDimensionValue, XmlUtil
from arelle.FunctionUtil import qnameArg, nodeArg, atomicArg
from arelle import XmlValidate
from lxml import etree
'''
Create an XML element in a "scratchpad" in-memory XML document, to behave like the results
of an fn:doc() that would provide XML elements which can be consumed by formula typed
dimension and OCC constructs.
The element may be created with attributes and descendant elements, as needed.
xfxc:element(
qname, // qname of element
(name-value pairs for creating attributes if any),
value, if any, otherwise () or ''
optional nested elements (e.g., xfc:element( ) ... of child nodes)
)
Attributes may be pairs of string name, value, or pairs of QName, value when attribute
name is qualified.
A function definition is required in the formula linkbase:
<variable:function name="xfxc:element" output="element()" xlink:type="resource" xlink:label="cust-fn-xfxc-create">
<variable:input type="xs:QName" /> <!-- qname of element to create -->
<variable:input type="xs:anyAtomicType*" /> <!-- sequence of name, value pairs for creating attributes (name can be string or QName) -->
<variable:input type="xs:anyAtomicType" /> <!-- optional value, () or '' if none -->
<variable:input type="element()*" /> <!-- optional sequence of child elements, this parameter can be omitted if no child elements -->
</variable:function>
'''
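# Illustrative invocation from a formula value expression (hypothetical namespace and
# values; the concrete syntax depends on the XPath/formula processor in use):
#
#   xfxc:element(QName('http://example.com/typed', 'ex:member'),
#                ('id', 'm1'),
#                'memberValue')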
def xfxc_element(xc, p, contextItem, args):
if not 2 <= len(args) <= 4: raise XPathContext.FunctionNumArgs()
qn = qnameArg(xc, p, args, 0, 'QName', emptyFallback=None)
attrArg = args[1] if isinstance(args[1],(list,tuple)) else (args[1],)
# attributes have to be pairs
if attrArg:
if len(attrArg) & 1 or any(not isinstance(attrArg[i], (QName, _STR_BASE))
for i in range(0, len(attrArg),2)):
raise XPathContext.FunctionArgType(1,"((xs:qname|xs:string),xs:anyAtomicValue)", errCode="xfxce:AttributesNotNameValuePairs")
else:
attrParam = [(attrArg[i],attrArg[i+1]) # need name-value pairs for XmlUtil function
for i in range(0, len(attrArg),2)]
else:
attrParam = None
value = atomicArg(xc, p, args, 2, "xs:anyAtomicType", emptyFallback='')
if not value: # be sure '' is None so no text node is created
value = None
if len(args) < 4:
childElements = None
else:
childElements = xc.flattenSequence(args[3])
# scratchpad instance document emulates fn:doc( ) to hold XML nodes
scratchpadXmlDocUrl = "http://www.xbrl.org/2012/function/creation/xml_scratchpad.xml"
if scratchpadXmlDocUrl in xc.modelXbrl.urlDocs:
modelDocument = xc.modelXbrl.urlDocs[scratchpadXmlDocUrl]
else:
# create scratchpad xml document
# this will get the fake instance document in the list of modelXbrl docs so that it is garbage collected
from arelle import ModelDocument
modelDocument = ModelDocument.create(xc.modelXbrl,
ModelDocument.Type.UnknownXML,
scratchpadXmlDocUrl,
initialXml="<xfc:dummy xmlns:xfc='http://www.xbrl.org/2012/function/creation'/>")
newElement = XmlUtil.addChild(modelDocument.xmlRootElement,
qn,
attributes=attrParam,
text=value)
if childElements:
for element in childElements:
if isinstance(element, etree.ElementBase):
newElement.append(element)
    # node must be validated for use in instance creation (typed dimension references)
XmlValidate.validate(xc.modelXbrl, newElement)
return newElement
def xfxcFunctions():
return {
qname("{http://www.xbrl.org/2012/function/xml-creation}xfxc:element"): xfxc_element,
}
__pluginInfo__ = {
'name': 'Formula Xml Creation Functions',
'version': '1.0',
    'description': "This plug-in adds a custom function to create XML elements, such as for typed dimensions.",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'Formula.CustomFunctions': xfxcFunctions,
}
| sternshus/arelle2.7 | svr-2.7/arelle/plugin/functionsXmlCreation.py | Python | apache-2.0 | 4,852 |
from mailsnake import MailSnake
from mailsnake.exceptions import *
import settings
import logging
ms = MailSnake(settings.MAILCHIMP_API_KEY)
lists = ms.lists()
logger = logging.getLogger(__name__)
def subscribe_user(user):
try:
ms.listSubscribe(
id=lists['data'][0]['id'],
email_address=user.email,
merge_vars={
'USERNAME': user.username,
'FNAME': user.first_name or '',
'LNAME': user.last_name or '',
},
update_existing=True,
double_optin=False,
send_welcome=False,
)
except MailSnakeException:
logger.warn('MailChimp listSubscribe call failed for user %s' % user.email, exc_info=True)
| reverie/jotleaf.com | jotleaf/marketing/email_marketing.py | Python | mit | 760 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import codecs
import fnmatch
import inspect
import io
import locale
import logging
import os
import re
import tarfile
import tempfile
import threading
from collections import defaultdict
from datetime import datetime
from os.path import join
from babel.messages import extract
from lxml import etree, html
import odoo
from . import config, pycompat
from .misc import file_open, get_iso_codes, SKIPPED_ELEMENT_TYPES
from .osutil import walksymlinks
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style', 'title')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_Indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
#should find more specific locales for Spanish countries,
#but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# These are not all English small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())
# these direct uses of CSV are ok.
import csv # pylint: disable=deprecated-module
class UNIX_LINE_TERMINATOR(csv.excel):
lineterminator = '\n'
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
# FIXME: holy shit this whole thing needs to be cleaned up hard it's a mess
def encode(s):
assert isinstance(s, str)
return s
# which elements are translated inline
TRANSLATED_ELEMENTS = {
'abbr', 'b', 'bdi', 'bdo', 'br', 'cite', 'code', 'data', 'del', 'dfn', 'em',
'font', 'i', 'ins', 'kbd', 'keygen', 'mark', 'math', 'meter', 'output',
'progress', 'q', 'ruby', 's', 'samp', 'small', 'span', 'strong', 'sub',
'sup', 'time', 'u', 'var', 'wbr', 'text',
}
# which attributes must be translated
TRANSLATED_ATTRS = {
'string', 'help', 'sum', 'avg', 'confirm', 'placeholder', 'alt', 'title', 'aria-label',
'aria-keyshortcuts', 'aria-placeholder', 'aria-roledescription', 'aria-valuetext',
'value_label',
}
TRANSLATED_ATTRS = TRANSLATED_ATTRS | {'t-attf-' + attr for attr in TRANSLATED_ATTRS}
avoid_pattern = re.compile(r"\s*<!DOCTYPE", re.IGNORECASE | re.MULTILINE | re.UNICODE)
node_pattern = re.compile(r"<[^>]*>(.*)</[^<]*>", re.DOTALL | re.MULTILINE | re.UNICODE)
def translate_xml_node(node, callback, parse, serialize):
""" Return the translation of the given XML/HTML node.
:param callback: callback(text) returns translated text or None
:param parse: parse(text) returns a node (text is unicode)
:param serialize: serialize(node) returns unicode text
"""
def nonspace(text):
return bool(text) and len(re.sub(r'\W+', '', text)) > 1
def concat(text1, text2):
return text2 if text1 is None else text1 + (text2 or "")
def append_content(node, source):
""" Append the content of ``source`` node to ``node``. """
if len(node):
node[-1].tail = concat(node[-1].tail, source.text)
else:
node.text = concat(node.text, source.text)
for child in source:
node.append(child)
def translate_text(text):
""" Return the translation of ``text`` (the term to translate is without
surrounding spaces), or a falsy value if no translation applies.
"""
term = text.strip()
trans = term and callback(term)
return trans and text.replace(term, trans)
def translate_content(node):
""" Return ``node`` with its content translated inline. """
# serialize the node that contains the stuff to translate
text = serialize(node)
# retrieve the node's content and translate it
match = node_pattern.match(text)
trans = translate_text(match.group(1))
if trans:
# replace the content, and convert it back to an XML node
text = text[:match.start(1)] + trans + text[match.end(1):]
try:
node = parse(text)
except etree.ParseError:
# fallback: escape the translation as text
node = etree.Element(node.tag, node.attrib, node.nsmap)
node.text = trans
return node
def process(node):
""" If ``node`` can be translated inline, return ``(has_text, node)``,
where ``has_text`` is a boolean that tells whether ``node`` contains
some actual text to translate. Otherwise return ``(None, result)``,
where ``result`` is the translation of ``node`` except for its tail.
"""
if (
isinstance(node, SKIPPED_ELEMENT_TYPES) or
node.tag in SKIPPED_ELEMENTS or
node.get('t-translation', "").strip() == "off" or
node.tag == 'attribute' and node.get('name') not in TRANSLATED_ATTRS or
node.getparent() is None and avoid_pattern.match(node.text or "")
):
return (None, node)
# make an element like node that will contain the result
result = etree.Element(node.tag, node.attrib, node.nsmap)
# use a "todo" node to translate content by parts
todo = etree.Element('div', nsmap=node.nsmap)
if avoid_pattern.match(node.text or ""):
result.text = node.text
else:
todo.text = node.text
todo_has_text = nonspace(todo.text)
# process children recursively
for child in node:
child_has_text, child = process(child)
if child_has_text is None:
# translate the content of todo and append it to result
append_content(result, translate_content(todo) if todo_has_text else todo)
# add translated child to result
result.append(child)
# move child's untranslated tail to todo
todo = etree.Element('div', nsmap=node.nsmap)
todo.text, child.tail = child.tail, None
todo_has_text = nonspace(todo.text)
else:
# child is translatable inline; add it to todo
todo.append(child)
todo_has_text = todo_has_text or child_has_text
# determine whether node is translatable inline
if (
node.tag in TRANSLATED_ELEMENTS and
not (result.text or len(result)) and
not any(name.startswith("t-") for name in node.attrib)
):
# complete result and return it
append_content(result, todo)
result.tail = node.tail
has_text = (
todo_has_text or nonspace(result.text) or nonspace(result.tail)
or any((key in TRANSLATED_ATTRS and val) for key, val in result.attrib.items())
)
return (has_text, result)
# translate the content of todo and append it to result
append_content(result, translate_content(todo) if todo_has_text else todo)
# translate the required attributes
for name, value in result.attrib.items():
if name in TRANSLATED_ATTRS:
result.set(name, translate_text(value) or value)
# add the untranslated tail to result
result.tail = node.tail
return (None, result)
has_text, node = process(node)
if has_text is True:
# translate the node as a whole
wrapped = etree.Element('div')
wrapped.append(node)
return translate_content(wrapped)[0]
return node
def parse_xml(text):
return etree.fromstring(text)
def serialize_xml(node):
return etree.tostring(node, method='xml', encoding='unicode')
_HTML_PARSER = etree.HTMLParser(encoding='utf8')
def parse_html(text):
return html.fragment_fromstring(text, parser=_HTML_PARSER)
def serialize_html(node):
return etree.tostring(node, method='html', encoding='unicode')
def xml_translate(callback, value):
""" Translate an XML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
try:
root = parse_xml(value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
return serialize_xml(result)
except etree.ParseError:
# fallback for translated terms: use an HTML parser and wrap the term
root = parse_html(u"<div>%s</div>" % value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
# remove tags <div> and </div> from result
return serialize_xml(result)[5:-6]
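# Minimal usage sketch for xml_translate (the terms dict below is hypothetical; in practice
# the callback wraps ir.translation lookups):
#
#   terms = {u'Hello': u'Bonjour'}
#   xml_translate(terms.get, u'<form><p>Hello</p></form>')
#   # -> u'<form><p>Bonjour</p></form>'; terms without a translation are kept as-is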
def html_translate(callback, value):
""" Translate an HTML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
try:
# value may be some HTML fragment, wrap it into a div
root = parse_html("<div>%s</div>" % value)
result = translate_xml_node(root, callback, parse_html, serialize_html)
# remove tags <div> and </div> from result
value = serialize_html(result)[5:-6]
except ValueError:
_logger.exception("Cannot translate malformed HTML, using source value instead")
return value
#
# Warning: better use self.env['ir.translation']._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
if source and name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s and md5(src)=md5(%s)', (lang, source_type, str(name), source, source))
elif name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
elif source:
cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s and md5(src)=md5(%s)', (lang, source_type, source, source))
res_trans = cr.fetchone()
res = res_trans and res_trans[0] or False
return res
class GettextAlias(object):
def _get_db(self):
# find current DB based on thread/worker db name (see netsvc)
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
return odoo.sql_db.db_connect(db_name)
def _get_cr(self, frame, allow_create=True):
# try, in order: cr, cursor, self.env.cr, self.cr,
# request.env.cr
if 'cr' in frame.f_locals:
return frame.f_locals['cr'], False
if 'cursor' in frame.f_locals:
return frame.f_locals['cursor'], False
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
return s.env.cr, False
if hasattr(s, 'cr'):
return s.cr, False
try:
from odoo.http import request
return request.env.cr, False
except RuntimeError:
pass
if allow_create:
# create a new cursor
db = self._get_db()
if db is not None:
return db.cursor(), True
return None, False
def _get_uid(self, frame):
# try, in order: uid, user, self.env.uid
if 'uid' in frame.f_locals:
return frame.f_locals['uid']
if 'user' in frame.f_locals:
return int(frame.f_locals['user']) # user may be a record
s = frame.f_locals.get('self')
return s.env.uid
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang'), request.env.lang
lang = None
if frame.f_locals.get('context'):
lang = frame.f_locals['context'].get('lang')
if not lang:
kwargs = frame.f_locals.get('kwargs', {})
if kwargs.get('context'):
lang = kwargs['context'].get('lang')
if not lang:
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
lang = s.env.lang
if not lang:
if hasattr(s, 'localcontext'):
lang = s.localcontext.get('lang')
if not lang:
try:
from odoo.http import request
lang = request.env.lang
except RuntimeError:
pass
if not lang:
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
# don't know the original uid, so the language may
# be wrong when the admin language differs.
(cr, dummy) = self._get_cr(frame, allow_create=False)
uid = self._get_uid(frame)
if cr and uid:
env = odoo.api.Environment(cr, uid, {})
lang = env['res.users'].context_get()['lang']
return lang
def __call__(self, source):
res = source
cr = None
is_new_cr = False
try:
frame = inspect.currentframe()
if frame is None:
return source
frame = frame.f_back
if not frame:
return source
lang = self._get_lang(frame)
if lang:
cr, is_new_cr = self._get_cr(frame)
if cr:
# Try to use ir.translation to benefit from global cache if possible
env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
res = env['ir.translation']._get_source(None, ('code','sql_constraint'), lang, source)
else:
_logger.debug('no context cursor detected, skipping translation for "%r"', source)
else:
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
_logger.debug('translation went wrong for "%r", skipped', source)
# if so, double-check the root/base translations filenames
finally:
if cr and is_new_cr:
cr.close()
return res
_ = GettextAlias()
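# Usage sketch: _() infers the cursor and language from the caller's frame, e.g. in model
# code (hypothetical call site):
#
#   raise UserError(_("You cannot delete a posted entry."))
#
# When no cursor or language can be determined, the source string is returned unchanged.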
def quote(s):
"""Returns quoted PO term string, with special PO characters escaped"""
assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
return '"%s"' % s.replace('\\','\\\\') \
.replace('"','\\"') \
.replace('\n', '\\n"\n"')
re_escaped_char = re.compile(r"(\\.)")
re_escaped_replacements = {'n': '\n', 't': '\t',}
def _sub_replacement(match_obj):
return re_escaped_replacements.get(match_obj.group(1)[1], match_obj.group(1)[1])
def unquote(str):
"""Returns unquoted PO term string, with special PO characters unescaped"""
return re_escaped_char.sub(_sub_replacement, str[1:-1])
# class to handle po files
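# A typical entry consumed/produced by this class looks like (sketch; the module name and
# path are made up):
#
#   #. module: sale
#   #: code:addons/sale/models/sale.py:0
#   #, python-format
#   msgid "Invalid order!"
#   msgstr "Commande invalide !"
#
# Each "#:" reference line may carry several type:name:res_id targets for the same term.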
class PoFile(object):
def __init__(self, buffer):
# TextIOWrapper closes its underlying buffer on close *and* can't
# handle actual file objects (on python 2)
self.buffer = codecs.StreamReaderWriter(
stream=buffer,
Reader=codecs.getreader('utf-8'),
Writer=codecs.getwriter('utf-8')
)
def __iter__(self):
self.buffer.seek(0)
self.lines = self._get_lines()
self.lines_count = len(self.lines)
self.first = True
self.extra_lines= []
return self
def _get_lines(self):
lines = self.buffer.readlines()
# remove the BOM (Byte Order Mark):
if len(lines):
lines[0] = lines[0].lstrip(u"\ufeff")
lines.append('') # ensure that the file ends with at least an empty line
return lines
def cur_line(self):
return self.lines_count - len(self.lines)
def next(self):
trans_type = name = res_id = source = trad = module = None
if self.extra_lines:
trans_type, name, res_id, source, trad, comments = self.extra_lines.pop(0)
if not res_id:
res_id = '0'
else:
comments = []
targets = []
line = None
fuzzy = False
while not line:
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0).strip()
while line.startswith('#'):
if line.startswith('#~ '):
break
if line.startswith('#.'):
line = line[2:].strip()
if not line.startswith('module:'):
comments.append(line)
else:
module = line[7:].strip()
elif line.startswith('#:'):
# Process the `reference` comments. Each line can specify
# multiple targets (e.g. model, view, code, selection,
# ...). For each target, we will return an additional
# entry.
for lpart in line[2:].strip().split(' '):
trans_info = lpart.strip().split(':',2)
if trans_info and len(trans_info) == 2:
# looks like the translation trans_type is missing, which is not
# unexpected because it is not a GetText standard. Default: 'code'
trans_info[:0] = ['code']
if trans_info and len(trans_info) == 3:
# this is a ref line holding the destination info (model, field, record)
targets.append(trans_info)
elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
fuzzy = True
line = self.lines.pop(0).strip()
if not self.lines:
raise StopIteration()
while not line:
# allow empty lines between comments and msgid
line = self.lines.pop(0).strip()
if line.startswith('#~ '):
while line.startswith('#~ ') or not line.strip():
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0)
# This has been a deprecated entry, don't return anything
return next(self)
if not line.startswith('msgid'):
raise Exception("malformed file: bad line: %s" % line)
source = unquote(line[6:])
line = self.lines.pop(0).strip()
if not source and self.first:
self.first = False
# if the source is "" and it's the first msgid, it's the special
            # msgstr with the information about the translation and the
# translator; we skip it
self.extra_lines = []
while line:
line = self.lines.pop(0).strip()
return next(self)
while not line.startswith('msgstr'):
if not line:
raise Exception('malformed file at %d'% self.cur_line())
source += unquote(line)
line = self.lines.pop(0).strip()
trad = unquote(line[7:])
line = self.lines.pop(0).strip()
while line:
trad += unquote(line)
line = self.lines.pop(0).strip()
if targets and not fuzzy:
# Use the first target for the current entry (returned at the
# end of this next() call), and keep the others to generate
# additional entries (returned the next next() calls).
trans_type, name, res_id = targets.pop(0)
code = trans_type == 'code'
for t, n, r in targets:
if t == 'code' and code: continue
if t == 'code':
code = True
self.extra_lines.append((t, n, r, source, trad, comments))
if name is None:
if not fuzzy:
_logger.warning('Missing "#:" formatted comment at line %d for the following source:\n\t%s',
self.cur_line(), source[:30])
return next(self)
return trans_type, name, res_id, source, trad, '\n'.join(comments), module
__next__ = next
def write_infos(self, modules):
import odoo.release as release
self.buffer.write(u"# Translation of %(project)s.\n" \
"# This file contains the translation of the following modules:\n" \
"%(modules)s" \
"#\n" \
"msgid \"\"\n" \
"msgstr \"\"\n" \
'''"Project-Id-Version: %(project)s %(version)s\\n"\n''' \
'''"Report-Msgid-Bugs-To: \\n"\n''' \
'''"POT-Creation-Date: %(now)s\\n"\n''' \
'''"PO-Revision-Date: %(now)s\\n"\n''' \
'''"Last-Translator: <>\\n"\n''' \
'''"Language-Team: \\n"\n''' \
'''"MIME-Version: 1.0\\n"\n''' \
'''"Content-Type: text/plain; charset=UTF-8\\n"\n''' \
'''"Content-Transfer-Encoding: \\n"\n''' \
'''"Plural-Forms: \\n"\n''' \
"\n"
% { 'project': release.description,
'version': release.version,
'modules': ''.join("#\t* %s\n" % m for m in modules),
'now': datetime.utcnow().strftime('%Y-%m-%d %H:%M')+"+0000",
}
)
def write(self, modules, tnrs, source, trad, comments=None):
        plural = len(modules) > 1 and 's' or ''
        self.buffer.write(u"#. module%s: %s\n" % (plural, ', '.join(modules)))
if comments:
self.buffer.write(u''.join(('#. %s\n' % c for c in comments)))
code = False
for typy, name, res_id in tnrs:
self.buffer.write(u"#: %s:%s:%s\n" % (typy, name, res_id))
if typy == 'code':
code = True
if code:
# only strings in python code are python formatted
self.buffer.write(u"#, python-format\n")
msg = (
u"msgid %s\n"
u"msgstr %s\n\n"
) % (
quote(str(source)),
quote(str(trad))
)
self.buffer.write(msg)
# Methods to export the translation file
def trans_export(lang, modules, buffer, format, cr):
def _process(format, modules, rows, buffer, lang):
if format == 'csv':
writer = pycompat.csv_writer(buffer, dialect='UNIX')
# write header first
writer.writerow(("module","type","name","res_id","src","value","comments"))
for module, type, name, res_id, src, trad, comments in rows:
comments = '\n'.join(comments)
writer.writerow((module, type, name, res_id, src, trad, comments))
elif format == 'po':
writer = PoFile(buffer)
writer.write_infos(modules)
# we now group the translations by source. That means one translation per source.
grouped_rows = {}
for module, type, name, res_id, src, trad, comments in rows:
row = grouped_rows.setdefault(src, {})
row.setdefault('modules', set()).add(module)
if not row.get('translation') and trad != src:
row['translation'] = trad
row.setdefault('tnrs', []).append((type, name, res_id))
row.setdefault('comments', set()).update(comments)
for src, row in sorted(grouped_rows.items()):
if not lang:
# translation template, so no translation value
row['translation'] = ''
elif not row.get('translation'):
row['translation'] = ''
writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments'])
elif format == 'tgz':
rows_by_module = defaultdict(list)
for row in rows:
module = row[0]
rows_by_module[module].append(row)
with tarfile.open(fileobj=buffer, mode='w|gz') as tar:
for mod, modrows in rows_by_module.items():
with io.BytesIO() as buf:
_process('po', [mod], modrows, buf, lang)
buf.seek(0)
info = tarfile.TarInfo(
join(mod, 'i18n', '{basename}.{ext}'.format(
basename=lang or mod,
ext='po' if lang else 'pot',
)))
# addfile will read <size> bytes from the buffer so
# size *must* be set first
info.size = len(buf.getvalue())
tar.addfile(info, fileobj=buf)
else:
raise Exception(_('Unrecognized extension: must be one of '
'.csv, .po, or .tgz (received .%s).') % format)
translations = trans_generate(lang, modules, cr)
modules = set(t[0] for t in translations)
_process(format, modules, translations, buffer, lang)
del translations
def trans_parse_rml(de):
res = []
for n in de:
for m in n:
if isinstance(m, SKIPPED_ELEMENT_TYPES) or not m.text:
continue
            string_list = [s.replace('\n', ' ').strip() for s in re.split(r'\[\[.+?\]\]', m.text)]
for s in string_list:
if s:
res.append(s.encode("utf8"))
res.extend(trans_parse_rml(n))
return res
def _push(callback, term, source_line):
""" Sanity check before pushing translation terms """
term = (term or "").strip()
# Avoid non-char tokens like ':' '...' '.00' etc.
if len(term) > 8 or any(x.isalpha() for x in term):
callback(term, source_line)
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
if 'all' in modules:
return True
module_dict = {
'ir': 'base',
'res': 'base',
}
module = object_name.split('.')[0]
module = module_dict.get(module, module)
return module in modules
def _extract_translatable_qweb_terms(element, callback):
""" Helper method to walk an etree document representing
a QWeb template, and call ``callback(term)`` for each
translatable term that is found in the document.
:param etree._Element element: root of etree document to extract terms from
:param Callable callback: a callable in the form ``f(term, source_line)``,
that will be called for each extracted term.
"""
# not using elementTree.iterparse because we need to skip sub-trees in case
# the ancestor element had a reason to be skipped
for el in element:
if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
if (el.tag.lower() not in SKIPPED_ELEMENTS
and "t-js" not in el.attrib
and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib)
and el.get("t-translation", '').strip() != "off"):
_push(callback, el.text, el.sourceline)
for att in ('title', 'alt', 'label', 'placeholder'):
if att in el.attrib:
_push(callback, el.attrib[att], el.sourceline)
_extract_translatable_qweb_terms(el, callback)
_push(callback, el.tail, el.sourceline)
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
"""Babel message extractor for qweb template files.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should
be recognized as translation functions
:param comment_tags: a list of translator tags to search for and
include in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: Iterable
"""
result = []
def handle_text(text, lineno):
result.append((lineno, None, text, []))
tree = etree.parse(fileobj)
_extract_translatable_qweb_terms(tree.getroot(), handle_text)
return result
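# Standalone usage sketch (the file name is hypothetical); trans_generate() below normally
# invokes this extractor through its dotted name 'odoo.tools.translate:babel_extract_qweb':
#
#   with open('static/src/xml/widgets.xml', 'rb') as f:
#       for lineno, funcname, message, comments in babel_extract_qweb(f, None, None, {}):
#           print(message)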
def trans_generate(lang, modules, cr):
env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
to_translate = set()
def push_translation(module, type, name, id, source, comments=None):
# empty and one-letter terms are ignored, they probably are not meant to be
# translated, and would be very hard to translate anyway.
sanitized_term = (source or '').strip()
# remove non-alphanumeric chars
sanitized_term = re.sub(r'\W+', '', sanitized_term)
if not sanitized_term or len(sanitized_term) <= 1:
return
tnx = (module, source, name, id, type, tuple(comments or ()))
to_translate.add(tnx)
query = 'SELECT min(name), model, res_id, module FROM ir_model_data'
query_models = """SELECT m.id, m.model, imd.module
FROM ir_model AS m, ir_model_data AS imd
WHERE m.id = imd.res_id AND imd.model = 'ir.model'"""
if 'all_installed' in modules:
query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
if 'all' not in modules:
query += ' WHERE module IN %s'
query_models += ' AND imd.module IN %s'
query_param = (tuple(modules),)
else:
query += ' WHERE module != %s'
query_models += ' AND imd.module != %s'
query_param = ('__export__',)
query += ' GROUP BY model, res_id, module ORDER BY module, model, min(name)'
query_models += ' ORDER BY module, model'
cr.execute(query, query_param)
for (xml_name, model, res_id, module) in cr.fetchall():
xml_name = "%s.%s" % (module, xml_name)
if model not in env:
_logger.error(u"Unable to find object %r", model)
continue
record = env[model].browse(res_id)
if not record._translate:
# explicitly disabled
continue
if not record.exists():
_logger.warning(u"Unable to find object %r with id %d", model, res_id)
continue
if model==u'ir.model.fields':
try:
field_name = record.name
except AttributeError as exc:
_logger.error(u"name error in %s: %s", xml_name, str(exc))
continue
field_model = env.get(record.model)
if (field_model is None or not field_model._translate or
field_name not in field_model._fields):
continue
field = field_model._fields[field_name]
if isinstance(getattr(field, 'selection', None), (list, tuple)):
name = "%s,%s" % (record.model, field_name)
for dummy, val in field.selection:
push_translation(module, 'selection', name, 0, val)
for field_name, field in record._fields.items():
if field.translate:
name = model + "," + field_name
try:
value = record[field_name] or ''
except Exception:
continue
for term in set(field.get_trans_terms(value)):
trans_type = 'model_terms' if callable(field.translate) else 'model'
push_translation(module, trans_type, name, xml_name, term)
# End of data for ir.model.data query results
def push_constraint_msg(module, term_type, model, msg):
if not callable(msg):
push_translation(encode(module), term_type, encode(model), 0, msg)
def push_local_constraints(module, model, cons_type='sql_constraints'):
""" Climb up the class hierarchy and ignore inherited constraints from other modules. """
term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
msg_pos = 2 if cons_type == 'sql_constraints' else 1
for cls in model.__class__.__mro__:
if getattr(cls, '_module', None) != module:
continue
constraints = getattr(cls, '_local_' + cons_type, [])
for constraint in constraints:
push_constraint_msg(module, term_type, model._name, constraint[msg_pos])
cr.execute(query_models, query_param)
for (_, model, module) in cr.fetchall():
if model not in env:
_logger.error("Unable to find object %r", model)
continue
Model = env[model]
if Model._constraints:
push_local_constraints(module, Model, 'constraints')
if Model._sql_constraints:
push_local_constraints(module, Model, 'sql_constraints')
installed_modules = [
m['name']
for m in env['ir.module.module'].search_read([('state', '=', 'installed')], fields=['name'])
]
path_list = [(path, True) for path in odoo.modules.module.ad_paths]
# Also scan these non-addon paths
for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
path_list.append((os.path.join(config['root_path'], bin_path), True))
# non-recursive scan for individual files in root directory but without
# scanning subdirectories that may contain addons
path_list.append((config['root_path'], False))
_logger.debug("Scanning modules at paths: %s", path_list)
def get_module_from_path(path):
for (mp, rec) in path_list:
mp = os.path.join(mp, '')
dirname = os.path.join(os.path.dirname(path), '')
if rec and path.startswith(mp) and dirname != mp:
path = path[len(mp):]
return path.split(os.path.sep)[0]
return 'base' # files that are not in a module are considered as being in 'base' module
def verified_module_filepaths(fname, path, root):
fabsolutepath = join(root, fname)
frelativepath = fabsolutepath[len(path):]
display_path = "addons%s" % frelativepath
module = get_module_from_path(fabsolutepath)
if ('all' in modules or module in modules) and module in installed_modules:
if os.path.sep != '/':
display_path = display_path.replace(os.path.sep, '/')
return module, fabsolutepath, frelativepath, display_path
return None, None, None, None
def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code',
extra_comments=None, extract_keywords={'_': None}):
module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
extra_comments = extra_comments or []
if not module: return
src_file = open(fabsolutepath, 'rb')
try:
for extracted in extract.extract(extract_method, src_file, keywords=extract_keywords):
# Babel 0.9.6 yields lineno, message, comments
# Babel 1.3 yields lineno, message, comments, context
lineno, message, comments = extracted[:3]
push_translation(module, trans_type, display_path, lineno,
encode(message), comments + extra_comments)
except Exception:
_logger.exception("Failed to extract terms from %s", fabsolutepath)
finally:
src_file.close()
for (path, recursive) in path_list:
_logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in walksymlinks(path):
for fname in fnmatch.filter(files, '*.py'):
babel_extract_terms(fname, path, root)
# Javascript source files in the static/src/js directory, rest is ignored (libs)
if fnmatch.fnmatch(root, '*/static/src/js*'):
for fname in fnmatch.filter(files, '*.js'):
babel_extract_terms(fname, path, root, 'javascript',
extra_comments=[WEB_TRANSLATION_COMMENT],
extract_keywords={'_t': None, '_lt': None})
# QWeb template files
if fnmatch.fnmatch(root, '*/static/src/xml*'):
for fname in fnmatch.filter(files, '*.xml'):
babel_extract_terms(fname, path, root, 'odoo.tools.translate:babel_extract_qweb',
extra_comments=[WEB_TRANSLATION_COMMENT])
if not recursive:
# due to topdown, first iteration is in first level
break
out = []
# translate strings marked as to be translated
Translation = env['ir.translation']
for module, source, name, id, type, comments in sorted(to_translate):
trans = Translation._get_source(name, type, lang, source) if lang else ""
out.append((module, type, name, id, source, encode(trans) or '', comments))
return out
def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
try:
with file_open(filename, mode='rb') as fileobj:
_logger.info("loading %s", filename)
fileformat = os.path.splitext(filename)[-1][1:].lower()
result = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, module_name=module_name, context=context)
return result
except IOError:
if verbose:
_logger.error("couldn't read translation file %s", filename)
return None
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
"""Populates the ir_translation table."""
if verbose:
_logger.info('loading translation file for language %s', lang)
env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, context or {})
Lang = env['res.lang']
Translation = env['ir.translation']
try:
if not Lang.search_count([('code', '=', lang)]):
# lets create the language with locale information
Lang.load_lang(lang=lang, lang_name=lang_name)
# Parse also the POT: it will possibly provide additional targets.
# (Because the POT comments are correct on Launchpad but not the
# PO comments due to a Launchpad limitation. See LP bug 933496.)
pot_reader = []
use_pot_reference = False
# now, the serious things: we read the language file
fileobj.seek(0)
if fileformat == 'csv':
reader = pycompat.csv_reader(fileobj, quotechar='"', delimiter=',')
# read the first line of the file (it contains columns titles)
fields = next(reader)
elif fileformat == 'po':
reader = PoFile(fileobj)
fields = ['type', 'name', 'res_id', 'src', 'value', 'comments', 'module']
# Make a reader for the POT file and be somewhat defensive for the
# stable branch.
            # when fileobj is a TemporaryFile, its name is an integer in Python 3 and a string in Python 2
if isinstance(fileobj.name, str) and fileobj.name.endswith('.po'):
try:
# Normally the path looks like /path/to/xxx/i18n/lang.po
# and we try to find the corresponding
# /path/to/xxx/i18n/xxx.pot file.
# (Sometimes we have 'i18n_extra' instead of just 'i18n')
addons_module_i18n, _ignored = os.path.split(fileobj.name)
addons_module, i18n_dir = os.path.split(addons_module_i18n)
addons, module = os.path.split(addons_module)
pot_handle = file_open(os.path.join(
addons, module, i18n_dir, module + '.pot'), mode='rb')
pot_reader = PoFile(pot_handle)
use_pot_reference = True
except:
pass
else:
_logger.info('Bad file format: %s', fileformat)
raise Exception(_('Bad file format: %s') % fileformat)
# Read the POT references, and keep them indexed by source string.
class Target(object):
def __init__(self):
self.value = None
self.targets = set() # set of (type, name, res_id)
self.comments = None
pot_targets = defaultdict(Target)
for type, name, res_id, src, _ignored, comments, module in pot_reader:
if type is not None:
target = pot_targets[src]
target.targets.add((type, name, type != 'code' and res_id or 0))
target.comments = comments
# read the rest of the file
irt_cursor = Translation._get_import_cursor()
def process_row(row):
"""Process a single PO (or POT) entry."""
# dictionary which holds values for this line of the csv file
# {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
# 'src': ..., 'value': ..., 'module':...}
dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
'comments', 'imd_model', 'imd_name', 'module'))
dic['lang'] = lang
dic.update(zip(fields, row))
# do not import empty values
if not env.context.get('create_empty_translation', False) and not dic['value']:
return
if use_pot_reference:
# discard the target from the POT targets.
src = dic['src']
target_key = (dic['type'], dic['name'], dic['type'] != 'code' and dic['res_id'] or 0)
target = pot_targets.get(src)
if not target or target_key not in target.targets:
_logger.info("Translation '%s' (%s, %s, %s) not found in reference pot, skipping",
src[:60], dic['type'], dic['name'], dic['res_id'])
return
target.value = dic['value']
target.targets.discard(target_key)
# This would skip terms that fail to specify a res_id
res_id = dic['res_id']
if not res_id and dic['type'] != 'code':
return
if isinstance(res_id, int) or \
(isinstance(res_id, str) and res_id.isdigit()):
dic['res_id'] = int(res_id)
if module_name:
dic['module'] = module_name
else:
# res_id is an xml id
dic['res_id'] = None
dic['imd_model'] = dic['name'].split(',')[0]
if '.' in res_id:
dic['module'], dic['imd_name'] = res_id.split('.', 1)
else:
dic['module'], dic['imd_name'] = module_name, res_id
irt_cursor.push(dic)
# First process the entries from the PO file (doing so also fills/removes
# the entries from the POT file).
for row in reader:
process_row(row)
if use_pot_reference:
# Then process the entries implied by the POT file (which is more
# correct w.r.t. the targets) if some of them remain.
pot_rows = []
for src, target in pot_targets.items():
if target.value:
for type, name, res_id in target.targets:
pot_rows.append((type, name, res_id, src, target.value, target.comments))
for row in pot_rows:
process_row(row)
irt_cursor.finish()
Translation.clear_caches()
if verbose:
_logger.info("translation file loaded successfully")
except IOError:
iso_lang = get_iso_codes(lang)
filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
_logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
if lang is None:
lang = locale.getdefaultlocale()[0]
if os.name == 'nt':
lang = _LOCALE2WIN32.get(lang, lang)
def process(enc):
ln = locale._build_localename((lang, enc))
yield ln
nln = locale.normalize(ln)
if nln != ln:
yield nln
for x in process('utf8'): yield x
prefenc = locale.getpreferredencoding()
if prefenc:
for x in process(prefenc): yield x
prefenc = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'cp1252': '1252',
}.get(prefenc.lower())
if prefenc:
for x in process(prefenc): yield x
yield lang
def resetlocale():
# locale.resetlocale is bugged with some locales.
for ln in get_locales():
try:
return locale.setlocale(locale.LC_ALL, ln)
except locale.Error:
continue
def load_language(cr, lang):
""" Loads a translation terms for a language.
Used mainly to automate language loading at db initialization.
:param lang: language ISO code with optional _underscore_ and l10n flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
:type lang: str
"""
env = odoo.api.Environment(cr, odoo.SUPERUSER_ID, {})
installer = env['base.language.install'].create({'lang': lang})
installer.lang_install()
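# e.g. load_language(cr, 'fr_BE') with an open cursor, typically during database initialization.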
| t3dev/odoo | odoo/tools/translate.py | Python | gpl-3.0 | 48,503 |
__author__ = 'nash.xiejun'
import logging
import traceback
import os
from oslo.config import cfg
from common import config
from common.engineering_logging import log_for_func_of_class
import utils
from utils import AllInOneUsedCMD, print_log, ELog
from common.config import ConfigCommon
from services import RefServices
from common.econstants import ConfigReplacement
logger_name = __name__
module_logger = logging.getLogger(__name__)
logger = ELog(module_logger)
CONF = cfg.CONF
class ValidateBase(object):
@log_for_func_of_class(logger_name)
def validate(self):
return True
class ConfiguratorBase(object):
@log_for_func_of_class(logger_name)
def config(self):
return True
class InstallerBase(object):
@log_for_func_of_class(logger_name)
def install(self):
return True
class CheckBase(object):
@log_for_func_of_class(logger_name)
def check(self):
return True
class EnginneringFactory(object):
def __init__(self, factory_name, validator=ValidateBase(), installer=InstallerBase(), configurator=ConfiguratorBase(), checker=CheckBase()):
self.factory_name = factory_name
self.validator = validator
self.installer = installer
self.configurator = configurator
self.checker = checker
def instance(self):
return self
def execute(self):
big_sep = '*********************************************************'
logger.info('')
logger.info(big_sep)
logger.info('**** Start to deploy for >>>> %s <<<< ****' % self.factory_name)
logger.info(big_sep)
execute_result = True
sep = '****************************'
logger.info(sep)
validate_result = self.validator.validate()
logger.info(sep)
logger.info(sep)
install_result = self.installer.install()
logger.info(sep)
logger.info(sep)
config_result = self.configurator.config()
logger.info(sep)
logger.info(sep)
check_result = self.checker.check()
logger.info(sep)
logger.info(big_sep)
logger.info('**** SUCCESS to deploy for >>>> %s <<<< ****' % self.factory_name)
logger.info(big_sep)
class HostnameConfigurator(ConfiguratorBase):
def config(self):
try:
self._config_etc_hostname()
self._config_etc_hosts()
AllInOneUsedCMD.reboot()
except:
logger.error('Exception occur when config hostname. EXCEPTION: %s' % traceback.format_exc())
def _config_etc_hostname(self):
logger.info('Start to config hostname file')
with open(config.CONF.file_hostname, 'w') as hostname_file:
hostname_file.truncate()
hostname_file.write(config.CONF.sysconfig.hostname)
logger.info('Success to config hostname file, /etc/hostname')
def _config_etc_hosts(self):
logger.info('Start to config hosts file')
modified_contents = ''
with open(config.CONF.file_hosts, 'r') as hosts_file:
for line in hosts_file:
if modified_contents == '':
modified_contents = line.replace('openstack', config.CONF.sysconfig.hostname)
else:
modified_contents = ''.join([modified_contents, line.replace('openstack',
config.CONF.sysconfig.hostname)])
with open(config.CONF.file_hosts, 'w') as hosts_file:
hosts_file.truncate()
hosts_file.write(modified_contents)
logger.info('Config hosts file success, /etc/hosts')
class AllInOneConfigurator(ConfiguratorBase):
@log_for_func_of_class(logger_name)
def config(self):
result = 'SUCCESS'
try:
# AllInOneUsedCMD.rabbitmq_changed_pwd()
self._config_rabbitmq_pwd()
self._config_rc_local()
self._config_nova_conf()
self._config_neutron_conf()
self._copy_self_define_ml2()
self._config_ml2_ini()
self._config_sysctl()
self._config_l3_agent()
self._config_dhcp_agent()
self._config_metadata_agent()
except:
logger.error('Exception occur when All-In-One Config. EXCEPTION: %s' % traceback.format_exc())
result = 'FAILED'
return result
@log_for_func_of_class(logger_name)
def _config_rabbitmq_pwd(self):
result = 'Failed'
        try:
            AllInOneUsedCMD.rabbitmq_changed_pwd()
            result = 'SUCCESS'
        except:
            logger.error('Exception occur when config rabbitMQ. EXCEPTION: %s' % traceback.format_exc())
return result
@log_for_func_of_class(logger_name)
def _config_rc_local(self):
result = False
try:
contents = ['service nova-cert restart\n',
'service nova-scheduler restart\n',
'ifconfig br-ex:0 %s netmask 255.255.255.0\n' % config.CONF.sysconfig.ml2_local_ip,
'exit 0']
with open(config.CONF.rc_local_file, 'w') as rc_local_file:
rc_local_file.truncate()
rc_local_file.writelines(contents)
result = True
except:
logger.error('Exception occur when config rc.local. EXCEPTION: %s' % traceback.format_exc())
return result
@log_for_func_of_class(logger_name)
def _config_nova_conf(self):
result = False
try:
vncserver_listen = '0.0.0.0'
path_nova_conf_file = config.CONF.path_nova_conf
config_common = ConfigCommon(path_nova_conf_file)
config_common.set_default('vncserver_listen', vncserver_listen)
config_common.set_default('service_metadata_proxy', 'False')
config_common.set_default('metadata_proxy_shared_secret', 'openstack')
config_common.write_commit()
result = True
except:
logger.error('Exception occur when config nova.conf. EXCEPTION: %s' % traceback.format_exc())
return result
@log_for_func_of_class(logger_name)
def _config_neutron_conf(self):
result = False
path_neutron_conf = config.CONF.path_neutron_conf
try:
self._config_tenant_id_in_neutron_conf(path_neutron_conf)
result = True
except:
logger.error('Exception occur when config neutron conf, EXCEPTION: %s' % traceback.format_exc())
return result
def _config_tenant_id_in_neutron_conf(self, path_neutron_conf):
option_nova_admin_tenant_id = 'nova_admin_tenant_id'
# get tenant id
value_nova_admin_tenant_id = RefServices().get_tenant_id_for_service()
logger.info('tenant id of service is <%s>' % (value_nova_admin_tenant_id))
config_common = ConfigCommon(path_neutron_conf)
config_common.set_default(option_nova_admin_tenant_id, value_nova_admin_tenant_id)
config_common.write_commit()
    @log_for_func_of_class(logger_name)
def _copy_self_define_ml2(self):
result = False
try:
self_define_ml2_file = os.path.split(os.path.realpath(__file__))[0] +'/config/ml2_conf.ini'
destiny = config.CONF.path_ml2_ini
result = AllInOneUsedCMD.cp_to(self_define_ml2_file, str(destiny))
except:
err_info = 'Exception occur when copy self define ml2 file. Exception: %s' % traceback.format_exc()
print err_info
logger.error(err_info)
return result
@log_for_func_of_class(logger_name)
def _config_ml2_ini(self):
result = False
try:
ml2_section_ovf = 'ovs'
option_local_ip = 'local_ip'
config_common = ConfigCommon(config.CONF.path_ml2_ini)
config_common.set_option(ml2_section_ovf, option_local_ip, config.CONF.sysconfig.ml2_local_ip)
config_common.write_commit()
result = True
except:
err_info = 'Exception occur when config ml2_conf.ini. Exception: %s' % traceback.format_exc()
print err_info
logger.error(err_info)
return result
@log_for_func_of_class(logger_name)
def _config_sysctl(self):
result = False
try:
option_all_rp_filter = 'net.ipv4.conf.all.rp_filter=0'
option_default_rp_filter = 'net.ipv4.conf.default.rp_filter=0'
contents = [option_all_rp_filter,
option_default_rp_filter]
with open(config.CONF.path_sysctl, 'w') as sysctl_file:
sysctl_file.writelines(contents)
result = True
except:
err_info = 'Exception occur when config sysctl.conf. Exception: %s' % traceback.format_exc()
logger.error(err_info)
print(err_info)
return result
@log_for_func_of_class(logger_name)
def _config_l3_agent(self):
"""
write external_network_bridge=br-ex to /etc/neutron/l3_agent.ini.
[DEFAULT]
external_network_bridge = br-ex
:return: boolean
"""
result = False
try:
option_external_network_bridge = 'external_network_bridge'
value_external_network_bridge = 'br-ex'
config_common = ConfigCommon(config.CONF.path_l3_agent)
            config_common.set_default(option_external_network_bridge, value_external_network_bridge)
            config_common.write_commit()
            result = True
except:
err_info = 'Exception occur when config l3_agent.ini. Exception: %s' % traceback.format_exc()
logger.error(err_info)
print(err_info)
return result
@log_for_func_of_class(logger_name)
def _config_dhcp_agent(self):
"""
config /etc/neutron/dhcp_agent.ini,
set following:
#vi /etc/neutron/dhcp_agent.ini
[DEFAULT]
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
use_namespaces = False
:return:
"""
result = False
try:
option_dhcp_driver = 'dhcp_driver'
value_dhcp_driver = 'neutron.agent.linux.dhcp.Dnsmasq'
            option_use_namespaces = 'use_namespaces'
value_use_namespaces = 'False'
common_config = ConfigCommon(config.CONF.path_dhcp_agent_ini)
common_config.set_default(option_dhcp_driver, value_dhcp_driver)
common_config.set_default(option_use_namespaces, value_use_namespaces)
common_config.write_commit()
result = True
except:
err_info = 'Exception occur when config dhcp_agent.ini. Exception: %s' % traceback.format_exc()
logger.error(err_info)
print(err_info)
return result
@log_for_func_of_class(logger_name)
def _config_metadata_agent(self):
"""
# vi /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_ip = 162.3.110.71
metadata_proxy_shared_secret = openstack
:return:
"""
        result = False
try:
option_nova_metadata_ip = 'nova_metadata_ip'
value_nova_metadata_ip = config.CONF.sysconfig.local_host_ip
option_metadata_proxy_shared_secret = 'metadata_proxy_shared_secret'
value_metadata_proxy_shared_secret = 'openstack'
config_common = ConfigCommon(config.CONF.path_metadata_agent_ini)
config_common.set_default(option_nova_metadata_ip,
value_nova_metadata_ip)
config_common.set_default(option_metadata_proxy_shared_secret,
value_metadata_proxy_shared_secret)
config_common.write_commit()
result = True
except:
err_info = 'Exception occur when config dhcp_agent.ini. Exception: %s' % traceback.format_exc()
logger.error(err_info)
print(err_info)
return result
class PatchInstaller(InstallerBase):
def __init__(self, patch_path, openstack_install_path, filters):
"""
:param patch_path:
for example: /root/tricircle-master/novaproxy/
/root/tricircle-master/juno-patches/nova_scheduling_patch/
:param openstack_install_path:
for example: '/usr/lib/python2.7/dist-packages/'
:param filters:
for example: ['.py']
:return:
"""
# patch_path is /root/tricircle-master/juno-patches/nova/nova_scheduling_patch/
self.patch_path = patch_path
# install_path is openstack installed path'/usr/lib/python2.7/dist-packages/'
self.openstack_install_path = openstack_install_path
# filter is valid suffix of files, for example: ['.py']
self.filters = filters
self.bak_openstack_path = config.CONF.sysconfig.openstack_bak_path
def get_patch_files(self, patch_path, filters):
"""
:param patch_path: path of patch's source code
:param filters: [] array of valid suffix of file. for example: ['.py']
:return: (absolute path, relative path)
for example:
[(/root/tricircle-master/novaproxy/nova/compute/clients.py,
nova/compute/clients.py), ..]
"""
return utils.get_files(patch_path, filters)
def bak_patched_file(self, bak_file_path, relative_path):
"""
        :param bak_file_path: the installed file to back up before it is patched,
            for example: /usr/lib/python2.7/dist-packages/nova/conductor/manager.py
:param relative_path:
for example: nova/conductor/manager.py
:return:
"""
logger.info('Start bak_patched_file, bak_file_path:%s, relative_path:%s' % (bak_file_path, relative_path))
# relative_path is relative to this path(self.patch_path),
# for example: if self.patch_path = "/root/tricircle-master/juno-patches/nova/nova_scheduling_patch/"
# then relative_path of manager.py is "/nova/nova_scheduling_patch/nova/conductor/manager.py"
if not os.path.isdir(self.bak_openstack_path):
AllInOneUsedCMD.mkdir(self.bak_openstack_path)
bak_dir = os.path.join(self.bak_openstack_path, os.path.dirname(relative_path))
if not os.path.isdir(bak_dir):
AllInOneUsedCMD.mkdir(bak_dir)
if os.path.isfile(bak_file_path):
AllInOneUsedCMD.cp_to(bak_file_path, bak_dir)
else:
info = 'file: <%s> is a new file, no need to bak.' % bak_file_path
logger.info(info)
logger.info('Success to bak_patched_file, bak_file_path:%s' % bak_file_path)
@log_for_func_of_class(logger_name)
def install(self):
result = 'FAILED'
try:
patch_files = self.get_patch_files(self.patch_path, self.filters)
if not patch_files:
logger.error('No files in %s' % self.patch_path)
for absolute_path, relative_path in patch_files:
# installed_path is full install path,
# for example: /usr/lib/python2.7/dist-packages/nova/conductor/manager.py
openstack_installed_file = os.path.join(self.openstack_install_path, relative_path)
self.bak_patched_file(openstack_installed_file, relative_path)
copy_dir = os.path.dirname(openstack_installed_file)
if not os.path.isdir(copy_dir):
AllInOneUsedCMD.mkdir(copy_dir)
cp_result = AllInOneUsedCMD.cp_to(absolute_path, openstack_installed_file)
if cp_result:
logger.info('Success to copy source file:%s' % absolute_path)
else:
logger.info('Failed to copy source file:%s' % absolute_path)
result = 'SUCCESS'
except:
logger.error('Exception occur when install patch: %s, Exception: %s' %
(self.patch_path, traceback.format_exc()))
return result
class PatchConfigurator(ConfiguratorBase):
"""
    We make the structure of each patch follow the original source code structure,
    and the config file structure is the same as the original OpenStack config file structure.
    So when we need to apply a patch, we can read all of its config files and merge them into the system config files directly.
for example: novaproxy, the structure of patch novaproxy is as following.
novaproxy/
etc/
nova/
nova.conf
nova-compute.conf
nova/
compute/
clients.py
compute_context.py
"""
def __init__(self, absolute_path_of_patch, filter):
"""
:param absolute_path_of_patch: path of patches config file.
for example: /root/tricircle-master/novaproxy/
/root/tricircle-master/juno-patches/nova_scheduling_patch/
:param filter: ['.conf', '.ini']
:return:
"""
self.absolute_path_of_patch = absolute_path_of_patch
self.filter = filter
self.system_replacement = {
ConfigReplacement.AVAILABILITY_ZONE : config.CONF.node_cfg.availability_zone,
ConfigReplacement.CASCADED_NODE_IP : config.CONF.node_cfg.cascaded_node_ip,
ConfigReplacement.CASCADING_NODE_IP : config.CONF.node_cfg.cascading_node_ip,
ConfigReplacement.CINDER_TENANT_ID : RefServices().get_tenant_id_for_admin(),
ConfigReplacement.REGION_NAME : config.CONF.node_cfg.region_name,
ConfigReplacement.CASCADING_OS_REGION_NAME : config.CONF.node_cfg.cascading_os_region_name,
ConfigReplacement.ML2_LOCAL_IP : config.CONF.sysconfig.ml2_local_ip
}
self.exclude_replacement = ['project_id']
self.bak_openstack_path = config.CONF.sysconfig.openstack_bak_path
def _get_all_config_files(self):
"""
:return:[(<absolute_path>, <relative_path>), ..]
"""
return utils.get_files(self.absolute_path_of_patch, self.filter)
@log_for_func_of_class(logger_name)
def config(self):
result = 'FAILED'
try:
config_files = self._get_all_config_files()
if not config_files:
logger.info('There is no config file in %s ' % self.absolute_path_of_patch)
return 'No config file, no need to config.'
for absolute_path, relative_path in config_files:
user_config = ConfigCommon(absolute_path)
openstack_config_file = os.path.join(os.path.sep, relative_path)
self.bak_cfg_file(openstack_config_file, relative_path)
sys_config = ConfigCommon(openstack_config_file)
default_options = user_config.get_options_dict_of_default()
for key, value in default_options.items():
value = self.replace_value_for_sysconfig(key, value)
sys_config.set_default(key, value)
user_sections = user_config.get_sections()
for section in user_sections:
section_options = user_config.get_options_dict_of_section(section)
for key, value in section_options.items():
value = self.replace_value_for_sysconfig(key, value)
sys_config.set_option(section, key, value)
sys_config.write_commit()
result = 'SUCCESS'
except:
            logger.error('Exception occurred when configuring: %s, Exception: %s' %
(self.absolute_path_of_patch, traceback.format_exc()))
return result
def replace_value_for_sysconfig(self, key, value):
try:
if key == 'cinder_endpoint_template':
value = 'http://%(cascading_node_ip)s:8776/v2/'
if key == 'cascaded_cinder_url':
value = 'http://%(cascaded_node_ip)s:8776/v2/'
for replace_symbol in self.system_replacement.keys():
                add_braces_symbol = ''.join(['(', replace_symbol, ')'])
                if add_braces_symbol in value:
replace_value = self.system_replacement.get(replace_symbol)
value = value % {replace_symbol : replace_value}
if key == 'cinder_endpoint_template':
value = ''.join([value, '%(project_id)s'])
if key == 'cascaded_cinder_url':
value = ''.join([value, '%(project_id)s'])
except:
            logger.error('Exception occurred when replacing value for key: %s, value: %s, Exception is: %s' %
(key, value, traceback.format_exc()))
return value
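    # Illustrative walk-through of replace_value_for_sysconfig (hypothetical
    # addresses): given value = 'http://%(cascading_node_ip)s:8776/v2/' and
    # system_replacement = {'cascading_node_ip': '10.0.0.1', ...}, the loop
    # above rewrites the value to 'http://10.0.0.1:8776/v2/'; for the cinder
    # endpoint keys the literal '%(project_id)s' suffix is appended afterwards
    # so it stays a runtime template instead of being substituted here.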
def bak_cfg_file(self, bak_file_path, relative_path):
"""
        :param bak_file_path: installed config file to back up,
            for example: /etc/nova/nova.conf
        :param relative_path:
            for example: etc/nova/nova.conf
:return:
"""
logger.info('Start bak cfg file, bak_file_path:%s, relative_path:%s' % (bak_file_path, relative_path))
        # relative_path is relative to this path (self.absolute_path_of_patch),
        # for example: if self.absolute_path_of_patch = "/root/tricircle-master/novaproxy/"
        # then relative_path of nova.conf is "etc/nova/nova.conf"
if not os.path.isdir(self.bak_openstack_path):
AllInOneUsedCMD.mkdir(self.bak_openstack_path)
bak_dir = os.path.join(self.bak_openstack_path, os.path.dirname(relative_path))
if not os.path.isdir(bak_dir):
AllInOneUsedCMD.mkdir(bak_dir)
if os.path.isfile(bak_file_path):
AllInOneUsedCMD.cp_to(bak_file_path, bak_dir)
else:
info = 'file: <%s> is a new file, no need to bak.' % bak_file_path
logger.info(info)
logger.info('Success to bak cfg file, bak cfg file from %s to %s' % (bak_file_path, bak_dir)) | Hybrid-Cloud/badam | engineering/engineering_factory.py | Python | apache-2.0 | 22,280 |
# This python script contains functions and classes used in the
# two synthetic galaxy model notebooks in this directory.
import astropy.io.ascii as asciitable
from scipy import interpolate
import numpy as np
def LoadData(galaxy_datafile, HIrad, ddensdR):
# Read the galaxy description file which contains rotation velocity and
# density information in a comma-delimited file where each row contains
# radius (in kpc), a rotational velocity (in km/s), and a density
# (in atoms/cm^3).
#
raw_data = asciitable.read(galaxy_datafile)
# Restructure the data into a 3xN array containing radius, rot. vel.,
# and gas density in "columns"
galaxy_data = np.hstack((raw_data['radius'].reshape(-1, 1),
raw_data['rot_vel'].reshape(-1, 1),
raw_data['density'].reshape(-1, 1)))
#
# Interpolate any extension to the rotation curve as flat,
# but with dropping density out to HI radius limit.
#
extrapol_step = (25 - galaxy_data[-1][0])/4
while (galaxy_data[-1][0] < HIrad):
new_rad = galaxy_data[-1][0]+extrapol_step
new_vel = galaxy_data[-1][1]
if (galaxy_data[-1][2] > ddensdR*extrapol_step):
new_dens = galaxy_data[-1][2] - ddensdR*extrapol_step
else:
new_dens = 0.0
new_row = np.array([new_rad, new_vel, new_dens])
galaxy_data = np.vstack((galaxy_data, new_row))
# Save raw values
rad_raw = np.copy(galaxy_data[:, 0])
rotvel_raw = np.copy(galaxy_data[:, 1])
density_raw = np.copy(galaxy_data[:, 2])
return (rad_raw, rotvel_raw, density_raw)
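# Minimal usage sketch (hypothetical file name and parameter values):
#   rad, vel, dens = LoadData('galaxy_rotation.csv', HIrad=30.0, ddensdR=0.01)
#   rad_sp, vel_sp, dens_sp = spline_curves(rad, vel, dens, dr=0.1)
# The input file is assumed to be the comma-delimited table described above,
# with columns named 'radius' (kpc), 'rot_vel' (km/s) and 'density' (atoms/cm^3).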
def spline_curves(rad, vel, dens, dr):
# Do a modified spline fit to smooth the rotation curve and gas density
# data to smooth out data gaps and make sure density and rotational
# velocity don't go negative
# Find the spline representation of rotation curve (default is cubic
# spline)
rotvel_fit = interpolate.splrep(rad, vel, s=0)
density_fit = interpolate.splrep(rad, dens, s=0)
# Fit spline along evenly spaced points (in radius) and restrict rotational
    # velocity and density to be positive (since the spline fit is a bit wiggly here
# and caused very small scale 'negative' values at origin for velocity and
# at high radii for density).
rad_sp = np.linspace(0, rad[-1], int(rad[-1]/dr))
rotvel_sp = np.absolute(interpolate.splev(rad_sp, rotvel_fit, der=0).round(1))
density_sp = np.absolute(interpolate.splev(rad_sp, density_fit, der=0).round(3))
return(rad_sp, rotvel_sp, density_sp)
def RotCoord(x, y):
# Converts (x, y) to (r,theta)
# Can work on entire arrays of x and y
return (np.sqrt(x*x+y*y), np.arctan2(y, x))
class nf(float):
# This class allows floating point numbers to be printed as integers.
# Based on
# http://matplotlib.sourceforge.net/examples/pylab_examples/contour_label_demo.html
def __repr__(self):
        s = '%.1f' % (self.__float__(),)
        if s[-1] == '0':
            return '%.0f' % self.__float__()
        else:
            return '%.1f' % self.__float__()
| JuanCab/synthetic_HI_models | galaxyparam.py | Python | gpl-3.0 | 3,144 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012 smallevilbeast
#
# Author: smallevilbeast <houshao55@gmail.com>
# Maintainer: smallevilbeast <houshao55@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import os
from dtk.ui.window import Window
from dtk.ui.utils import get_parent_dir
from dtk.ui.label import Label
from dtk.ui.draw import draw_vlinear
from dtk.ui.entry import TextEntry, InputEntry
from dtk.ui.titlebar import Titlebar
from dtk.ui.button import CheckButton, Button
from dtk.ui.combo import ComboBox
from ui.utils import (set_widget_gravity, set_widget_left, set_widget_center)
from ui.skin import app_theme
def get_banner_image():
return os.path.join(get_parent_dir(__file__, 3), "data", "banner", "default.png")
class Login(Window):
def __init__(self):
Window.__init__(self, enable_resize=True)
self.set_position(gtk.WIN_POS_CENTER)
self.set_default_size(290, 512)
titlebar = Titlebar(["min", "max", "close"], app_name="Baidu Hi for Linux")
titlebar.min_button.connect("clicked", lambda w: self.min_window())
titlebar.max_button.connect("clicked", lambda w: self.toggle_max_window())
titlebar.close_button.connect("clicked", lambda w: gtk.main_quit())
self.add_move_event(titlebar.drag_box)
self.add_toggle_event(titlebar.drag_box)
banner_image = gtk.image_new_from_file(get_banner_image())
banner_box = set_widget_gravity(banner_image, (0,0,0,0), (10, 0, 0, 0))
user_box, self.user_entry = self.create_combo_entry("帐号:")
passwd_box, self.passwd_entry = self.create_combo_entry("密码:")
self.remember_passwd = CheckButton("记住密码")
self.automatic_login = CheckButton("自动登录")
self.status_box, self.status_combo_box = self.create_combo_widget("状态:",
[(key, None) for key in "在线 忙碌 离开 隐身".split()],
0)
check_box = gtk.HBox(spacing=10)
check_box.pack_start(self.remember_passwd, False, False)
check_box.pack_start(self.automatic_login, False, False)
body_table = gtk.Table(5, 1)
body_table.set_row_spacings(10)
body_table.attach(banner_box, 0, 1, 0, 1, xoptions=gtk.FILL, yoptions=gtk.FILL)
body_table.attach(user_box, 0, 1, 1, 2, xoptions=gtk.FILL, yoptions=gtk.FILL, xpadding=8)
body_table.attach(passwd_box, 0, 1, 2, 3, xoptions=gtk.FILL, yoptions=gtk.FILL, xpadding=8)
# body_table.attach(self.status_box, 0, 1, 3, 4, xoptions=gtk.FILL, yoptions=gtk.FILL, xpadding=8)
body_table.attach(check_box, 0, 1, 4, 5, xoptions=gtk.FILL, yoptions=gtk.FILL)
body_box_align = set_widget_gravity(set_widget_center(body_table),
(1, 1, 0.5, 0.5),
(0, 0, 30, 30))
self.login_button = Button("登录")
self.login_button.set_size_request(95, 30)
login_button_align = set_widget_gravity(set_widget_center(self.login_button),
(1, 1, 0.5, 0.5),
(30, 30, 0, 0))
main_box = gtk.VBox()
main_box.connect("expose-event", self.draw_border_mask)
main_box.pack_start(body_box_align, False, True)
main_box.pack_start(login_button_align, False, True)
self.window_frame.pack_start(titlebar, False, True)
self.window_frame.pack_start(main_box)
def create_combo_entry(self, label_content, entry_content=""):
vbox = gtk.VBox()
vbox.set_spacing(5)
label = Label(label_content)
text_entry = TextEntry(entry_content)
text_entry.set_size(198, 26)
entry_box = set_widget_center(text_entry)
vbox.pack_start(label, False, False)
vbox.pack_start(entry_box, False, False)
return vbox, text_entry
def draw_border_mask(self, widget, event):
cr = widget.window.cairo_create()
rect = widget.allocation
draw_vlinear(cr, rect.x + 8, rect.y + 6, rect.width - 16, rect.height - 16,
app_theme.get_shadow_color("linearBackground").get_color_info(),
4)
def create_combo_widget(self, label_content, items, select_index=0):
label = Label(label_content)
combo_box = ComboBox(items, select_index=select_index)
hbox = gtk.HBox(spacing=5)
hbox.pack_start(label, False, False)
hbox.pack_start(combo_box, False, False)
return hbox, combo_box
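# Minimal usage sketch (assumes a working dtk (deepin-ui) and GTK environment):
#   if __name__ == "__main__":
#       login = Login()
#       login.show_all()
#       gtk.main()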
| lovesnow/weido | src/ui/login.py | Python | gpl-3.0 | 5,368 |
# Copyright 2015, Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext_lazy
from openstack_dashboard.api import neutron as api
from openstack_dashboard import policy
from horizon import tables
class AddRouterRoute(policy.PolicyTargetMixin, tables.LinkAction):
name = "create"
verbose_name = _("Add Static Route")
url = "horizon:project:routers:addrouterroute"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("network", "update_router"),)
def get_link_url(self, datum=None):
router_id = self.table.kwargs['router_id']
return reverse(self.url, args=(router_id,))
class RemoveRouterRoute(policy.PolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ngettext_lazy(
"Delete Static Route",
"Delete Static Routes",
count
)
@staticmethod
def action_past(count):
return ngettext_lazy(
"Deleted Static Route",
"Deleted Static Routes",
count
)
failure_url = 'horizon:project:routers:detail'
policy_rules = (("network", "update_router"),)
def delete(self, request, obj_id):
router_id = self.table.kwargs['router_id']
api.router_static_route_remove(request, router_id, [obj_id])
class ExtraRoutesTable(tables.DataTable):
destination = tables.Column("destination",
verbose_name=_("Destination CIDR"))
nexthop = tables.Column("nexthop", verbose_name=_("Next Hop"))
def get_object_display(self, datum):
"""Display ExtraRoutes when deleted."""
return (super().get_object_display(datum) or
datum.destination + " -> " + datum.nexthop)
class Meta(object):
name = "extra_routes"
verbose_name = _("Static Routes")
table_actions = (AddRouterRoute, RemoveRouterRoute)
row_actions = (RemoveRouterRoute, )
| openstack/horizon | openstack_dashboard/dashboards/project/routers/extensions/extraroutes/tables.py | Python | apache-2.0 | 2,639 |
from __future__ import absolute_import
import sys
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.dialects.mysql import TINYINT, SMALLINT
from sqlalchemy.orm import subqueryload
from .base import BetterBase, session_scope
from .drop import DropAssociation
class Relic(BetterBase):
__tablename__ = 'relic'
id = Column(Integer, primary_key=True, autoincrement=True)
equipment_id = Column(Integer)
name = Column(String(length=64), nullable=False)
base_rarity = Column(TINYINT, nullable=False)
rarity = Column(TINYINT, nullable=False)
level = Column(TINYINT, nullable=False)
level_max = Column(TINYINT, nullable=False)
evolution_num = Column(TINYINT, nullable=False)
max_evolution_num = Column(TINYINT, nullable=False)
can_evolve_potentially = Column(Boolean, nullable=False)
is_max_evolution_num = Column(Boolean, nullable=False)
series_id = Column(Integer, nullable=False)
image_path = Column(String(length=96), nullable=False)
detail_image_path = Column(String(length=96), nullable=False)
description = Column(String(length=256), nullable=False)
has_someones_soul_strike = Column(Boolean, nullable=False)
has_soul_strike = Column(Boolean, nullable=False)
soul_strike_id = Column(Integer, nullable=True)
required_enhancement_base_gil = Column(SMALLINT, nullable=True)
required_evolution_gil = Column(Integer, nullable=True)
sale_gil = Column(SMALLINT, nullable=False)
acc = Column(SMALLINT, nullable=False)
hp = Column(SMALLINT, nullable=False)
atk = Column(SMALLINT, nullable=False)
critical = Column(SMALLINT, nullable=False)
defense = Column(SMALLINT, nullable=False)
eva = Column(SMALLINT, nullable=False)
matk = Column(SMALLINT, nullable=False)
mdef = Column(SMALLINT, nullable=False)
mnd = Column(SMALLINT, nullable=False)
series_acc = Column(SMALLINT, nullable=False)
series_hp = Column(SMALLINT, nullable=False)
series_atk = Column(SMALLINT, nullable=False)
series_def = Column(SMALLINT, nullable=False)
series_eva = Column(SMALLINT, nullable=False)
series_matk = Column(SMALLINT, nullable=False)
series_mdef = Column(SMALLINT, nullable=False)
series_mnd = Column(SMALLINT, nullable=False)
category_id = Column(TINYINT, nullable=False)
category_name = Column(String(length=32), nullable=False)
equipment_type = Column(TINYINT, nullable=False) # 1:"Weapon", 2:"Armor", 3:"Accessory"
# "allowed_buddy_id": 0, # This might be for specific characters only
# "atk_ss_point_factor": 0, # I guess this increases the soul strike charge rate?
# "def_ss_point_factor": 0, # I guess this increases the soul strike charge rate?
# "attributes": [], # What?
# "can_evolve_now": false, # Don't care?
# "created_at": 1428491598, # Don't care
# "exp": 0, # This is the exp for this Relic instance
# "id": 49594264, # This is the id for this Relic instance
# "is_sp_enhancement_material": false, # Don't care
# "is_usable_as_enhancement_material": false, # Don't care
# "is_usable_as_enhancement_src": false, # Don't care
# "evol_max_level_of_base_rarity": {} # Don't care
frontend_columns = (
('name', 'Name'),
('base_rarity', 'Rarity'),
('category_name', 'Category'),
#('equipment_type', 'Type'),
)
@property
def search_id(self):
return self.equipment_id
@property
def extra_tabs(self):
return (
{
'id': 'stats',
'title': 'Stats by Level',
'search_id': self.search_id,
'columns': (
('rarity', 'Rarity'),
('level', 'Level'),
('required_enhancement_base_gil', 'Enhancement cost'),
('required_evolution_gil', 'Evolution cost'),
('sale_gil', 'Sale gil'),
('atk', 'ATK'),
('critical', 'CRIT'),
('defense', 'DEF'),
('acc', 'ACC'),
('eva', 'EVA'),
('matk', 'MAG'),
('mdef', 'RES'),
('mnd', 'MIND'),
('series_acc', 'RS ACC'),
('series_atk', 'RS ATK'),
('series_def', 'RS DEF'),
('series_eva', 'RS EVA'),
('series_matk', 'RS MAG'),
('series_mdef', 'RS RES'),
('series_mnd', 'RS MIND'),
),
},
)
def generate_main_panels(self):
main_stats = []
for k, v in self.main_columns:
main_stats.append('{}: {}'.format(v, self.__getattribute__(k)))
if self.has_soul_strike:
main_stats.append('Soul Strike: {} [TO BE IMPLEMENTED]'.format(
self.soul_strike_id))
# This is repeated in Material.main_panels
# This queries drop_table, enemy, world, dungeon, battle
# TODO 2015-05-11
# Filter out (expired?) events
drop_locations = []
with session_scope() as session:
drops = session.query(DropAssociation).filter_by(
drop_id=self.search_id).options(subqueryload('enemy')).all()
for drop in drops:
drop_locations.append(
'<a href="/{}">{}</a>'.format(
drop.enemy.search_id, str(drop)))
self._main_panels = (
{
'title': 'Main Stats',
'body': self.description if self.description != 'None' else '',
'items': main_stats,
},
{
'title': 'Drop Locations',
'items': drop_locations,
                'footer': 'These locations are not all inclusive and the drop rates may vary.',
},
)
def __init__(self, **kwargs):
name = kwargs['name']
name = name.replace(u'\uff0b', '')
name = name.encode(sys.stdout.encoding, errors='ignore')
self.name = name
self.defense = kwargs['def']
self.image_path = kwargs['image_path'].replace('/dff', '')
self.detail_image_path = kwargs['detail_image_path'].replace('/dff', '')
# Added with 2016-01-21 patch
# TODO 2016-01-21
# Create another function for this
for i in (
'sp_acc',
'sp_atk',
'sp_def',
'sp_eva',
'sp_hp',
'sp_matk',
'sp_mdef',
'sp_mnd',
'sp_spd',
):
if i in kwargs:
kwargs[i.replace('sp_', 'series_')] = kwargs[i]
del(kwargs[i])
for i in (
'allowed_buddy_id',
'allowed_buddy_name',
'atk_ss_point_factor',
'def_ss_point_factor',
'attributes',
'can_evolve_now',
'created_at',
'exp',
'is_sp_enhancement_material',
'is_usable_as_enhancement_material',
'is_usable_as_enhancement_src',
'evol_max_level_of_base_rarity',
'thumbnail_path',
'id',
'def',
'name',
'image_path',
'detail_image_path',
# Added with 2015-06-07 patch
'is_accessory',
'is_armor',
'is_weapon',
# Added with 2015-09-12 patch
'series_hammering_num',
'is_hammering_item',
'hammering_affect_param_key',
'max_hammering_num',
'hammering_num',
'is_max_level',
# Added with 2015-12-14 patch
'ex_series_id',
'is_locked',
# Added with 2016-08-09 patch
'atk_type',
'max_hyper_evolution_num',
'is_max_hyper_evolution_num',
'can_hyper_evolve_now',
'can_hyper_evolve_potentially',
'hyper_evolve_recipe',
# Added with 2016-10 patch
'additional_bonus_attributes',
'additional_bonus_acc',
'additional_bonus_atk',
'additional_bonus_def',
'additional_bonus_eva',
'additional_bonus_hp',
'additional_bonus_matk',
'additional_bonus_mdef',
'additional_bonus_mnd',
'acc_min',
'atk_min',
'def_min',
'eva_min',
'hp_min',
'matk_min',
'mdef_min',
'mnd_min',
# Added with 2017-05 patch
'legend_materia_id',
):
if i in kwargs:
del(kwargs[i])
super(Relic, self).__init__(**kwargs)
def __repr__(self):
ret = '[{}*] {}'.format(self.rarity, self.name)
if self.evolution_num:
ret += ' ' + '+'*self.evolution_num
ret += ' {}/{}'.format(self.level, self.level_max)
return ret
def find_missing_stats():
'''
# TODO 2015-06-09
Return an iterable of Relic objects where a stat appears to be missing.
'''
relics = []
with session_scope() as session:
q = session.query(Relic)
session.expunge_all()
return relics
### EOF ###
| rEtSaMfF/ffrk-bottle | models/relic.py | Python | gpl-3.0 | 9,640 |
#! /usr/bin/python
import sys
import os
import json
import grpc
import time
import subprocess
from google.oauth2 import service_account
import google.oauth2.credentials
import google.auth.transport.requests
import google.auth.transport.grpc
from google.firestore.v1beta1 import firestore_pb2
from google.firestore.v1beta1 import firestore_pb2_grpc
from google.firestore.v1beta1 import document_pb2
from google.firestore.v1beta1 import document_pb2_grpc
from google.firestore.v1beta1 import common_pb2
from google.firestore.v1beta1 import common_pb2_grpc
from google.firestore.v1beta1 import write_pb2
from google.firestore.v1beta1 import write_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
def first_message(database, write):
messages = [
firestore_pb2.WriteRequest(database = database, writes = [])
]
for msg in messages:
yield msg
def generate_messages(database, writes, stream_id, stream_token):
    # `writes` can be a list appended to the messages, so multiple Write
    # operations can be sent; here we send just one as an example.
messages = [
firestore_pb2.WriteRequest(database=database, writes = []),
firestore_pb2.WriteRequest(database=database, writes = [writes], stream_id = stream_id, stream_token = stream_token)
]
for msg in messages:
yield msg
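# Protocol sketch: the streaming Write RPC expects a first WriteRequest that
# carries no writes; the server's first response returns a stream_id and
# stream_token, and every later WriteRequest must echo them back (as main()
# does below).
# Assumed grpc.json layout (hypothetical values, matching the keys read in main()):
# {
#   "grpc": {
#     "Write": {
#       "credentials": "/path/to/service-account.json",
#       "database": "projects/<project-id>/databases/(default)",
#       "name": "projects/<project-id>/databases/(default)/documents/<collection>/<doc-id>"
#     }
#   }
# }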
def main():
fl = os.path.dirname(os.path.abspath(__file__))
fn = os.path.join(fl, 'grpc.json')
with open(fn) as grpc_file:
item = json.load(grpc_file)
creds = item["grpc"]["Write"]["credentials"]
credentials = service_account.Credentials.from_service_account_file("{}".format(creds))
scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/datastore'])
http_request = google.auth.transport.requests.Request()
channel = google.auth.transport.grpc.secure_authorized_channel(scoped_credentials, http_request, 'firestore.googleapis.com:443')
stub = firestore_pb2_grpc.FirestoreStub(channel)
database = item["grpc"]["Write"]["database"]
name = item["grpc"]["Write"]["name"]
first_write = write_pb2.Write()
responses = stub.Write(first_message(database, first_write))
for response in responses:
print("Received message %s" % (response.stream_id))
print(response.stream_token)
value_ = document_pb2.Value(string_value = "foo_boo")
update = document_pb2.Document(name=name, fields={"foo":value_})
writes = write_pb2.Write(update_mask=common_pb2.DocumentMask(field_paths = ["foo"]), update=update)
r2 = stub.Write(generate_messages(database, writes, response.stream_id, response.stream_token))
for r in r2:
print(r.write_results)
if __name__ == "__main__":
main()
| GoogleCloudPlatform/grpc-gcp-python | firestore/examples/end2end/src/Write.py | Python | apache-2.0 | 2,967 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from setuptools import setup, find_packages
setup(
name='PTestCase',
description='Parametrized tests support for python builtin unitest framework',
long_description=(
'Isolated test cases generated from a template method and set of unnamed or named :term:`calls` '
'provided in decorator fashion.'
),
version='1.0.2',
packages=find_packages(),
author='Dariusz Górecki',
author_email='darek.krk@gmail.com',
url='https://github.com/canni/ptestcase',
keywords=['test', 'parametrized', 'unittest'],
install_requires=[
'six',
],
zip_safe=True,
test_suite='ptestcase.tests',
license='BSD License',
classifiers=[
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
platforms=['any'],
)
| canni/ptestcase | setup.py | Python | bsd-2-clause | 1,385 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify execution of custom test cases.
"""
import TestSCons
_exe = TestSCons._exe
_obj = TestSCons._obj
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
NCR = test.NCR # non-cached rebuild
CR = test.CR # cached rebuild (up to date)
NCF = test.NCF # non-cached build failure
CF = test.CF # cached build failure
compileOK = '#include <stdio.h>\\nint main() {printf("Hello");return 0;}'
compileFAIL = "syntax error"
linkOK = compileOK
linkFAIL = "void myFunc(); int main() { myFunc(); }"
runOK = compileOK
runFAIL = "int main() { return 1; }"
test.write('pyAct.py', """\
import sys
print sys.argv[1]
sys.exit(int(sys.argv[1]))
""")
test.write('SConstruct', """\
def CheckCustom(test):
test.Message( 'Executing MyTest ... ' )
retCompileOK = test.TryCompile( '%(compileOK)s', '.c' )
retCompileFAIL = test.TryCompile( '%(compileFAIL)s', '.c' )
retLinkOK = test.TryLink( '%(linkOK)s', '.c' )
retLinkFAIL = test.TryLink( '%(linkFAIL)s', '.c' )
(retRunOK, outputRunOK) = test.TryRun( '%(runOK)s', '.c' )
(retRunFAIL, outputRunFAIL) = test.TryRun( '%(runFAIL)s', '.c' )
(retActOK, outputActOK) = test.TryAction( '%(_python_)s pyAct.py 0 > $TARGET' )
(retActFAIL, outputActFAIL) = test.TryAction( '%(_python_)s pyAct.py 1 > $TARGET' )
resOK = retCompileOK and retLinkOK and retRunOK and outputRunOK=="Hello"
resOK = resOK and retActOK and int(outputActOK)==0
resFAIL = retCompileFAIL or retLinkFAIL or retRunFAIL or outputRunFAIL!=""
resFAIL = resFAIL or retActFAIL or outputActFAIL!=""
test.Result( int(resOK and not resFAIL) )
return resOK and not resFAIL
env = Environment()
import os
env.AppendENVPath('PATH', os.environ['PATH'])
conf = Configure( env, custom_tests={'CheckCustom' : CheckCustom} )
conf.CheckCustom()
env = conf.Finish()
""" % locals())
test.run()
test.checkLogAndStdout(["Executing MyTest ... "],
["yes"],
[[(('.c', NCR), (_obj, NCR)),
(('.c', NCR), (_obj, NCF)),
(('.c', NCR), (_obj, NCR), (_exe, NCR)),
(('.c', NCR), (_obj, NCR), (_exe, NCF)),
(('.c', NCR), (_obj, NCR), (_exe, NCR), (_exe + '.out', NCR)),
(('.c', NCR), (_obj, NCR), (_exe, NCR), (_exe + '.out', NCF)),
(('', NCR),),
(('', NCF),)]],
"config.log", ".sconf_temp", "SConstruct")
test.run()
test.checkLogAndStdout(["Executing MyTest ... "],
["yes"],
[[(('.c', CR), (_obj, CR)),
(('.c', CR), (_obj, CF)),
(('.c', CR), (_obj, CR), (_exe, CR)),
(('.c', CR), (_obj, CR), (_exe, CF)),
(('.c', CR), (_obj, CR), (_exe, CR), (_exe + '.out', CR)),
(('.c', CR), (_obj, CR), (_exe, CR), (_exe + '.out', CF)),
(('', CR),),
(('', CF),)]],
"config.log", ".sconf_temp", "SConstruct")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Distrotech/scons | test/Configure/custom-tests.py | Python | mit | 4,466 |
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
class SQLAlchemyAdapterMetaClass(type):
@staticmethod
def wrap(func):
"""Return a wrapped instance method"""
def auto_commit(self, *args, **kwargs):
try:
return_value = func(self, *args, **kwargs)
self.commit()
return return_value
except:
self.rollback()
raise
return auto_commit
def __new__(cls, name, bases, attrs):
"""If the method in this list, DON'T wrap it"""
no_wrap = ["commit", "merge", "rollback", "remove"]
def wrap(method):
"""private methods are not wrapped"""
if method not in no_wrap and not method.startswith("__"):
attrs[method] = cls.wrap(attrs[method])
        for method in list(attrs.keys()):
            wrap(method)
return super(SQLAlchemyAdapterMetaClass, cls).__new__(cls, name, bases, attrs)
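# Effect of the metaclass (illustrative sketch, hypothetical model class):
#   adapter = SQLAlchemyAdapter(db_session)
#   adapter.add_object_kwargs(User, name='alice')
# Every public method not listed in `no_wrap` runs inside auto_commit, so the
# session is committed on success and rolled back (with the exception
# re-raised) on failure, without the caller managing the transaction.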
class DBAdapter(object):
def __init__(self, db_session):
self.db_session = db_session
class SQLAlchemyAdapter(DBAdapter):
"""Use MetaClass to make this class"""
__metaclass__ = SQLAlchemyAdapterMetaClass
def __init__(self, db_session):
super(SQLAlchemyAdapter, self).__init__(db_session)
# ------------------------------ methods that no need to wrap --- start ------------------------------
def commit(self):
self.db_session.commit()
def remove(self):
self.db_session.remove()
def merge(self, obj):
self.db_session.merge(obj)
def rollback(self):
self.db_session.rollback()
# ------------------------------ methods that no need to wrap --- end------------------------------
# ------------------------------ auto wrapped 'public' methods --- start ------------------------------
def get_object(self, ObjectClass, id):
""" Retrieve one object specified by the primary key 'pk' """
return ObjectClass.query.get(id)
def find_all_objects(self, ObjectClass, *criterion):
return ObjectClass.query.filter(*criterion).all()
def find_all_objects_by(self, ObjectClass, **kwargs):
return ObjectClass.query.filter_by(**kwargs).all()
def find_all_objects_order_by(self, ObjectClass, **kwargs):
return ObjectClass.query.filter_by(**kwargs).order_by(ObjectClass.id.desc()).all()
def count(self, ObjectClass, *criterion):
return ObjectClass.query.filter(*criterion).count()
def count_by(self, ObjectClass, **kwargs):
return ObjectClass.query.filter_by(**kwargs).count()
def find_first_object(self, ObjectClass, *criterion):
return ObjectClass.query.filter(*criterion).first()
def find_first_object_by(self, ObjectClass, **kwargs):
return ObjectClass.query.filter_by(**kwargs).first()
def add_object(self, inst):
self.db_session.add(inst)
def add_object_kwargs(self, ObjectClass, **kwargs):
""" Add an object of class 'ObjectClass' with fields and values specified in '**kwargs'. """
object = ObjectClass(**kwargs)
self.db_session.add(object)
return object
def update_object(self, object, **kwargs):
""" Update object 'object' with the fields and values specified in '**kwargs'. """
for key, value in kwargs.items():
if hasattr(object, key):
setattr(object, key, value)
else:
raise KeyError("Object '%s' has no field '%s'." % (type(object), key))
def delete_object(self, object):
""" Delete object 'object'. """
self.db_session.delete(object)
def delete_all_objects(self, ObjectClass, *criterion):
ObjectClass.query.filter(*criterion).delete(synchronize_session=False)
def delete_all_objects_by(self, ObjectClass, **kwargs):
""" Delete all objects matching the case sensitive filters in 'kwargs'. """
# Convert each name/value pair in 'kwargs' into a filter
query = ObjectClass.query.filter_by(**kwargs)
# query filter by in_ do not support none args, use synchronize_session=False instead
query.delete(synchronize_session=False)
    # ------------------------------ auto wrapped 'public' methods --- end ------------------------------
| xunxunzgq/open-hackathon-bak_01 | open-hackathon-client/src/client/database/db_adapters.py | Python | mit | 5627
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("enhydris", "0028_remove_old_time_step")]
operations = [
migrations.RemoveField(model_name="timeseries", name="interval_type"),
migrations.DeleteModel(name="IntervalType"),
]
| openmeteo/enhydris | enhydris/migrations/0029_remove_interval_type.py | Python | agpl-3.0 | 296 |
'''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os, base64
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class HttpProxy:
def __init__(self, address, username = None, password = None):
self.address = address
self.username = username
self.password = password
def __repr__(self):
return repr(self.address)
def handler(self):
return HttpProxyHandler(self)
@staticmethod
def getFromEnviron():
url = None
for key in ('http_proxy', 'https_proxy'):
url = os.environ.get(key)
if url: break
if not url:
return None
dat = urlparse(url)
port = 80 if dat.scheme == 'http' else 443
if dat.port != None: port = int(dat.port)
host = dat.hostname
return HttpProxy((host, port), dat.username, dat.password)
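# Minimal usage sketch (hypothetical proxy address and credentials):
#   proxy = HttpProxy(('proxy.example.com', 8080), 'user', 'secret')
#   handler = proxy.handler()
#   handler.connect(sock, ('target.example.com', 443))  # TCP connect to the proxy
#   handler.send(sock)                                   # issue the CONNECT request
#   handler.recv(sock, 4096)                             # checks for "200" before app data flows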
class HttpProxyHandler:
def __init__(self, proxy):
self.state = 'init'
self.proxy = proxy
def onConnect(self):
pass
def connect(self, socket, pair):
proxy = self.proxy
authHeader = None
if proxy.username and proxy.password:
key = bytes(proxy.username, 'ascii') + b':' + bytes(proxy.password, 'ascii') if (bytes != str) else bytes(proxy.username) + b':' + proxy.password
auth = base64.b64encode(key)
authHeader = b'Proxy-Authorization: Basic ' + auth + b'\r\n'
data = bytearray('CONNECT %s:%d HTTP/1.1\r\nHost: %s:%d\r\n' % (2 * pair), 'ascii')
if authHeader:
data += authHeader
data += b'\r\n'
self.state = 'connect'
self.data = data
socket.connect(proxy.address)
def send(self, socket):
if self.state == 'connect':
socket.send(self.data)
self.state = 'sent'
def recv(self, socket, size):
if self.state == 'sent':
data = socket.recv(size)
data = data.decode('ascii')
status = data.split(' ', 2)
if status[1] != '200':
raise Exception('%s' % (data[:data.index('\r\n')]))
self.state = 'end'
self.onConnect()
            return data
| albert-chin/yowsup | yowsup/common/http/httpproxy.py | Python | gpl-3.0 | 3283
# Timed mute: !tm <player> <seconds> <reason>
# default time 5 minutes, default reason None
# by topologist June 30th 2012
from scheduler import Scheduler
from commands import add, admin, get_player, join_arguments, name
@name('tm')
@admin
def timed_mute(connection, *args):
protocol = connection.protocol
nick = args[0]
time = int(args[1])
reason = join_arguments(args[2:])
player = get_player(protocol, nick)
if time < 0:
raise ValueError()
if not player.mute:
TimedMute(player, time, reason)
else:
return '%s is already muted!' % nick
add(timed_mute)
class TimedMute(object):
player = None
time = None
def __init__(self, player, time = 300, reason = 'None'):
if time == 0:
player.mute = True
player.protocol.send_chat('%s was muted indefinitely (Reason: %s)' % (
player.name, reason), irc = True)
return
schedule = Scheduler(player.protocol)
schedule.call_later(time, self.end)
player.mute_schedule = schedule
player.protocol.send_chat('%s was muted for %s seconds (Reason: %s)' % (
player.name, time, reason), irc = True)
player.mute = True
self.player = player
self.time = time
def end(self):
self.player.mute = False
message = '%s was unmuted after %s seconds' % (self.player.name, self.time)
self.player.protocol.send_chat(message, irc = True)
def apply_script(protocol, connection, config):
class TimedMuteConnection(connection):
mute_schedule = None
def on_disconnect(self):
if self.mute_schedule:
del self.mute_schedule
connection.on_disconnect(self)
return protocol, TimedMuteConnection
| Architektor/PySnip | contrib/scripts/timedmute.py | Python | gpl-3.0 | 1,733 |
import numpy
import six
from chainer.backends import cuda
from chainer.functions.array import permutate
from chainer.functions.array import transpose_sequence
from chainer.functions.connection import n_step_rnn as rnn
from chainer.initializers import normal
from chainer import link
from chainer.utils import argument
from chainer import variable
def argsort_list_descent(lst):
return numpy.argsort([-len(x.data) for x in lst]).astype('i')
def permutate_list(lst, indices, inv):
ret = [None] * len(lst)
if inv:
for i, ind in enumerate(indices):
ret[ind] = lst[i]
else:
for i, ind in enumerate(indices):
ret[i] = lst[ind]
return ret
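# Example of the two helpers above: for sequences of lengths [2, 5, 3],
# argsort_list_descent returns indices [1, 2, 0]; permutate_list(lst, indices,
# inv=False) reorders the list to descending length, and inv=True restores the
# original order.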
class NStepRNNBase(link.ChainList):
"""__init__(self, n_layers, in_size, out_size, dropout)
Base link class for Stacked RNN/BiRNN links.
This link is base link class for :func:`chainer.links.NStepRNN` and
:func:`chainer.links.NStepBiRNN`.
This link's behavior depends on argument, ``use_bi_direction``.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.links.NStepRNNReLU`
:func:`chainer.links.NStepRNNTanh`
:func:`chainer.links.NStepBiRNNReLU`
:func:`chainer.links.NStepBiRNNTanh`
""" # NOQA
def __init__(self, n_layers, in_size, out_size, dropout, **kwargs):
if kwargs:
argument.check_unexpected_kwargs(
kwargs,
use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config',
use_bi_direction='use_bi_direction is not supported anymore',
activation='activation is not supported anymore')
argument.assert_kwargs_empty(kwargs)
weights = []
if self.use_bi_direction:
direction = 2
else:
direction = 1
for i in six.moves.range(n_layers):
for di in six.moves.range(direction):
weight = link.Link()
with weight.init_scope():
for j in six.moves.range(self.n_weights):
if i == 0 and j < self.n_weights // 2:
w_in = in_size
elif i > 0 and j < self.n_weights // 2:
w_in = out_size * direction
else:
w_in = out_size
w = variable.Parameter(
normal.Normal(numpy.sqrt(1. / w_in)),
(out_size, w_in))
b = variable.Parameter(0, (out_size,))
setattr(weight, 'w%d' % j, w)
setattr(weight, 'b%d' % j, b)
weights.append(weight)
super(NStepRNNBase, self).__init__(*weights)
self.ws = [[getattr(layer, 'w%d' % i)
for i in six.moves.range(self.n_weights)]
for layer in self]
self.bs = [[getattr(layer, 'b%d' % i)
for i in six.moves.range(self.n_weights)]
for layer in self]
self.n_layers = n_layers
self.dropout = dropout
self.out_size = out_size
self.direction = direction
def init_hx(self, xs):
shape = (self.n_layers * self.direction, len(xs), self.out_size)
with cuda.get_device_from_id(self._device_id):
hx = variable.Variable(self.xp.zeros(shape, dtype=xs[0].dtype))
return hx
def rnn(self, *args):
"""Calls RNN function.
This function must be implemented in a child class.
"""
raise NotImplementedError
@property
def n_cells(self):
"""Returns the number of cells.
This function must be implemented in a child class.
"""
return NotImplementedError
def forward(self, hx, xs, **kwargs):
"""forward(self, hx, xs)
Calculate all hidden states and cell states.
.. warning::
``train`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', train)``.
See :func:`chainer.using_config`.
Args:
hx (~chainer.Variable or None): Initial hidden states. If ``None``
is specified zero-vector is used. Its shape is ``(S, B, N)``
for uni-directional RNN and ``(2S, B, N)`` for
bi-directional RNN where ``S`` is the number of layers
and is equal to ``n_layers``, ``B`` is the mini-batch size,
and ``N`` is the dimension of the hidden units.
xs (list of ~chainer.Variable): List of input sequences.
Each element ``xs[i]`` is a :class:`chainer.Variable` holding
a sequence. Its shape is ``(L_t, I)``, where ``L_t`` is the
length of a sequence for time ``t``, and ``I`` is the size of
the input and is equal to ``in_size``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(L_t, N)`` for
uni-directional RNN and ``(L_t, 2N)`` for bi-directional RNN
where ``L_t`` is the length of a sequence for time ``t``,
and ``N`` is size of hidden units.
"""
(hy,), ys = self._call([hx], xs, **kwargs)
return hy, ys
def _call(self, hs, xs, **kwargs):
"""Calls RNN function.
Args:
            hs (list of ~chainer.Variable or None): List of hidden states.
Its length depends on its implementation.
If ``None`` is specified zero-vector is used.
xs (list of ~chainer.Variable): List of input sequences.
Each element ``xs[i]`` is a :class:`chainer.Variable` holding
a sequence.
Returns:
tuple: hs
"""
if kwargs:
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
assert isinstance(xs, (list, tuple))
xp = cuda.get_array_module(*(list(hs) + list(xs)))
indices = argsort_list_descent(xs)
indices_array = xp.array(indices)
xs = permutate_list(xs, indices, inv=False)
hxs = []
for hx in hs:
if hx is None:
hx = self.init_hx(xs)
else:
hx = permutate.permutate(hx, indices_array, axis=1, inv=False)
hxs.append(hx)
trans_x = transpose_sequence.transpose_sequence(xs)
args = [self.n_layers, self.dropout] + hxs + \
[self.ws, self.bs, trans_x]
result = self.rnn(*args)
hys = [permutate.permutate(h, indices_array, axis=1, inv=True)
for h in result[:-1]]
trans_y = result[-1]
ys = transpose_sequence.transpose_sequence(trans_y)
ys = permutate_list(ys, indices, inv=True)
return hys, ys
class NStepRNNTanh(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional RNN for sequences.
This link is stacked version of Uni-directional RNN for sequences.
Note that the activation function is ``tanh``.
It calculates hidden and cell states of all layer at end-of-string,
and all hidden states of the last layer for each time.
Unlike :func:`chainer.functions.n_step_rnn`, this function automatically
sort inputs in descending order by length, and transpose the sequence.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_rnn`
"""
n_weights = 2
use_bi_direction = False
def rnn(self, *args):
return rnn.n_step_rnn(*args, activation='tanh')
@property
def n_cells(self):
return 1
class NStepRNNReLU(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional RNN for sequences.
This link is stacked version of Uni-directional RNN for sequences.
Note that the activation function is ``relu``.
It calculates hidden and cell states of all layer at end-of-string,
and all hidden states of the last layer for each time.
Unlike :func:`chainer.functions.n_step_rnn`, this function automatically
sort inputs in descending order by length, and transpose the sequence.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_rnn`
"""
n_weights = 2
use_bi_direction = False
def rnn(self, *args):
return rnn.n_step_rnn(*args, activation='relu')
@property
def n_cells(self):
return 1
class NStepBiRNNTanh(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional RNN for sequences.
This link is stacked version of Bi-directional RNN for sequences.
Note that the activation function is ``tanh``.
It calculates hidden and cell states of all layer at end-of-string,
and all hidden states of the last layer for each time.
Unlike :func:`chainer.functions.n_step_birnn`, this function automatically
sort inputs in descending order by length, and transpose the sequence.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_birnn`
"""
n_weights = 2
use_bi_direction = True
def rnn(self, *args):
return rnn.n_step_birnn(*args, activation='tanh')
@property
def n_cells(self):
return 1
class NStepBiRNNReLU(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional RNN for sequences.
This link is stacked version of Bi-directional RNN for sequences.
Note that the activation function is ``relu``.
It calculates hidden and cell states of all layer at end-of-string,
and all hidden states of the last layer for each time.
Unlike :func:`chainer.functions.n_step_birnn`, this function automatically
sort inputs in descending order by length, and transpose the sequence.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_birnn`
"""
n_weights = 2
use_bi_direction = True
def rnn(self, *args):
return rnn.n_step_birnn(*args, activation='relu')
@property
def n_cells(self):
return 1
| rezoo/chainer | chainer/links/connection/n_step_rnn.py | Python | mit | 13,081 |
from corehq.apps.hqcase.dbaccessors import get_case_properties
from corehq.apps.users.cases import get_owner_id
from soil import DownloadBase
def export_cases(domain, cases, workbook, filter_group=None, users=None, all_groups=None, process=None):
by_user_id = dict([(user.user_id, user) for user in users]) if users else {}
by_group_id = dict([(g.get_id, g) for g in all_groups]) if all_groups else {}
owner_ids = set(by_user_id.keys())
if filter_group:
owner_ids.add(filter_group.get_id)
else:
# |= reassigns owner_ids to the union of the two sets
owner_ids |= set(by_group_id.keys())
case_static_keys = (
"case_id",
"username",
"user_id",
"owner_id",
"owner_name",
"type",
"name",
"opened_on",
"modified_on",
"closed",
"closed_on",
"domain",
"external_id",
)
case_dynamic_keys = get_case_properties(domain)
case_rows = []
def render_case_attr(case, key):
attr = getattr(case, key)
if isinstance (attr, dict):
return attr.get('#text', '')
else:
return attr
num_cases = len(cases)
def get_matching_owner(case):
if by_user_id:
if case.user_id in by_user_id:
return case.user_id
elif get_owner_id(case) in by_user_id:
return get_owner_id(case)
else:
return get_owner_id(case)
for i, case in enumerate(cases):
if process:
DownloadBase.set_progress(process, i, num_cases)
if get_owner_id(case) in owner_ids:
matching_owner = get_matching_owner(case)
case_row = {'dynamic_properties': {}}
for key in case_static_keys:
if key == 'username':
try:
case_row[key] = by_user_id[matching_owner].raw_username
except (TypeError, KeyError):
case_row[key] = ''
elif key == 'owner_name':
if users and case.owner_id in by_user_id:
case_row[key] = by_user_id[case.owner_id].full_name
elif case.owner_id in by_group_id:
case_row[key] = by_group_id[case.owner_id].name
else:
case_row[key] = ''
else:
case_row[key] = getattr(case, key)
for key in case.dynamic_properties():
case_row['dynamic_properties'][key] = render_case_attr(case, key)
case_rows.append(case_row)
def format_dynamic_key(key):
return "d.{key}".format(key=key)
def tidy_up_case_row(case_row):
row = dict([(key, case_row[key]) for key in case_static_keys])
for key in case_dynamic_keys:
row[format_dynamic_key(key)] = case_row['dynamic_properties'].get(key, workbook.undefined)
return row
case_headers = list(case_static_keys)
case_headers.extend([format_dynamic_key(key) for key in case_dynamic_keys])
workbook.open("Cases", case_headers)
for case_row in case_rows:
workbook.write_row("Cases", tidy_up_case_row(case_row))
return workbook
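# Resulting "Cases" sheet layout (illustrative): one column per static key
# (case_id, username, ..., external_id) followed by one "d.<property>" column
# per dynamic case property known to the domain; each exported case that
# matches the owner filter becomes one row.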
| qedsoftware/commcare-hq | corehq/apps/hqcase/export.py | Python | bsd-3-clause | 3,280 |
from fabric.contrib.files import exists
from fabric.api import run, cd
from .packages import ensure_package
from .utils import program_exists
def ensure_git_repo(path, url, pushurl=None, submodules=False):
if not exists(path):
if not program_exists('git'):
ensure_package('git')
run('git clone %s %s' % (url, path))
if pushurl:
with(cd(path)):
run('git config remote.origin.pushurl %s' % pushurl)
if submodules:
with(cd(path)):
run('git submodule init')
run('git submodule update')
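# Minimal usage sketch (hypothetical path and URLs), called from a fabric task:
#   ensure_git_repo('/srv/myapp', 'https://github.com/example/myapp.git',
#                   pushurl='git@github.com:example/myapp.git', submodules=True)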
| yejianye/toolbox | fablibs/vcs.py | Python | apache-2.0 | 513 |
# -*- coding: utf8 -*-
from phystricks import *
def Grapheunsurunmoinsx():
pspict,fig = SinglePicture("Grapheunsurunmoinsx")
x=var('x')
f=phyFunction(1/(1-x))
eps=0.21
l=5
f1=f.graph(1-l,1-eps)
f2=f.graph(1+eps,1+l)
f1.parameters.color="red"
Ass=Segment( Point(1,f(1+eps)),Point(1,f(1-eps)) )
Ass.parameters.style="dotted"
pspict.DrawGraphs(f1,f2,Ass)
pspict.DrawDefaultAxes()
pspict.dilatation(1)
fig.conclude()
fig.write_the_file()
| Naereen/mazhe | phystricksGrapheunsurunmoinsx.py | Python | gpl-3.0 | 453 |
#!/usr/bin/python
"""
Program that parses standard format results,
compute and check regression bug.
:copyright: Red Hat 2011-2012
:author: Amos Kong <akong@redhat.com>
"""
from __future__ import division
import os
import sys
import re
import warnings
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
import MySQLdb
def getoutput(cmd):
"""Return output of executing cmd in a shell."""
pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
text = pipe.read()
pipe.close()
if text[-1:] == '\n':
text = text[:-1]
return text
def exec_sql(cmd, conf="../../global_config.ini"):
config = ConfigParser.ConfigParser()
config.read(conf)
user = config.get("AUTOTEST_WEB", "user")
passwd = config.get("AUTOTEST_WEB", "password")
db = config.get("AUTOTEST_WEB", "database")
db_type = config.get("AUTOTEST_WEB", "db_type")
if db_type != 'mysql':
print("regression.py: only support mysql database!")
sys.exit(1)
conn = MySQLdb.connect(host="localhost", user=user,
passwd=passwd, db=db)
cursor = conn.cursor()
cursor.execute(cmd)
rows = cursor.fetchall()
lines = []
for row in rows:
line = []
for c in row:
line.append(str(c))
lines.append(" ".join(line))
cursor.close()
conn.close()
return lines
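# Illustrative call (hypothetical data): rows come back as strings with their
# columns joined by single spaces, e.g.
#   exec_sql("select job_idx from tko_jobs where afe_job_id=42")  -> ['1234']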
def get_test_keyval(jobid, keyname, default=''):
idx = exec_sql("select job_idx from tko_jobs where afe_job_id=%s"
% jobid)[-1]
test_idx = exec_sql('select test_idx from tko_tests where job_idx=%s'
% idx)[3]
try:
return exec_sql('select value from tko_test_attributes'
' where test_idx=%s and attribute="%s"'
% (test_idx, keyname))[-1]
except Exception:
return default
class Sample(object):
""" Collect test results in same environment to a sample """
def __init__(self, sample_type, arg):
def generate_raw_table(test_dict):
ret_dict = []
tmp = []
sample_type = category = None
for i in test_dict:
line = i.split('|')[1:]
if not sample_type:
sample_type = line[0:2]
if sample_type != line[0:2]:
ret_dict.append('|'.join(sample_type + tmp))
sample_type = line[0:2]
tmp = []
if "e+" in line[-1]:
tmp.append("%f" % float(line[-1]))
elif 'e-' in line[-1]:
tmp.append("%f" % float(line[-1]))
elif not (re.findall("[a-zA-Z]", line[-1]) or is_int(line[-1])):
tmp.append("%f" % float(line[-1]))
else:
tmp.append(line[-1])
if category != i.split('|')[0]:
category = i.split('|')[0]
ret_dict.append("Category:" + category.strip())
ret_dict.append(self.categories)
ret_dict.append('|'.join(sample_type + tmp))
return ret_dict
if sample_type == 'filepath':
files = arg.split()
self.files_dict = []
for i in range(len(files)):
fd = open(files[i], "r")
f = []
for line in fd.readlines():
line = line.strip()
if re.findall("^### ", line):
if "kvm-userspace-ver" in line:
self.kvmver = line.split(':')[-1]
elif "kvm_version" in line:
self.hostkernel = line.split(':')[-1]
elif "guest-kernel-ver" in line:
self.guestkernel = line.split(':')[-1]
elif "session-length" in line:
self.len = line.split(':')[-1]
else:
f.append(line.strip())
self.files_dict.append(f)
fd.close()
sysinfodir = os.path.join(os.path.dirname(files[0]), "../../sysinfo/")
sysinfodir = os.path.realpath(sysinfodir)
cpuinfo = getoutput("cat %s/cpuinfo" % sysinfodir)
lscpu = getoutput("cat %s/lscpu" % sysinfodir)
meminfo = getoutput("cat %s/meminfo" % sysinfodir)
lspci = getoutput("cat %s/lspci_-vvnn" % sysinfodir)
partitions = getoutput("cat %s/partitions" % sysinfodir)
fdisk = getoutput("cat %s/fdisk_-l" % sysinfodir)
status_path = os.path.join(os.path.dirname(files[0]), "../status")
status_file = open(status_path, 'r')
content = status_file.readlines()
self.testdata = re.findall("localtime=(.*)\t", content[-1])[-1]
cpunum = len(re.findall("processor\s+: \d", cpuinfo))
cpumodel = re.findall("Model name:\s+(.*)", lscpu)
socketnum = int(re.findall("Socket\(s\):\s+(\d+)", lscpu)[0])
corenum = int(re.findall("Core\(s\) per socket:\s+(\d+)", lscpu)[0]) * socketnum
threadnum = int(re.findall("Thread\(s\) per core:\s+(\d+)", lscpu)[0]) * corenum
numanodenum = int(re.findall("NUMA node\(s\):\s+(\d+)", lscpu)[0])
memnum = float(re.findall("MemTotal:\s+(\d+)", meminfo)[0]) / 1024 / 1024
nicnum = len(re.findall("\d+:\d+\.0 Ethernet", lspci))
disknum = re.findall("sd\w+\S", partitions)
fdiskinfo = re.findall("Disk\s+(/dev/sd.*\s+GiB),", fdisk)
elif sample_type == 'database':
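            # Pull the same metadata and the per-test iteration values from the
            # autotest (tko) database instead of result files.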
jobid = arg
self.kvmver = get_test_keyval(jobid, "kvm-userspace-ver")
self.hostkernel = get_test_keyval(jobid, "kvm_version")
self.guestkernel = get_test_keyval(jobid, "guest-kernel-ver")
self.len = get_test_keyval(jobid, "session-length")
self.categories = get_test_keyval(jobid, "category")
idx = exec_sql("select job_idx from tko_jobs where afe_job_id=%s"
% jobid)[-1]
data = exec_sql("select test_idx,iteration_key,iteration_value"
" from tko_perf_view where job_idx=%s" % idx)
testidx = None
job_dict = []
test_dict = []
for line in data:
s = line.split()
if not testidx:
testidx = s[0]
if testidx != s[0]:
job_dict.append(generate_raw_table(test_dict))
test_dict = []
testidx = s[0]
test_dict.append(' | '.join(s[1].split('--')[0:] + s[-1:]))
job_dict.append(generate_raw_table(test_dict))
self.files_dict = job_dict
self.version = " userspace: %s\n host kernel: %s\n guest kernel: %s" % (
self.kvmver, self.hostkernel, self.guestkernel)
nrepeat = len(self.files_dict)
if nrepeat < 2:
print("`nrepeat' should be larger than 1!")
sys.exit(1)
self.desc = """<hr>Machine Info:
o CPUs(%s * %s), Cores(%s), Threads(%s), Sockets(%s),
o NumaNodes(%s), Memory(%.1fG), NICs(%s)
o Disks(%s | %s)
Please check sysinfo directory in autotest result to get more details.
(eg: http://autotest-server.com/results/5057-autotest/host1/sysinfo/)
<hr>""" % (cpunum, cpumodel, corenum, threadnum, socketnum, numanodenum, memnum, nicnum, fdiskinfo, disknum)
self.desc += """ - Every Avg line represents the average value based on *%d* repetitions of the same test,
and the following SD line represents the Standard Deviation between the *%d* repetitions.
- The Standard deviation is displayed as a percentage of the average.
- The significance of the differences between the two averages is calculated using unpaired T-test that
takes into account the SD of the averages.
- The paired t-test is computed for the averages of same category.
""" % (nrepeat, nrepeat)
def getAvg(self, avg_update=None):
return self._process_files(self.files_dict, self._get_list_avg,
avg_update=avg_update)
def getAvgPercent(self, avgs_dict):
return self._process_files(avgs_dict, self._get_augment_rate)
def getSD(self):
return self._process_files(self.files_dict, self._get_list_sd)
def getSDRate(self, sds_dict):
return self._process_files(sds_dict, self._get_rate)
def getTtestPvalue(self, fs_dict1, fs_dict2, paired=None, ratio=None):
"""
        The scipy library is used to compute the p-value of the t-test.
scipy: http://www.scipy.org/
t-test: http://en.wikipedia.org/wiki/Student's_t-test
"""
try:
from scipy import stats
import numpy as np
except ImportError:
print("No python scipy/numpy library installed!")
return None
ret = []
s1 = self._process_files(fs_dict1, self._get_list_self, merge=False)
s2 = self._process_files(fs_dict2, self._get_list_self, merge=False)
        # s*[line][col] contains the (line*col) items of all sample files
for line in range(len(s1)):
tmp = []
if type(s1[line]) != list:
tmp = s1[line]
else:
if len(s1[line][0]) < 2:
continue
for col in range(len(s1[line])):
avg1 = self._get_list_avg(s1[line][col])
avg2 = self._get_list_avg(s2[line][col])
sample1 = np.array(s1[line][col])
sample2 = np.array(s2[line][col])
warnings.simplefilter("ignore", RuntimeWarning)
if (paired):
if (ratio):
(_, p) = stats.ttest_rel(np.log(sample1), np.log(sample2))
else:
(_, p) = stats.ttest_rel(sample1, sample2)
else:
(_, p) = stats.ttest_ind(sample1, sample2)
flag = "+"
if float(avg1) > float(avg2):
flag = "-"
tmp.append(flag + "%f" % (1 - p))
tmp = "|".join(tmp)
ret.append(tmp)
return ret
def _get_rate(self, data):
""" num2 / num1 * 100 """
result = "0.0"
if len(data) == 2 and float(data[0]) != 0:
result = float(data[1]) / float(data[0]) * 100
if result > 100:
result = "%.2f%%" % result
else:
result = "%.4f%%" % result
return result
def _get_augment_rate(self, data):
""" (num2 - num1) / num1 * 100 """
result = "+0.0"
if len(data) == 2 and float(data[0]) != 0:
result = (float(data[1]) - float(data[0])) / float(data[0]) * 100
if result > 100:
result = "%+.2f%%" % result
else:
result = "%+.4f%%" % result
return result
def _get_list_sd(self, data):
"""
sumX = x1 + x2 + ... + xn
avgX = sumX / n
sumSquareX = x1^2 + ... + xn^2
SD = sqrt([sumSquareX - (n * (avgX ^ 2))] / (n - 1))
"""
o_sum = sqsum = 0.0
n = len(data)
for i in data:
o_sum += float(i)
sqsum += float(i) ** 2
avg = o_sum / n
if avg == 0 or n == 1 or sqsum - (n * avg ** 2) <= 0:
return "0.0"
return "%f" % (((sqsum - (n * avg ** 2)) / (n - 1)) ** 0.5)
def _get_list_avg(self, data):
""" Compute the average of list entries """
o_sum = 0.0
for i in data:
o_sum += float(i)
return "%f" % (o_sum / len(data))
def _get_list_self(self, data):
""" Use this to convert sample dicts """
return data
def _process_lines(self, files_dict, row, func, avg_update, merge):
""" Use unified function to process same lines of different samples """
lines = []
ret = []
for i in range(len(files_dict)):
lines.append(files_dict[i][row].split("|"))
for col in range(len(lines[0])):
data_list = []
for i in range(len(lines)):
tmp = lines[i][col].strip()
if is_int(tmp):
data_list.append(int(tmp))
else:
data_list.append(float(tmp))
ret.append(func(data_list))
if avg_update:
for row in avg_update.split('|'):
items = row.split(',')
ret[int(items[0])] = "%f" % (float(ret[int(items[1])]) /
float(ret[int(items[2])]))
if merge:
return "|".join(ret)
return ret
def _process_files(self, files_dict, func, avg_update=None, merge=True):
"""
        Process dicts of sample files with the assigned function;
        func takes a single list argument.
"""
ret_lines = []
for i in range(len(files_dict[0])):
if re.findall("[a-zA-Z]", files_dict[0][i]):
ret_lines.append(files_dict[0][i].strip())
else:
line = self._process_lines(files_dict, i, func, avg_update,
merge)
ret_lines.append(line)
return ret_lines
def display(lists, rates, allpvalues, f, ignore_col, o_sum="Augment Rate",
prefix0=None, prefix1=None, prefix2=None, prefix3=None):
"""
    Display list data in a standard HTML table format
    param lists: row data lists
    param rates: augment rate lists
    param allpvalues: per-category total-significance lists
    param f: result output filepath
    param ignore_col: number of leading columns to leave blank
    param o_sum: comparison result summary (table title)
    param prefix0: output prefix in head lines
    param prefix1: output prefix in Avg/SD lines
    param prefix2: output prefix in Diff Avg/P-value lines
    param prefix3: output prefix in total Sign line
def str_ignore(out, split=False):
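        # Blank out the first `ignore_col` columns of a "|"-separated row;
        # with split=True, drop them entirely instead.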
out = out.split("|")
for i in range(ignore_col):
out[i] = " "
if split:
return "|".join(out[ignore_col:])
return "|".join(out)
def tee_line(content, filepath, n=None):
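        # Echo one row to stdout and append it to the HTML report as a table
        # row, re-formatting numeric cells; when `n` is given, the ignored
        # columns span `n` rows.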
fd = open(filepath, "a")
print(content)
out = ""
out += "<TR ALIGN=CENTER>"
content = content.split("|")
for i in range(len(content)):
if not is_int(content[i]) and is_float(content[i]):
if "+" in content[i] or "-" in content[i]:
if float(content[i]) > 100:
content[i] = "%+.2f" % float(content[i])
else:
content[i] = "%+.4f" % float(content[i])
elif float(content[i]) > 100:
content[i] = "%.2f" % float(content[i])
else:
content[i] = "%.4f" % float(content[i])
if n and i >= 2 and i < ignore_col + 2:
out += "<TD ROWSPAN=%d WIDTH=1%% >%.0f</TD>" % (n, float(content[i]))
else:
out += "<TD WIDTH=1%% >%s</TD>" % content[i]
out += "</TR>"
fd.write(out + "\n")
fd.close()
for l in range(len(lists[0])):
if not re.findall("[a-zA-Z]", lists[0][l]):
break
tee("<TABLE BORDER=1 CELLSPACING=1 CELLPADDING=1 width=10%><TBODY>",
f)
tee("<h3>== %s " % o_sum + "==</h3>", f)
category = 0
for i in range(len(lists[0])):
for n in range(len(lists)):
is_diff = False
for j in range(len(lists)):
if lists[0][i] != lists[j][i]:
is_diff = True
if len(lists) == 1 and not re.findall("[a-zA-Z]", lists[j][i]):
is_diff = True
pfix = prefix1[0]
if len(prefix1) != 1:
pfix = prefix1[n]
if is_diff:
if n == 0:
tee_line(pfix + lists[n][i], f, n=len(lists) + len(rates))
else:
tee_line(pfix + str_ignore(lists[n][i], True), f)
if not is_diff and n == 0:
if '|' in lists[n][i]:
tee_line(prefix0 + lists[n][i], f)
elif "Category:" in lists[n][i]:
if category != 0 and prefix3:
if len(allpvalues[category - 1]) > 0:
tee_line(prefix3 + str_ignore(
allpvalues[category - 1][0]), f)
tee("</TBODY></TABLE>", f)
tee("<br>", f)
tee("<TABLE BORDER=1 CELLSPACING=1 CELLPADDING=1 "
"width=10%><TBODY>", f)
category += 1
tee("<TH colspan=3 >%s</TH>" % lists[n][i], f)
else:
tee("<TH colspan=3 >%s</TH>" % lists[n][i], f)
for n in range(len(rates)):
if lists[0][i] != rates[n][i] and (not re.findall("[a-zA-Z]",
rates[n][i]) or "nan" in rates[n][i]):
tee_line(prefix2[n] + str_ignore(rates[n][i], True), f)
if prefix3 and len(allpvalues[-1]) > 0:
tee_line(prefix3 + str_ignore(allpvalues[category - 1][0]), f)
tee("</TBODY></TABLE>", f)
def analyze(test, sample_type, arg1, arg2, configfile):
""" Compute averages/p-vales of two samples, print results nicely """
config = ConfigParser.ConfigParser()
config.read(configfile)
ignore_col = int(config.get(test, "ignore_col"))
avg_update = config.get(test, "avg_update")
desc = config.get(test, "desc")
def get_list(directory):
result_file_pattern = config.get(test, "result_file_pattern")
cmd = 'find %s|grep "%s.*/%s"' % (directory, test, result_file_pattern)
print(cmd)
return getoutput(cmd)
if sample_type == 'filepath':
arg1 = get_list(arg1)
arg2 = get_list(arg2)
getoutput("rm -f %s.*html" % test)
s1 = Sample(sample_type, arg1)
avg1 = s1.getAvg(avg_update=avg_update)
sd1 = s1.getSD()
s2 = Sample(sample_type, arg2)
avg2 = s2.getAvg(avg_update=avg_update)
sd2 = s2.getSD()
sd1 = s1.getSDRate([avg1, sd1])
sd2 = s1.getSDRate([avg2, sd2])
avgs_rate = s1.getAvgPercent([avg1, avg2])
navg1 = []
navg2 = []
allpvalues = []
tmp1 = []
tmp2 = []
for i in range(len(avg1)):
if not re.findall("[a-zA-Z]", avg1[i]):
tmp1.append([avg1[i]])
tmp2.append([avg2[i]])
elif 'Category' in avg1[i] and i != 0:
navg1.append(tmp1)
navg2.append(tmp2)
tmp1 = []
tmp2 = []
navg1.append(tmp1)
navg2.append(tmp2)
for i in range(len(navg1)):
allpvalues.append(s1.getTtestPvalue(navg1[i], navg2[i], True, True))
pvalues = s1.getTtestPvalue(s1.files_dict, s2.files_dict, False)
rlist = [avgs_rate]
if pvalues:
# p-value list isn't null
rlist.append(pvalues)
desc = desc % s1.len
tee("<pre>####1. Description of setup#1\n%s\n test data: %s</pre>"
% (s1.version, s1.testdata), "%s.html" % test)
tee("<pre>####2. Description of setup#2\n%s\n test data: %s</pre>"
% (s2.version, s2.testdata), "%s.html" % test)
tee("<pre>" + '\n'.join(desc.split('\\n')) + "</pre>", test + ".html")
tee("<pre>" + s1.desc + "</pre>", test + ".html")
display([avg1, sd1, avg2, sd2], rlist, allpvalues, test + ".html",
ignore_col, o_sum="Regression Testing: %s" % test, prefix0="#|Tile|",
prefix1=["1|Avg|", " |%SD|", "2|Avg|", " |%SD|"],
prefix2=["-|%Diff between Avg|", "-|Significance|"],
prefix3="-|Total Significance|")
display(s1.files_dict, [avg1], [], test + ".avg.html", ignore_col,
o_sum="Raw data of sample 1", prefix0="#|Tile|",
prefix1=[" | |"],
prefix2=["-|Avg |"], prefix3="")
display(s2.files_dict, [avg2], [], test + ".avg.html", ignore_col,
o_sum="Raw data of sample 2", prefix0="#|Tile|",
prefix1=[" | |"],
prefix2=["-|Avg |"], prefix3="")
def is_int(n):
try:
int(n)
return True
except ValueError:
return False
def is_float(n):
try:
float(n)
return True
except ValueError:
return False
def tee(content, filepath):
""" Write content to standard output and filepath """
fd = open(filepath, "a")
fd.write(content + "\n")
fd.close()
print(content)
if __name__ == "__main__":
if len(sys.argv) != 5:
this = os.path.basename(sys.argv[0])
print('Usage: %s <testname> filepath <dir1> <dir2>' % this)
print(' or %s <testname> db <jobid1> <jobid2>' % this)
sys.exit(1)
analyze(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], 'perf.conf')
| sathnaga/avocado-vt | scripts/regression.py | Python | gpl-2.0 | 21,208 |
from __future__ import absolute_import
from datetime import datetime
import time
import uuid
import json
from tzlocal import get_localzone
class RemindersService(object):
def __init__(self, service_root, session, params):
self.session = session
self.params = params
self._service_root = service_root
self.lists = {}
self.collections = {}
self.refresh()
def refresh(self):
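        """Reload the reminder collections and lists from the iCloud service."""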
params_reminders = dict(self.params)
params_reminders.update({
'clientVersion': '4.0',
'lang': 'en-us',
'usertz': get_localzone().zone
})
# Open reminders
req = self.session.get(
self._service_root + '/rd/startup',
params=params_reminders
)
startup = req.json()
self.lists = {}
self.collections = {}
for collection in startup['Collections']:
temp = []
self.collections[collection['title']] = {
'guid': collection['guid'],
'ctag': collection['ctag']
}
for reminder in startup['Reminders']:
if reminder['pGuid'] != collection['guid']:
continue
if 'dueDate' in reminder:
if reminder['dueDate']:
due = datetime(
reminder['dueDate'][1],
reminder['dueDate'][2], reminder['dueDate'][3],
reminder['dueDate'][4], reminder['dueDate'][5]
)
else:
due = None
else:
due = None
if reminder['description']:
desc = reminder['description']
else:
desc = ""
temp.append({
"title": reminder['title'],
"desc": desc,
"due": due
})
self.lists[collection['title']] = temp
def post(self, title, description="", collection=None):
pguid = 'tasks'
if collection:
if collection in self.collections:
pguid = self.collections[collection]['guid']
params_reminders = dict(self.params)
params_reminders.update({
'clientVersion': '4.0',
'lang': 'en-us',
'usertz': get_localzone().zone
})
req = self.session.post(
self._service_root + '/rd/reminders/tasks',
data=json.dumps({
"Reminders": {
'title': title,
"description": description,
"pGuid": pguid,
"etag": None,
"order": None,
"priority": 0,
"recurrence": None,
"alarms": [],
"startDate": None,
"startDateTz": None,
"startDateIsAllDay": False,
"completedDate": None,
"dueDate": None,
"dueDateIsAllDay": False,
"lastModifiedDate": None,
"createdDate": None,
"isFamily": None,
"createdDateExtended": int(time.time()*1000),
"guid": str(uuid.uuid4())
},
"ClientState": {"Collections": self.collections.values()}
}),
params=params_reminders)
return req.ok
| b-jesch/service.fritzbox.callmonitor | resources/lib/PhoneBooks/pyicloud/services/reminders.py | Python | gpl-2.0 | 3,587 |
'''
.. _motionevent:
Motion Event
============
The :class:`MotionEvent` is the base class used for every touch and no-touch
event. This class defines all the properties and methods needed to handle 2D
and 3D positions, but may have more capabilities.
.. note::
    You never create the :class:`MotionEvent` yourself; this is the role of the
:mod:`~kivy.input.providers`.
Motion Event and Touch
----------------------
We differentiate between Motion Events and Touch events. A Touch event is a
:class:`MotionEvent` with the `pos` profile. Only these events are dispatched
all over the widget tree.
1. The :class:`MotionEvent` objects are gathered from input providers.
2. All the :class:`MotionEvent` objects are dispatched in
   :func:`~kivy.core.window.Window.on_motion`.
3. If a :class:`MotionEvent` has a `pos` profile, it is dispatched in
   :func:`~kivy.core.window.Window.on_touch_down`, move and up.
Listen to Motion Event
----------------------
If you want to receive all Motion Events, touch or not, you can bind the motion
event from :class:`~kivy.core.window.Window` to your own callback::
def on_motion(self, etype, motionevent):
# will receive all motion event.
pass
Window.bind(on_motion=on_motion)
Profiles
--------
A capability is the ability of a :class:`MotionEvent` to store new
information, or a way to indicate what is supported by the Motion Event. For
example, you can receive a Motion Event that has an angle, a fiducial ID, or
even a shape. You can check the :attr:`~MotionEvent.profile` attribute to see
what is currently supported by the Motion Event, and how to access it.
This is a short list of the profiles supported by default. Check other input
providers to know whether other profiles are available.
============== ================================================================
Profile name Description
-------------- ----------------------------------------------------------------
angle 2D angle. Use property `a`
button Mouse button (left, right, middle, scrollup, scrolldown)
Use property `button`
markerid Marker or Fiducial ID. Use property `fid`
pos            2D position. Use properties `x`, `y` or `pos`
pos3d 3D position. Use properties `x`, `y`, `z`
pressure Pressure of the contact. Use property `pressure`
shape Contact shape. Use property `shape`
============== ================================================================
If you want to know whether the current :class:`MotionEvent` has an angle::
def on_touch_move(self, touch):
if 'angle' in touch.profile:
print 'The touch angle is', touch.a
If you want to select only the fiducials::
def on_touch_move(self, touch):
if 'markerid' not in touch.profile:
return
'''
__all__ = ('MotionEvent', )
import weakref
from inspect import isroutine
from copy import copy
from time import time
from kivy.vector import Vector
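# Dictionary subclass that exposes its keys as attributes (used for the
# per-touch user-data store, MotionEvent.ud).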
class EnhancedDictionnary(dict):
def __getattr__(self, attr):
try:
return self.__getitem__(attr)
except KeyError:
return super(EnhancedDictionnary, self).__getattr__(attr)
def __setattr__(self, attr, value):
self.__setitem__(attr, value)
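# Metaclass that merges the __attrs__ tuples declared on a MotionEvent subclass
# with those of its base classes.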
class MotionEventMetaclass(type):
def __new__(mcs, name, bases, attrs):
__attrs__ = []
for base in bases:
if hasattr(base, '__attrs__'):
__attrs__.extend(base.__attrs__)
if '__attrs__' in attrs:
__attrs__.extend(attrs['__attrs__'])
attrs['__attrs__'] = tuple(__attrs__)
return super(MotionEventMetaclass, mcs).__new__(mcs, name, bases, attrs)
class MotionEvent(object):
    '''Abstract class to represent a touch or no-touch event.
    :Parameters:
        `id` : str
            unique ID of the Motion Event
        `args` : list
            list of parameters, passed to the depack() function
'''
__metaclass__ = MotionEventMetaclass
__uniq_id = 0
__attrs__ = \
('device', 'push_attrs', 'push_attrs_stack',
'is_touch', 'id', 'shape', 'profile',
# current position, in 0-1 range
'sx', 'sy', 'sz',
# first position set, in 0-1 range
'osx', 'osy', 'osz',
# last position set, in 0-1 range
'psx', 'psy', 'psz',
# delta from the last position and current one, in 0-1 range
'dsx', 'dsy', 'dsz',
# current position, in screen range
'x', 'y', 'z',
# first position set, in screen range
'ox', 'oy', 'oz',
# last position set, in 0-1 range
'px', 'py', 'pz',
# delta from the last position and current one, in screen range
'dx', 'dy', 'dz',
'time_start', 'is_double_tap',
'double_tap_time', 'ud')
def __init__(self, device, id, args):
if self.__class__ == MotionEvent:
raise NotImplementedError('class MotionEvent is abstract')
MotionEvent.__uniq_id += 1
        #: True if the Motion Event is a Touch. Can also be verified by checking
        #: whether `pos` is in :attr:`profile`.
self.is_touch = False
#: Attributes to push by default, when we use :func:`push` : x, y, z,
#: dx, dy, dz, ox, oy, oz, px, py, pz.
self.push_attrs_stack = []
self.push_attrs = ('x', 'y', 'z', 'dx', 'dy', 'dz', 'ox', 'oy', 'oz',
'px', 'py', 'pz', 'pos')
        #: Unique ID of the touch. You can safely use this property; it will
        #: never be the same for two different touches.
self.uid = MotionEvent.__uniq_id
#: Device used for creating this touch
self.device = device
# For grab
self.grab_list = []
self.grab_exclusive_class = None
self.grab_state = False
        #: Used to determine which widget the touch is being dispatched to.
        #: Check the :func:`grab` function for more information.
self.grab_current = None
#: Profiles currently used in the touch
self.profile = []
        #: Id of the touch, not unique. This is generally the Id set by the input
        #: provider, like the ID in TUIO. If you have multiple TUIO sources, the
        #: same id can be used. Prefer the :attr:`uid` attribute instead.
self.id = id
#: Shape of the touch, subclass of
#: :class:`~kivy.input.shape.Shape`.
#: By default, the property is set to None
self.shape = None
#: X position, in 0-1 range
self.sx = 0.0
#: Y position, in 0-1 range
self.sy = 0.0
#: Z position, in 0-1 range
self.sz = 0.0
#: Origin X position, in 0-1 range.
self.osx = None
#: Origin Y position, in 0-1 range.
self.osy = None
#: Origin Z position, in 0-1 range.
self.osz = None
#: Previous X position, in 0-1 range.
self.psx = None
#: Previous Y position, in 0-1 range.
self.psy = None
#: Previous Z position, in 0-1 range.
self.psz = None
#: Delta between self.sx and self.psx, in 0-1 range.
self.dsx = None
#: Delta between self.sy and self.psy, in 0-1 range.
self.dsy = None
#: Delta between self.sz and self.psz, in 0-1 range.
self.dsz = None
#: X position, in window range
self.x = 0.0
#: Y position, in window range
self.y = 0.0
#: Z position, in window range
self.z = 0.0
#: Origin X position, in window range
self.ox = None
#: Origin Y position, in window range
self.oy = None
#: Origin Z position, in window range
self.oz = None
#: Previous X position, in window range
self.px = None
#: Previous Y position, in window range
self.py = None
#: Previous Z position, in window range
self.pz = None
#: Delta between self.x and self.px, in window range
self.dx = None
#: Delta between self.y and self.py, in window range
self.dy = None
#: Delta between self.z and self.pz, in window range
self.dz = None
#: Position (X, Y), in window range
self.pos = (0.0, 0.0)
#: Initial time of the touch creation
self.time_start = time()
#: Time of the last update
self.time_update = self.time_start
#: Time of the end event (last touch usage)
self.time_end = -1
#: Indicate if the touch is a double tap or not
self.is_double_tap = False
#: If the touch is a :attr:`is_double_tap`, this is the time between the
#: previous tap and the current touch.
self.double_tap_time = 0
        #: User data dictionary. Use this dictionary to save your own data on
        #: the touch.
self.ud = EnhancedDictionnary()
self.depack(args)
def depack(self, args):
'''Depack `args` into attributes in class'''
# set initial position and last position
if self.osx is None:
self.psx = self.osx = self.sx
self.psy = self.osy = self.sy
self.psz = self.osz = self.sz
# update the delta
self.dsx = self.sx - self.psx
self.dsy = self.sy - self.psy
self.dsz = self.sz - self.psz
def grab(self, class_instance, exclusive=False):
        '''Grab this motion event. You can grab a touch if you absolutely want to
receive on_touch_move() and on_touch_up(), even if the touch is not
dispatched by your parent::
def on_touch_down(self, touch):
touch.grab(self)
def on_touch_move(self, touch):
if touch.grab_current is self:
# i receive my grabbed touch
else:
# it's a normal touch
def on_touch_up(self, touch):
if touch.grab_current is self:
# i receive my grabbed touch, i must ungrab it !
touch.ungrab(self)
else:
# it's a normal touch
pass
'''
if not self.is_touch:
            raise Exception('Grab works only for Touch Motion Events.')
if self.grab_exclusive_class is not None:
            raise Exception('Cannot grab the touch, touch is exclusive')
class_instance = weakref.ref(class_instance)
if exclusive:
self.grab_exclusive_class = class_instance
self.grab_list.append(class_instance)
def ungrab(self, class_instance):
        '''Ungrab a previously grabbed touch
'''
class_instance = weakref.ref(class_instance)
if self.grab_exclusive_class == class_instance:
self.grab_exclusive_class = None
if class_instance in self.grab_list:
self.grab_list.remove(class_instance)
def move(self, args):
'''Move the touch to another position
'''
self.px = self.x
self.py = self.y
self.pz = self.z
self.psx = self.sx
self.psy = self.sy
self.psz = self.sz
self.time_update = time()
self.depack(args)
def scale_for_screen(self, w, h, p=None, rotation=0):
'''Scale position for the screen
'''
sx, sy = self.sx, self.sy
if rotation == 0:
self.x = sx * float(w)
self.y = sy * float(h)
elif rotation == 90:
sx, sy = sy, 1 - sx
self.x = sx * float(h)
self.y = sy * float(w)
elif rotation == 180:
sx, sy = 1 - sx, 1 - sy
self.x = sx * float(w)
self.y = sy * float(h)
elif rotation == 270:
sx, sy = 1 - sy, sx
self.x = sx * float(h)
self.y = sy * float(w)
if p:
self.z = self.sz * float(p)
if self.ox is None:
self.px = self.ox = self.x
self.py = self.oy = self.y
self.pz = self.oz = self.z
self.dx = self.x - self.px
self.dy = self.y - self.py
self.dz = self.z - self.pz
# cache position
self.pos = self.x, self.y
def push(self, attrs=None):
        '''Push the attribute values listed in `attrs` onto the stack
'''
if attrs is None:
attrs = self.push_attrs
values = [getattr(self, x) for x in attrs]
self.push_attrs_stack.append((attrs, values))
def pop(self):
        '''Pop attribute values from the stack
'''
attrs, values = self.push_attrs_stack.pop()
for i in xrange(len(attrs)):
setattr(self, attrs[i], values[i])
def apply_transform_2d(self, transform):
'''Apply a transformation on x, y, z, px, py, pz,
ox, oy, oz, dx, dy, dz
'''
self.x, self.y = self.pos = transform(self.x, self.y)
self.px, self.py = transform(self.px, self.py)
self.ox, self.oy = transform(self.ox, self.oy)
self.dx = self.x - self.px
self.dy = self.y - self.py
def copy_to(self, to):
        '''Copy some attributes to another touch object.'''
for attr in self.__attrs__:
to.__setattr__(attr, copy(self.__getattribute__(attr)))
def distance(self, other_touch):
'''Return the distance between the current touch and another touch.
'''
return Vector(self.pos).distance(other_touch.pos)
def update_time_end(self):
self.time_end = time()
# facilities
@property
def dpos(self):
'''Return delta between last position and current position, in the
screen coordinate system (self.dx, self.dy)'''
return self.dx, self.dy
@property
def opos(self):
'''Return the initial position of the touch in the screen
coordinate system (self.ox, self.oy)'''
return self.ox, self.oy
@property
def ppos(self):
'''Return the previous position of the touch in the screen
coordinate system (self.px, self.py)'''
return self.px, self.py
@property
def spos(self):
'''Return the position in the 0-1 coordinate system
(self.sx, self.sy)'''
return self.sx, self.sy
def __str__(self):
basename = str(self.__class__)
classname = basename.split('.')[-1].replace('>', '').replace('\'', '')
return '<%s spos=%s pos=%s>' % (classname, self.spos, self.pos)
def __repr__(self):
out = []
for x in dir(self):
v = getattr(self, x)
if x[0] == '_':
continue
if isroutine(v):
continue
out.append('%s="%s"' % (x, v))
return '<%s %s>' % (
self.__class__.__name__,
' '.join(out))
@property
def is_mouse_scrolling(self, *args):
        '''Returns True if the touch is a mousewheel scrolling event
.. versionadded:: 1.6.0
'''
return 'button' in self.profile and 'scroll' in self.button
| happy56/kivy | kivy/input/motionevent.py | Python | lgpl-3.0 | 15,062 |
from django.db import connection
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.tests.utils import (no_mysql, oracle, postgis,
spatialite, HAS_SPATIALREFSYS, SpatialRefSys)
from django.utils import unittest
test_srs = ({'srid' : 4326,
'auth_name' : ('EPSG', True),
'auth_srid' : 4326,
'srtext' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'srtext14' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
'proj4' : '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ',
'spheroid' : 'WGS 84', 'name' : 'WGS 84',
'geographic' : True, 'projected' : False, 'spatialite' : True,
'ellipsoid' : (6378137.0, 6356752.3, 298.257223563), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 1, 9),
},
{'srid' : 32140,
'auth_name' : ('EPSG', False),
'auth_srid' : 32140,
'srtext' : 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
'srtext14': 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],AUTHORITY["EPSG","32140"],AXIS["X",EAST],AXIS["Y",NORTH]]',
'proj4' : '+proj=lcc +lat_1=30.28333333333333 +lat_2=28.38333333333333 +lat_0=27.83333333333333 +lon_0=-99 +x_0=600000 +y_0=4000000 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ',
'spheroid' : 'GRS 1980', 'name' : 'NAD83 / Texas South Central',
'geographic' : False, 'projected' : True, 'spatialite' : False,
'ellipsoid' : (6378137.0, 6356752.31414, 298.257222101), # From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'eprec' : (1, 5, 10),
},
)
@unittest.skipUnless(HAS_GDAL and HAS_SPATIALREFSYS,
"SpatialRefSysTest needs gdal support and a spatial database")
class SpatialRefSysTest(unittest.TestCase):
@no_mysql
def test01_retrieve(self):
"Testing retrieval of SpatialRefSys model objects."
for sd in test_srs:
srs = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(sd['srid'], srs.srid)
# Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # Also, Oracle Spatial seems to add extraneous info to fields, hence
            # the testing with the 'startswith' flag.
auth_name, oracle_flag = sd['auth_name']
if postgis or (oracle and oracle_flag):
self.assertEqual(True, srs.auth_name.startswith(auth_name))
self.assertEqual(sd['auth_srid'], srs.auth_srid)
# No proj.4 and different srtext on oracle backends :(
if postgis:
if connection.ops.spatial_version >= (1, 4, 0):
srtext = sd['srtext14']
else:
srtext = sd['srtext']
self.assertEqual(srtext, srs.wkt)
self.assertEqual(sd['proj4'], srs.proj4text)
@no_mysql
def test02_osr(self):
"Testing getting OSR objects from SpatialRefSys model objects."
from django.contrib.gis.gdal import GDAL_VERSION
for sd in test_srs:
sr = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(True, sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
if not (spatialite and not sd['spatialite']):
# Can't get 'NAD83 / Texas South Central' from PROJ.4 string
# on SpatiaLite
self.assertEqual(True, sr.name.startswith(sd['name']))
# Testing the SpatialReference object directly.
if postgis or spatialite:
srs = sr.srs
if GDAL_VERSION <= (1, 8):
self.assertEqual(sd['proj4'], srs.proj4)
# No `srtext` field in the `spatial_ref_sys` table in SpatiaLite
if not spatialite:
if connection.ops.spatial_version >= (1, 4, 0):
srtext = sd['srtext14']
else:
srtext = sd['srtext']
self.assertEqual(srtext, srs.wkt)
@no_mysql
def test03_ellipsoid(self):
"Testing the ellipsoid property."
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
param1 = ellps1[i]
param2 = ellps2[i]
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(SpatialRefSysTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| rebost/django | django/contrib/gis/tests/test_spatialrefsys.py | Python | bsd-3-clause | 6,715 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ipviking_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| norsecorp/ipviking-django | ipviking_django/manage.py | Python | bsd-2-clause | 258 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example sets a bid modifier for the mobile platform on given campaign.
To get campaigns, run get_campaigns.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
BID_MODIFIER = '1.5'
def main(client, campaign_id, bid_modifier):
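  """Set the given bid modifier for the mobile platform on the campaign."""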
# Initialize appropriate service.
campaign_criterion_service = client.GetService(
'CampaignCriterionService', version='v201502')
  # Create the mobile platform. The ID can be found in the documentation.
# https://developers.google.com/adwords/api/docs/appendix/platforms
mobile = {
'xsi_type': 'Platform',
'id': '30001'
}
# Create campaign criterion with modified bid.
campaign_criterion = {
'campaignId': campaign_id,
'criterion': mobile,
'bidModifier': bid_modifier
}
# Create operations.
operations = [
{
'operator': 'SET',
'operand': campaign_criterion
}
]
# Make the mutate request.
result = campaign_criterion_service.mutate(operations)
# Display the resulting campaign criteria.
for campaign_criterion in result['value']:
print ('Campaign criterion with campaign id \'%s\' and criterion id \'%s\' '
'was updated with bid modifier \'%s\'.'
% (campaign_criterion['campaignId'],
campaign_criterion['criterion']['id'],
campaign_criterion['bidModifier']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID, BID_MODIFIER)
| wubr2000/googleads-python-lib | examples/adwords/v201502/campaign_management/set_criterion_bid_modifier.py | Python | apache-2.0 | 2,408 |
from django.db import models
class Snippet(models.Model):
"""A text snippet. Not meant for use by anyone other than a designer"""
name = models.CharField(max_length=255)
snippet = models.TextField(blank=True)
class Meta:
pass
def __unicode__(self):
return self.snippet
| callowayproject/django-snippets | snippets/models.py | Python | apache-2.0 | 321 |
"""Defines the Transcript object, which represents a row in a UCSC table.
"""
import itertools
from probe_generator import probe
from probe_generator.sequence_range import SequenceRange
_REQUIRED_FIELDS = (
# Fields which are assumed to exist by Transcript methods.
'name',
'exonStarts',
'exonEnds',
'cdsStart',
'cdsEnd',
'chrom',
)
_GENE_NAME_FIELDS = (
# The names of the fields which might contain the name of a gene in any of
# the supported UCSC file formats.
'name2',
'proteinID',
)
class Transcript(object):
"""Represents a UCSC annotation of a transcript.
Public attributes:
name: unique transcript identifier
gene_id: non-unique gene name identifier
chromosome: self-evident
plus_strand: boolean true if the transcript is on the plus strand
"""
def __init__(self, spec):
"""`spec` is a dict containing the information from a row read from a
UCSC annotation table.
Raises an InvalidAnnotationFile error when the spec does not have the
required fields.
"""
self._spec = spec
self._assert_spec_correct()
self.name = self._spec['name']
self.chromosome = self._spec['chrom'].lstrip('chr')
self.gene_id, = (self._spec[field] for field in _GENE_NAME_FIELDS
if field in self._spec)
self.plus_strand = self._spec['strand'] == '+'
def __hash__(self):
return hash(tuple([value for value in sorted(self._spec.values())]))
def __len__(self):
"""Return the number of coding nucleotides in the transcript.
"""
return sum(exon.end - exon.start for exon in self.coding_exons())
def _assert_spec_correct(self):
"""Raises an InvalidAnnotationFile exception unless all of the
_REQUIRED_FIELDS and exactly one of the _GENE_NAME_FIELDS are present
in the _spec.
"""
if not all(field in self._spec for field in _REQUIRED_FIELDS):
raise InvalidAnnotationFile(
"Annotation file is missing required fields: {}".format(
[field for field in _REQUIRED_FIELDS
if not field in self._spec]))
gene_names = [field for field in _GENE_NAME_FIELDS
if field in self._spec]
if not len(gene_names) == 1:
raise InvalidAnnotationFile(
"Annotation file contains gene id fields: {}. "
"Expected exactly one of {}".format(
gene_names, _GENE_NAME_FIELDS))
def exons(self):
"""Return the exon positions of a UCSC annotation feature.
In a UCSC annotation file, the positions of the starts and ends of exons
are stored as comma-separated strings:
'20,30,40,'
Given a dictionary with this data, we return a list of tuples:
(exonStart, exonEnd)
        If the 'strand' of the row is '-', the function returns the exons in
        reverse order. In this case, the first exon relative to the direction
of transcription (which is probably what the user means), is the last
exon along the chromosome reading from left to right along the '+'
strand (which is how the data are stored in UCSC tables).
Raises a FormattingError when the `row` does not appear to come from a
valid UCSC gene table.
"""
exon_starts = self._spec['exonStarts'].split(',')
exon_ends = self._spec['exonEnds'].split(',')
positions = []
for start, end in zip(exon_starts, exon_ends):
if start != '' and end != '':
start, end = int(start), int(end)
positions.append((start, end))
if not self.plus_strand:
positions.reverse()
return [SequenceRange(self.chromosome, start, end)
for start, end in positions]
def coding_exons(self):
"""As in `exons`, but with the UTRs trimmed out.
"""
cds_start = int(self._spec['cdsStart'])
cds_end = int(self._spec['cdsEnd'])
exon_positions = self.exons()
positions = []
if not self.plus_strand:
exon_positions.reverse()
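        # Walk the exons from left to right along the chromosome, skipping
        # purely untranslated exons and clipping partially coding exons to the
        # cdsStart..cdsEnd interval.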
for exon in exon_positions:
if exon.end < cds_start:
pass
elif exon.start <= cds_start <= cds_end <= exon.end:
positions.append((cds_start, cds_end))
break
elif exon.start <= cds_start <= exon.end:
positions.append((cds_start, exon.end))
elif cds_start <= exon.start <= exon.end <= cds_end:
positions.append((exon.start, exon.end))
elif exon.start <= cds_end <= exon.end:
positions.append((exon.start, cds_end))
break
elif cds_end <= exon.start:
break
else:
assert False, "unreachable: {}/{}".format(self.name, self.gene_id)
if not self.plus_strand:
positions.reverse()
return [SequenceRange(self.chromosome, start, end)
for start, end in positions]
def exon(self, index):
"""Given the one-based index of an exon, return a SequenceRange object
representing the genomic coordinates of that exon.
Raise a NoFeature error when the exon is out of the bounds of the
transcript.
"""
try:
return self.exons()[index-1]
except IndexError as error:
raise NoFeature("{}: {}/{}".format(
error, self.gene_id, self.name))
def nucleotide_index(self, index):
"""Given a 1-based base pair index, return a SequenceRange object
representing the base pair at that index in the transcript.
"""
base_index = self._transcript_index(index)
return SequenceRange(self.chromosome, base_index, base_index+1)
def codon_index(self, index):
"""Given a 1-based codon index, return a SequenceRange object
representing that codon.
"""
base_index = self._transcript_index(index*3)
if self.plus_strand:
return SequenceRange(self.chromosome, base_index-2, base_index+1)
else:
return SequenceRange(self.chromosome, base_index, base_index+3)
def base_index(self, sequence_range):
"""Given a SequenceRange object representing a genomic location within
        the transcript, return the one-based index of the nucleotide at the start
of the sequence-range object.
Raises an OutOfRange error when the `sequence_range` is not within the
transcript.
"""
for i in range(1, len(self)+1):
nucleotide = self.nucleotide_index(i)
if sequence_range.start == nucleotide.start:
return i
raise OutOfRange
def transcript_range(self, start, end):
"""Return a list of SequenceRange objects representing the genomic
location(s) of the transcript from `start` to `end`.
More than one SequenceRange is returned if the requested range crosses
exon boundaries.
The `start` and `end` variables are 1-based left-inclusive,
right-exclusive.
"""
ranges = [self.nucleotide_index(i) for i in range(start, end)]
return SequenceRange.condense(*ranges)
def _transcript_index(self, index):
"""Given the 1-based index of a nucleotide in the coding sequence,
return the 0-based genomic index of that nucleotide as an integer.
"""
indices = []
for exon in self.coding_exons():
nucleotide_range = list(range(exon.start, exon.end))
if not self.plus_strand:
nucleotide_range.reverse()
indices.append(nucleotide_range)
base_coordinates = itertools.chain(*indices)
try:
base_index = next(itertools.islice(
base_coordinates,
index-1,
None))
except StopIteration:
raise OutOfRange(
"Base {} is outside the range of transcript '{}'".format(
index, self.name))
return base_index
class InvalidAnnotationFile(Exception):
"""Raised when format assumptions about the table used to generate the
transcript annotations are violated.
"""
class OutOfRange(probe.NonFatalError):
"""Raised when a base index outside the range of a transcript is specified.
"""
class NoFeature(probe.NonFatalError):
"""Raised when the index of the exon is outside the range of the
transcript.
"""
| bcgsc/ProbeGenerator | probe_generator/transcript.py | Python | gpl-3.0 | 8,753 |
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_fiptables
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests of iptables handling function.
"""
from collections import defaultdict
import copy
import logging
import re
from mock import patch, call, Mock, ANY
from calico.felix import fiptables
from calico.felix.fiptables import IptablesUpdater
from calico.felix.futils import FailedSystemCall
from calico.felix.test.base import BaseTestCase
_log = logging.getLogger(__name__)
EXTRACT_UNREF_TESTS = [
("""Chain INPUT (policy DROP)
target prot opt source destination
felix-INPUT all -- anywhere anywhere
ACCEPT tcp -- anywhere anywhere tcp dpt:domain
Chain FORWARD (policy DROP)
target prot opt source destination
felix-FORWARD all -- anywhere anywhere
ufw-track-forward all -- anywhere anywhere
Chain DOCKER (1 references)
target prot opt source destination
Chain felix-FORWARD (1 references)
target prot opt source destination
felix-FROM-ENDPOINT all -- anywhere anywhere
felix-TO-ENDPOINT all -- anywhere anywhere
Chain-with-bad-name all -- anywhere anywhere
ACCEPT all -- anywhere anywhere
Chain felix-temp (0 references)
target prot opt source destination
felix-FROM-ENDPOINT all -- anywhere anywhere
ACCEPT all -- anywhere anywhere
""",
set(["felix-temp"])),
]
MISSING_CHAIN_DROP = '--append %s --jump DROP -m comment --comment "WARNING Missing chain DROP:"'
class TestIptablesUpdater(BaseTestCase):
def setUp(self):
super(TestIptablesUpdater, self).setUp()
self.stub = IptablesStub("filter")
self.m_config = Mock()
self.m_config.REFRESH_INTERVAL = 0 # disable refresh thread
self.ipt = IptablesUpdater("filter", self.m_config, 4)
self.ipt._execute_iptables = self.stub.apply_iptables_restore
self.check_output_patch = patch("gevent.subprocess.check_output",
autospec=True)
self.m_check_output = self.check_output_patch.start()
self.m_check_output.side_effect = self.fake_check_output
def fake_check_output(self, cmd, *args, **kwargs):
_log.info("Stubbing out call to %s", cmd)
if cmd == ["iptables-save", "--table", "filter"]:
return self.stub.generate_iptables_save()
elif cmd == ['iptables', '--wait', '--list', '--table', 'filter']:
return self.stub.generate_iptables_list()
else:
raise AssertionError("Unexpected call %r" % cmd)
def tearDown(self):
self.check_output_patch.stop()
super(TestIptablesUpdater, self).tearDown()
def test_rewrite_chains_stub(self):
"""
Tests that referencing a chain causes it to get stubbed out.
"""
self.ipt.rewrite_chains(
{"foo": ["--append foo --jump bar"]},
{"foo": set(["bar"])},
async=True,
)
self.step_actor(self.ipt)
self.assertEqual(self.stub.chains_contents,
{"foo": ["--append foo --jump bar"],
'bar': [MISSING_CHAIN_DROP % "bar"]})
def test_rewrite_chains_cover(self):
"""
Hits remaining code paths in rewrite chains.
"""
cb = Mock()
self.ipt.rewrite_chains(
{"foo": ["--append foo --jump bar"]},
{"foo": set(["bar"])},
suppress_upd_log=True,
async=True,
callback=cb,
)
self.step_actor(self.ipt)
cb.assert_called_once_with(None)
def test_delete_required_chain_stub(self):
"""
Tests that deleting a required chain stubs it out instead.
"""
# Exit the graceful restart period, during which we do not stub out
# chains.
self.ipt.cleanup(async=True)
# Install a couple of chains. foo depends on bar.
self.ipt.rewrite_chains(
{"foo": ["--append foo --jump bar"],
"bar": ["--append bar --jump ACCEPT"]},
{"foo": set(["bar"]),
"bar": set()},
async=True,
)
self.step_actor(self.ipt)
# Both chains should be programmed as normal.
self.assertEqual(self.stub.chains_contents,
{"foo": ["--append foo --jump bar"],
'bar': ["--append bar --jump ACCEPT"] })
# Deleting bar should stub it out instead.
self.ipt.delete_chains(["bar"], async=True)
self.step_actor(self.ipt)
self.assertEqual(self.stub.chains_contents,
{"foo": ["--append foo --jump bar"],
'bar': [MISSING_CHAIN_DROP % "bar"] })
def test_cleanup_with_dependencies(self):
# Set up the dataplane with some chains that the IptablesUpdater
# doesn't know about and some that it will know about.
self.stub.apply_iptables_restore("""
*filter
:INPUT DROP [10:505]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [40:1600]
# These non-felix chains should be ignored
:ignore-me -
:ignore-me-too -
# These are left-over felix chains. Some depend on each other. They
# can only be cleaned up in the correct order.
:felix-foo - [0:0]
:felix-bar -
:felix-foo -
:felix-baz -
:felix-biff -
--append felix-foo --src 10.0.0.1/32 --jump felix-bar
# baz depends on biff; cleanup needs to detect that.
--append felix-baz --src 10.0.0.2/32 --jump felix-biff
--append felix-biff --src 10.0.0.3/32 --jump DROP
--append ignore-me --jump ignore-me-too
--append ignore-me-too --jump DROP
""".splitlines())
# IptablesUpdater hears about some chains before the cleanup. These
# partially overlap with the ones that are already there.
self.ipt.rewrite_chains(
{"felix-foo": ["--append felix-foo --jump felix-bar",
"--append felix-foo --jump felix-baz",
"--append felix-foo --jump felix-boff"],
"felix-bar": ["--append felix-bar --jump ACCEPT"]},
# felix-foo depends on:
# * a new chain that's also being programmed
# * a pre-existing chain that is present at start of day
# * a new chain that isn't present at all.
{"felix-foo": set(["felix-bar", "felix-baz", "felix-boff"]),
"felix-bar": set()},
async=True,
)
self.step_actor(self.ipt)
# Dataplane should now have all the new chains in place, including
# a stub for felix-boff. However, the old chains should not have been
# cleaned up.
self.stub.assert_chain_contents({
"INPUT": [],
"FORWARD": [],
"OUTPUT": [],
"ignore-me": ["--append ignore-me --jump ignore-me-too"],
"ignore-me-too": ["--append ignore-me-too --jump DROP"],
"felix-foo": ["--append felix-foo --jump felix-bar",
"--append felix-foo --jump felix-baz",
"--append felix-foo --jump felix-boff"],
"felix-bar": ["--append felix-bar --jump ACCEPT"],
"felix-baz": ["--append felix-baz --src 10.0.0.2/32 "
"--jump felix-biff"],
"felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
"felix-biff": ["--append felix-biff --src 10.0.0.3/32 --jump DROP"],
})
# Issue the cleanup.
self.ipt.cleanup(async=True)
self.step_actor(self.ipt)
# Should now have stubbed-out chains for all the ones that are not
# programmed.
self.stub.assert_chain_contents({
# Non felix chains ignored:
"INPUT": [],
"FORWARD": [],
"OUTPUT": [],
"ignore-me": ["--append ignore-me --jump ignore-me-too"],
"ignore-me-too": ["--append ignore-me-too --jump DROP"],
# Explicitly-programmed chains programmed.
"felix-foo": ["--append felix-foo --jump felix-bar",
"--append felix-foo --jump felix-baz",
"--append felix-foo --jump felix-boff"],
"felix-bar": ["--append felix-bar --jump ACCEPT"],
# All required but unknown chains stubbed.
"felix-baz": [MISSING_CHAIN_DROP % "felix-baz"],
"felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
# felix-biff deleted, even though it was referenced by felix-baz
# before.
})
def test_cleanup_bad_read_back(self):
# IptablesUpdater hears about some chains before the cleanup.
self.ipt.rewrite_chains(
{"felix-foo": ["--append felix-foo --jump felix-boff"]},
{"felix-foo": set(["felix-boff"])},
async=True,
)
self.step_actor(self.ipt)
self.stub.assert_chain_contents({
"felix-foo": ["--append felix-foo --jump felix-boff"],
"felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
})
# Some other process then breaks our chains.
self.stub.chains_contents = {}
self.stub.iptables_save_output = [
None, # Start of cleanup.
# End of cleanup. Out of sync:
"*filter\n"
":INPUT DROP [68:4885]\n"
":FORWARD DROP [0:0]\n"
":OUTPUT ACCEPT [20:888]\n"
":DOCKER - [0:0]\n"
"-A INPUT -i lxcbr0 -p tcp -m tcp --dport 53 -j ACCEPT\n"
"-A FORWARD -o lxcbr0 -j ACCEPT\n"
"COMMIT\n"
]
_log.info("Forcing iptables-save to always return %s",
self.stub.iptables_save_output)
# Issue the cleanup.
with patch.object(fiptables._log, "error") as m_error:
self.ipt.cleanup(async=True)
self.step_actor(self.ipt)
m_error.assert_called_once_with(
ANY,
set([]),
set([]),
set(["felix-foo", "felix-boff"])
)
self.stub.assert_chain_contents({
"felix-foo": ["--append felix-foo --jump felix-boff"],
"felix-boff": [MISSING_CHAIN_DROP % "felix-boff"],
})
def test_ensure_rule_inserted(self):
fragment = "FOO --jump DROP"
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([FailedSystemCall("Message", [], 1, "",
"line 2 failed"),
None,
None])
self.ipt.ensure_rule_inserted(fragment, async=True)
self.step_actor(self.ipt)
self.assertEqual(
m_exec.mock_calls,
[
call(["*filter",
"--delete FOO --jump DROP",
"--insert FOO --jump DROP",
"COMMIT"],
fail_log_level=logging.DEBUG),
call(["*filter",
"--insert FOO --jump DROP",
"COMMIT"]),
])
self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
def test_insert_remove_tracking(self):
fragment = "FOO --jump DROP"
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = [
# Insert.
None,
# Remove: requires an exception to terminate loop.
None,
FailedSystemCall("Message", [], 1, "", "line 2 failed"),
# Insert.
None,
]
self.ipt.ensure_rule_inserted(fragment, async=True)
self.step_actor(self.ipt)
self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
self.assertTrue(fragment not in self.ipt._removed_rule_fragments)
self.ipt.ensure_rule_removed(fragment, async=True)
self.step_actor(self.ipt)
self.assertTrue(fragment not in self.ipt._inserted_rule_fragments)
self.assertTrue(fragment in self.ipt._removed_rule_fragments)
self.ipt.ensure_rule_inserted(fragment, async=True)
self.step_actor(self.ipt)
self.assertTrue(fragment in self.ipt._inserted_rule_fragments)
self.assertTrue(fragment not in self.ipt._removed_rule_fragments)
def test_ensure_rule_removed(self):
fragment = "FOO --jump DROP"
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([None,
FailedSystemCall("Message", [], 1, "",
"line 2 failed")])
self.ipt.ensure_rule_removed(fragment, async=True)
self.step_actor(self.ipt)
exp_call = call([
'*filter',
'--delete FOO --jump DROP',
'COMMIT',
], fail_log_level=logging.DEBUG)
self.assertEqual(m_exec.mock_calls, [exp_call] * 2)
def test_ensure_rule_removed_not_present(self):
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([FailedSystemCall("Message", [], 1, "",
"line 2 failed")])
self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
self.step_actor(self.ipt)
exp_call = call([
'*filter',
'--delete FOO --jump DROP',
'COMMIT',
], fail_log_level=logging.DEBUG)
self.assertEqual(m_exec.mock_calls, [exp_call])
def test_ensure_rule_removed_missing_dep(self):
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([
FailedSystemCall("Message", [], 1, "",
"at line: 2\n"
"ipset doesn't exist")])
self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
self.step_actor(self.ipt)
exp_call = call([
'*filter',
'--delete FOO --jump DROP',
'COMMIT',
], fail_log_level=logging.DEBUG)
self.assertEqual(m_exec.mock_calls, [exp_call])
def test_ensure_rule_removed_error(self):
with patch.object(self.ipt, "_execute_iptables") as m_exec:
m_exec.side_effect = iter([FailedSystemCall("Message", [], 1, "",
"the foo is barred")])
f = self.ipt.ensure_rule_removed("FOO --jump DROP", async=True)
self.step_actor(self.ipt)
self.assertRaises(FailedSystemCall, f.get)
exp_call = call([
'*filter',
'--delete FOO --jump DROP',
'COMMIT',
], fail_log_level=logging.DEBUG)
self.assertEqual(m_exec.mock_calls, [exp_call])
def test_refresh_iptables(self):
self.ipt.ensure_rule_inserted("INPUT -j ACCEPT", async=True)
self.ipt.ensure_rule_inserted("INPUT -j DROP", async=True)
self.ipt.ensure_rule_removed("INPUT -j DROP", async=True)
self.step_actor(self.ipt)
self.ipt.refresh_iptables(async=True)
with patch.object(self.ipt, "_insert_rule") as m_insert_rule:
with patch.object(self.ipt, "_remove_rule") as m_remove_rule:
self.step_actor(self.ipt)
m_insert_rule.assert_called_once_with("INPUT -j ACCEPT",
log_level=logging.DEBUG)
m_remove_rule.assert_called_once_with("INPUT -j DROP",
log_level=logging.DEBUG)
class TestIptablesStub(BaseTestCase):
"""
Tests of our dummy iptables "stub". It's sufficiently complex
that giving it a few tests of its own adds a lot of confidence to
the tests that really rely on it.
"""
def setUp(self):
super(TestIptablesStub, self).setUp()
self.stub = IptablesStub("filter")
def test_gen_ipt_save(self):
self.stub.chains_contents = {
"foo": ["--append foo"]
}
self.assertEqual(
self.stub.generate_iptables_save(),
"*filter\n"
":foo - [0:0]\n"
"--append foo\n"
"COMMIT"
)
def test_gen_ipt_list(self):
self.stub.apply_iptables_restore("""
*filter
:foo - [0:0]
:bar -
--append foo --src 10.0.0.8/32 --jump bar
--append bar --jump DROP
""".splitlines())
self.assertEqual(
self.stub.generate_iptables_list(),
"Chain bar (1 references)\n"
"target prot opt source destination\n"
"DROP dummy -- anywhere anywhere\n"
"\n"
"Chain foo (0 references)\n"
"target prot opt source destination\n"
"bar dummy -- anywhere anywhere\n"
)
class TestUtilityFunctions(BaseTestCase):
def test_extract_unreffed_chains(self):
for inp, exp in EXTRACT_UNREF_TESTS:
output = fiptables._extract_our_unreffed_chains(inp)
self.assertEqual(exp, output, "Expected\n\n%s\n\nTo parse as: %s\n"
"but got: %s" % (inp, exp, output))
class IptablesStub(object):
"""
Fake version of the dataplane, accepts iptables-restore input and
stores it off. Can generate dummy versions of the corresponding
iptables-save and iptables --list output.
"""
def __init__(self, table):
self.table = table
self.chains_contents = defaultdict(list)
self.chain_dependencies = defaultdict(set)
self.new_contents = None
self.new_dependencies = None
self.declared_chains = None
self.iptables_save_output = []
def generate_iptables_save(self):
if self.iptables_save_output:
output = self.iptables_save_output.pop(0)
if output:
_log.debug("Forcing iptables-save output")
return output
lines = ["*" + self.table]
for chain_name in sorted(self.chains_contents.keys()):
lines.append(":%s - [0:0]" % chain_name)
for _, chain_content in sorted(self.chains_contents.items()):
lines.extend(chain_content)
lines.append("COMMIT")
return "\n".join(lines)
def generate_iptables_list(self):
_log.debug("Generating iptables --list for chains %s\n%s",
self.chains_contents, self.chain_dependencies)
chunks = []
for chain, entries in sorted(self.chains_contents.items()):
num_refs = 0
for deps in self.chain_dependencies.values():
if chain in deps:
num_refs += 1
chain_lines = [
"Chain %s (%s references)" % (chain, num_refs),
"target prot opt source destination"]
for rule in entries:
m = re.search(r'(?:--jump|-j|--goto|-g)\s+(\S+)', rule)
assert m, "Failed to generate listing for %r" % rule
action = m.group(1)
chain_lines.append(action + " dummy -- anywhere anywhere")
chunks.append("\n".join(chain_lines))
return "\n\n".join(chunks) + "\n"
def apply_iptables_restore(self, lines, **kwargs):
_log.debug("iptables-restore input:\n%s", "\n".join(lines))
table_name = None
self.new_contents = copy.deepcopy(self.chains_contents)
self.declared_chains = set()
self.new_dependencies = copy.deepcopy(self.chain_dependencies)
for line in lines:
line = line.strip()
if line.startswith("#") or not line:
continue
elif line.startswith("*"):
table_name = line[1:]
_log.debug("Processing table %s", table_name)
assert table_name == self.table
elif line.startswith(":"):
assert table_name, "Table should occur before chains."
splits = line[1:].split(" ")
_log.debug("Forward declaration %s, flushing chain", splits)
if len(splits) == 3:
chain_name, policy, counts = splits
if not re.match(r'\[\d+:\d+\]', counts):
raise AssertionError("Bad counts: %r" % line)
elif len(splits) == 2:
chain_name, policy = splits
else:
raise AssertionError(
"Invalid chain forward declaration line %r" % line)
if policy not in ("-", "DROP", "ACCEPT"):
raise AssertionError("Unexpected policy %r" % line)
self.declared_chains.add(chain_name)
self.new_contents[chain_name] = []
self.new_dependencies[chain_name] = set()
elif line.strip() == "COMMIT":
self._handle_commit()
else:
# Should be a rule fragment of some sort
assert table_name, "Table should occur before rules."
self._handle_rule(line)
# Implicit commit at end.
self._handle_commit()
def _handle_rule(self, rule):
splits = rule.split(" ")
ipt_op = splits[0]
chain = splits[1]
_log.debug("Rule op: %s, chain name: %s", ipt_op, chain)
if ipt_op in ("--append", "-A", "--insert", "-I"):
self.assert_chain_declared(chain, ipt_op)
if ipt_op in ("--append", "-A"):
self.new_contents[chain].append(rule)
else:
self.new_contents[chain].insert(0, rule)
m = re.search(r'(?:--jump|-j|--goto|-g)\s+(\S+)', rule)
if m:
action = m.group(1)
_log.debug("Action %s", action)
if action not in ("MARK", "ACCEPT", "DROP", "RETURN"):
# Assume a dependent chain.
self.new_dependencies[chain].add(action)
elif ipt_op in ("--delete-chain", "-X"):
self.assert_chain_declared(chain, ipt_op)
del self.new_contents[chain]
del self.new_dependencies[chain]
elif ipt_op in ("--flush", "-F"):
self.assert_chain_declared(chain, ipt_op)
self.new_contents[chain] = []
self.new_dependencies[chain] = set()
elif ipt_op in ("--delete", "-D"):
self.assert_chain_declared(chain, ipt_op)
for rule in self.new_contents.get(chain, []):
rule_fragment = " ".join(splits[1:])
if rule.endswith(rule_fragment):
self.new_contents[chain].remove(rule)
break
else:
raise FailedSystemCall("Delete for non-existent rule", [], 1,
"", "line 2 failed")
else:
raise AssertionError("Unknown operation %s" % ipt_op)
def assert_chain_declared(self, chain, ipt_op):
kernel_chains = set(["INPUT", "FORWARD", "OUTPUT"])
if chain not in self.declared_chains and chain not in kernel_chains:
raise AssertionError("%s to non-existent chain %s" %
(ipt_op, chain))
def _handle_commit(self):
for chain, deps in self.chain_dependencies.iteritems():
for dep in deps:
if dep not in self.new_contents:
raise AssertionError("Chain %s depends on %s but that "
"chain is not present" % (chain, dep))
self.chains_contents = self.new_contents
self.chain_dependencies = self.new_dependencies
def assert_chain_contents(self, expected):
differences = zip(sorted(self.chains_contents.items()),
sorted(expected.items()))
differences = ["%s != %s" % (p1, p2) for
(p1, p2) in differences
if p1 != p2]
if self.chains_contents != expected:
raise AssertionError("Differences:\n%s" % "\n".join(differences))
class TestTransaction(BaseTestCase):
def setUp(self):
super(TestTransaction, self).setUp()
self.txn = fiptables._Transaction(
{
"felix-a": [], "felix-b": [], "felix-c": []
},
defaultdict(set, {"felix-a": set(["felix-b", "felix-stub"])}),
defaultdict(set, {"felix-b": set(["felix-a"]),
"felix-stub": set(["felix-a"])}),
)
def test_rewrite_existing_chain_remove_stub_dependency(self):
"""
Test that a no-longer-required stub is deleted.
"""
self.txn.store_rewrite_chain("felix-a", ["foo"], set(["felix-b"]))
self.assertEqual(self.txn.affected_chains,
set(["felix-a", "felix-stub"]))
self.assertEqual(self.txn.chains_to_stub_out, set([]))
self.assertEqual(self.txn.chains_to_delete, set(["felix-stub"]))
self.assertEqual(self.txn.referenced_chains, set(["felix-b"]))
self.assertEqual(
self.txn.prog_chains,
{
"felix-a": ["foo"],
"felix-b": [],
"felix-c": []
})
self.assertEqual(self.txn.required_chns,
{"felix-a": set(["felix-b"])})
self.assertEqual(self.txn.requiring_chns,
{"felix-b": set(["felix-a"])})
def test_rewrite_existing_chain_remove_normal_dependency(self):
"""
Test that removing a dependency on an explicitly programmed chain
correctly updates the indices.
"""
self.txn.store_rewrite_chain("felix-a", ["foo"], set(["felix-stub"]))
self.assertEqual(self.txn.affected_chains, set(["felix-a"]))
self.assertEqual(self.txn.chains_to_stub_out, set([]))
self.assertEqual(self.txn.chains_to_delete, set([]))
self.assertEqual(self.txn.referenced_chains, set(["felix-stub"]))
self.assertEqual(
self.txn.prog_chains,
{
"felix-a": ["foo"],
"felix-b": [],
"felix-c": [],
})
self.assertEqual(self.txn.required_chns,
{"felix-a": set(["felix-stub"])})
self.assertEqual(self.txn.requiring_chns,
{"felix-stub": set(["felix-a"])})
def test_unrequired_chain_delete(self):
"""
Test that deleting an orphan chain triggers deletion and
updates the indices.
"""
self.txn.store_delete("felix-c")
self.assertEqual(self.txn.affected_chains, set(["felix-c"]))
self.assertEqual(self.txn.chains_to_stub_out, set([]))
self.assertEqual(self.txn.chains_to_delete, set(["felix-c"]))
self.assertEqual(self.txn.referenced_chains,
set(["felix-b", "felix-stub"]))
self.assertEqual(
self.txn.prog_chains,
{
"felix-a": [],
"felix-b": [],
})
self.assertEqual(self.txn.required_chns,
{"felix-a": set(["felix-b", "felix-stub"])})
self.assertEqual(self.txn.requiring_chns,
{"felix-b": set(["felix-a"]),
"felix-stub": set(["felix-a"])})
def test_required_deleted_chain_gets_stubbed(self):
"""
Test that deleting a chain that is still required results in it
being stubbed out.
"""
self.txn.store_delete("felix-b")
self.assertEqual(self.txn.affected_chains, set(["felix-b"]))
self.assertEqual(self.txn.chains_to_stub_out, set(["felix-b"]))
self.assertEqual(self.txn.chains_to_delete, set())
self.assertEqual(self.txn.referenced_chains,
set(["felix-b", "felix-stub"]))
self.assertEqual(
self.txn.prog_chains,
{
"felix-a": [],
"felix-c": [],
})
self.assertEqual(self.txn.required_chns,
{"felix-a": set(["felix-b", "felix-stub"])})
self.assertEqual(self.txn.requiring_chns,
{"felix-b": set(["felix-a"]),
"felix-stub": set(["felix-a"])})
def test_cache_invalidation(self):
self.assert_cache_dropped()
self.assert_properties_cached()
self.txn.store_delete("felix-a")
self.assert_cache_dropped()
def test_cache_invalidation_2(self):
self.assert_cache_dropped()
self.assert_properties_cached()
self.txn.store_rewrite_chain("felix-a", [], {})
self.assert_cache_dropped()
def assert_properties_cached(self):
self.assertEqual(self.txn.affected_chains, set())
self.assertEqual(self.txn.chains_to_stub_out, set())
self.assertEqual(self.txn.chains_to_delete, set())
self.assertEqual(self.txn._affected_chains, set())
self.assertEqual(self.txn._chains_to_stub, set())
self.assertEqual(self.txn._chains_to_delete, set())
def assert_cache_dropped(self):
self.assertEqual(self.txn._affected_chains, None)
self.assertEqual(self.txn._chains_to_stub, None)
self.assertEqual(self.txn._chains_to_delete, None)
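# Illustrative sketch (not part of the original test suite): driving IptablesStub
# directly to show the restore/save round trip. The chain name and rule below are
# hypothetical and exist only for demonstration.
if __name__ == "__main__":
    stub = IptablesStub("filter")
    stub.apply_iptables_restore([
        "*filter",
        ":felix-demo - [0:0]",
        "--append felix-demo --jump DROP",
        "COMMIT",
    ])
    # The stub can now render dummy iptables-save / iptables --list output.
    print stub.generate_iptables_save()
    print stub.generate_iptables_list()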
| anortef/calico | calico/felix/test/test_fiptables.py | Python | apache-2.0 | 30,589 |
#!/usr/bin/env python3
# import cgitb
# cgitb.enable()
from arthash import fileappend
from arthash.experimental import hash_distributions
HEADER = """Content-Type: text/plain
"""
printer = print # noqa T001
def run_cgi(filename, hash_code):
printer(HEADER)
try:
fileappend.append_hash(filename, hash_code)
except Exception as e:
printer('ERROR:', e, 'for hash', '"%s"' % hash_code)
raise
else:
printer('SUCCESS:', hash_code)
if __name__ == '__main__':
import sys
# When called from CGI, the filename is a fixed constant.
try:
filename = sys.argv[1]
    except IndexError:
filename = '/tmp/arthashlist.txt'
# When called from CGI, the sha256_hash is a field from the form.
try:
sha256_hash = sys.argv[2]
    except IndexError:
sha256_hash = hash_distributions.random_hash()
run_cgi(filename, sha256_hash)
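# Illustrative invocation sketch (path and hash are placeholders). From the
# command line both arguments are optional and fall back to the defaults above:
#
#   python3 cgi.py /tmp/arthashlist.txt <sha256-hash>
#
# Under CGI the script emits the Content-Type header followed by either
# "SUCCESS: <hash>" or "ERROR: ..." as plain text.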
| arthash/arthash | arthash/old/cgi.py | Python | artistic-2.0 | 900 |
import os
import sys
import subprocess
import tempfile
from time import sleep
from os.path import exists, join, abspath
from shutil import rmtree, copytree
from tempfile import mkdtemp
from contextlib import contextmanager
from twisted.trial import unittest
from twisted.internet import defer
import scrapy
from scrapy.utils.python import to_native_str
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.test import get_testenv
from scrapy.utils.testsite import SiteTest
from scrapy.utils.testproc import ProcessTest
class ProjectTest(unittest.TestCase):
project_name = 'testproject'
def setUp(self):
self.temp_path = mkdtemp()
self.cwd = self.temp_path
self.proj_path = join(self.temp_path, self.project_name)
self.proj_mod_path = join(self.proj_path, self.project_name)
self.env = get_testenv()
def tearDown(self):
rmtree(self.temp_path)
def call(self, *new_args, **kwargs):
with tempfile.TemporaryFile() as out:
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd,
env=self.env, **kwargs)
def proc(self, *new_args, **popen_kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
p = subprocess.Popen(args, cwd=self.cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**popen_kwargs)
waited = 0
interval = 0.2
while p.poll() is None:
sleep(interval)
waited += interval
if waited > 15:
p.kill()
assert False, 'Command took too much time to complete'
return p
class StartprojectTest(ProjectTest):
def test_startproject(self):
self.assertEqual(0, self.call('startproject', self.project_name))
assert exists(join(self.proj_path, 'scrapy.cfg'))
assert exists(join(self.proj_path, 'testproject'))
assert exists(join(self.proj_mod_path, '__init__.py'))
assert exists(join(self.proj_mod_path, 'items.py'))
assert exists(join(self.proj_mod_path, 'pipelines.py'))
assert exists(join(self.proj_mod_path, 'settings.py'))
assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))
self.assertEqual(1, self.call('startproject', self.project_name))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
self.assertEqual(1, self.call('startproject', 'sys'))
def test_startproject_with_project_dir(self):
project_dir = mkdtemp()
self.assertEqual(0, self.call('startproject', self.project_name, project_dir))
assert exists(join(abspath(project_dir), 'scrapy.cfg'))
assert exists(join(abspath(project_dir), 'testproject'))
assert exists(join(join(abspath(project_dir), self.project_name), '__init__.py'))
assert exists(join(join(abspath(project_dir), self.project_name), 'items.py'))
assert exists(join(join(abspath(project_dir), self.project_name), 'pipelines.py'))
assert exists(join(join(abspath(project_dir), self.project_name), 'settings.py'))
assert exists(join(join(abspath(project_dir), self.project_name), 'spiders', '__init__.py'))
self.assertEqual(0, self.call('startproject', self.project_name, project_dir + '2'))
self.assertEqual(1, self.call('startproject', self.project_name, project_dir))
self.assertEqual(1, self.call('startproject', self.project_name + '2', project_dir))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
self.assertEqual(1, self.call('startproject', 'sys'))
self.assertEqual(2, self.call('startproject'))
self.assertEqual(2, self.call('startproject', self.project_name, project_dir, 'another_params'))
class StartprojectTemplatesTest(ProjectTest):
def setUp(self):
super(StartprojectTemplatesTest, self).setUp()
self.tmpl = join(self.temp_path, 'templates')
self.tmpl_proj = join(self.tmpl, 'project')
def test_startproject_template_override(self):
copytree(join(scrapy.__path__[0], 'templates'), self.tmpl)
with open(join(self.tmpl_proj, 'root_template'), 'w'):
pass
assert exists(join(self.tmpl_proj, 'root_template'))
args = ['--set', 'TEMPLATES_DIR=%s' % self.tmpl]
p = self.proc('startproject', self.project_name, *args)
out = to_native_str(retry_on_eintr(p.stdout.read))
self.assertIn("New Scrapy project %r, using template directory" % self.project_name, out)
self.assertIn(self.tmpl_proj, out)
assert exists(join(self.proj_path, 'root_template'))
class CommandTest(ProjectTest):
def setUp(self):
super(CommandTest, self).setUp()
self.call('startproject', self.project_name)
self.cwd = join(self.temp_path, self.project_name)
self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name
class GenspiderCommandTest(CommandTest):
def test_arguments(self):
# only pass one argument. spider script shouldn't be created
self.assertEqual(2, self.call('genspider', 'test_name'))
assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
# pass two arguments <name> <domain>. spider script should be created
self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
def test_template(self, tplname='crawl'):
args = ['--template=%s' % tplname] if tplname else []
spname = 'test_spider'
p = self.proc('genspider', spname, 'test.com', *args)
out = to_native_str(retry_on_eintr(p.stdout.read))
self.assertIn("Created spider %r using template %r in module" % (spname, tplname), out)
self.assertTrue(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
p = self.proc('genspider', spname, 'test.com', *args)
out = to_native_str(retry_on_eintr(p.stdout.read))
self.assertIn("Spider %r already exists in module" % spname, out)
def test_template_basic(self):
self.test_template('basic')
def test_template_csvfeed(self):
self.test_template('csvfeed')
def test_template_xmlfeed(self):
self.test_template('xmlfeed')
def test_list(self):
self.assertEqual(0, self.call('genspider', '--list'))
def test_dump(self):
self.assertEqual(0, self.call('genspider', '--dump=basic'))
self.assertEqual(0, self.call('genspider', '-d', 'basic'))
def test_same_name_as_project(self):
self.assertEqual(2, self.call('genspider', self.project_name))
assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))
class GenspiderStandaloneCommandTest(ProjectTest):
def test_generate_standalone_spider(self):
self.call('genspider', 'example', 'example.com')
assert exists(join(self.temp_path, 'example.py'))
class MiscCommandsTest(CommandTest):
def test_list(self):
self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
debug_log_spider = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug("It Works!")
return []
"""
@contextmanager
def _create_file(self, content, name):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, name))
with open(fname, 'w') as f:
f.write(content)
try:
yield fname
finally:
rmtree(tmpdir)
def runspider(self, code, name='myspider.py', args=()):
with self._create_file(code, name) as fname:
return self.proc('runspider', fname, *args)
def get_log(self, code, name='myspider.py', args=()):
p = self.runspider(code, name=name, args=args)
return to_native_str(p.stderr.read())
def test_runspider(self):
log = self.get_log(self.debug_log_spider)
self.assertIn("DEBUG: It Works!", log)
self.assertIn("INFO: Spider opened", log)
self.assertIn("INFO: Closing spider (finished)", log)
self.assertIn("INFO: Spider closed (finished)", log)
def test_runspider_log_level(self):
log = self.get_log(self.debug_log_spider,
args=('-s', 'LOG_LEVEL=INFO'))
self.assertNotIn("DEBUG: It Works!", log)
self.assertIn("INFO: Spider opened", log)
def test_runspider_dnscache_disabled(self):
# see https://github.com/scrapy/scrapy/issues/2811
# The spider below should not be able to connect to localhost:12345,
# which is intended,
# but this should not be because of DNS lookup error
# assumption: localhost will resolve in all cases (true?)
log = self.get_log("""
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
start_urls = ['http://localhost:12345']
def parse(self, response):
return {'test': 'value'}
""",
args=('-s', 'DNSCACHE_ENABLED=False'))
print(log)
self.assertNotIn("DNSLookupError", log)
self.assertIn("INFO: Spider opened", log)
def test_runspider_log_short_names(self):
log1 = self.get_log(self.debug_log_spider,
args=('-s', 'LOG_SHORT_NAMES=1'))
print(log1)
self.assertIn("[myspider] DEBUG: It Works!", log1)
self.assertIn("[scrapy]", log1)
self.assertNotIn("[scrapy.core.engine]", log1)
log2 = self.get_log(self.debug_log_spider,
args=('-s', 'LOG_SHORT_NAMES=0'))
print(log2)
self.assertIn("[myspider] DEBUG: It Works!", log2)
self.assertNotIn("[scrapy]", log2)
self.assertIn("[scrapy.core.engine]", log2)
def test_runspider_no_spider_found(self):
log = self.get_log("from scrapy.spiders import Spider\n")
self.assertIn("No spider found in file", log)
def test_runspider_file_not_found(self):
p = self.proc('runspider', 'some_non_existent_file')
log = to_native_str(p.stderr.read())
self.assertIn("File not found: some_non_existent_file", log)
def test_runspider_unable_to_load(self):
log = self.get_log('', name='myspider.txt')
self.assertIn('Unable to load', log)
def test_start_requests_errors(self):
log = self.get_log("""
import scrapy
class BadSpider(scrapy.Spider):
name = "bad"
def start_requests(self):
raise Exception("oops!")
""", name="badspider.py")
print(log)
self.assertIn("start_requests", log)
self.assertIn("badspider.py", log)
class BenchCommandTest(CommandTest):
def test_run(self):
p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
'-s', 'CLOSESPIDER_TIMEOUT=0.01')
log = to_native_str(p.stderr.read())
self.assertIn('INFO: Crawled', log)
self.assertNotIn('Unhandled Error', log)
| umrashrf/scrapy | tests/test_commands.py | Python | bsd-3-clause | 11,211 |
"""Models for serialization of tests."""
from client.sources.common import core
class Test(core.Serializable):
name = core.String()
points = core.Float()
partner = core.String(optional=True)
def run(self, env):
"""Subclasses should override this method to run tests.
NOTE: env is intended only for use with the programmatic API for
Python OK tests.
"""
raise NotImplementedError
def score(self):
"""Subclasses should override this method to score the test."""
raise NotImplementedError
def unlock(self, interact):
"""Subclasses should override this method to lock the test."""
raise NotImplementedError
def lock(self, hash_fn):
"""Subclasses should override this method to lock the test."""
raise NotImplementedError
def dump(self):
"""Subclasses should override this method for serialization."""
raise NotImplementedError
class Case(core.Serializable):
"""Abstract case class."""
hidden = core.Boolean(default=False)
locked = core.Boolean(optional=True)
def run(self):
"""Subclasses should override this method for running a test case.
RETURNS:
bool; True if the test case passes, False otherwise.
"""
raise NotImplementedError
def lock(self, hash_fn):
"""Subclasses should override this method for locking a test case.
This method should mutate the object into a locked state.
PARAMETERS:
hash_fn -- function; computes the hash code of a given string.
"""
raise NotImplementedError
def unlock(self, unique_id_prefix, case_id, interact):
"""Subclasses should override this method for unlocking a test case.
        It is the responsibility of the subclass to make any changes to the
test case, including setting its locked field to False.
PARAMETERS:
unique_id_prefix -- string; an identifier for this Case, for purposes of
analytics.
case_id -- string; an identifier for this Case, for purposes of
analytics.
interact -- function; handles user interaction during the unlocking
phase.
"""
raise NotImplementedError
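# Illustrative sketch (hypothetical, not part of the ok-client sources) of a
# minimal concrete Case implementing the contract documented above. A real case
# would do useful work with hash_fn/interact instead of these stubs.
class _ExampleAlwaysPassCase(Case):
    """Trivial case that always passes; shows the run/lock/unlock overrides."""

    def run(self):
        # A concrete case would execute and check the student's code here.
        return True

    def lock(self, hash_fn):
        # A concrete case would replace its answers with hash_fn(answer).
        self.locked = True

    def unlock(self, unique_id_prefix, case_id, interact):
        # A concrete case would prompt the student via interact(...) first.
        self.locked = False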
| Cal-CS-61A-Staff/ok-client | client/sources/common/models.py | Python | apache-2.0 | 2,352 |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines APIs related to Core/CoreManager.
"""
from ryu.lib import hub
from ryu.services.protocols.bgp.api.base import register
from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
from ryu.services.protocols.bgp.rtconf.common import CommonConf
NEIGHBOR_RESET_WAIT_TIME = 3
@register(name='core.start')
def start(**kwargs):
"""Starts new context using provided configuration.
Raises RuntimeConfigError if a context is already active.
"""
if CORE_MANAGER.started:
raise RuntimeConfigError('Current context has to be stopped to start '
'a new context.')
waiter = kwargs.pop('waiter')
common_config = CommonConf(**kwargs)
hub.spawn(CORE_MANAGER.start, *[], **{'common_conf': common_config,
'waiter': waiter})
return True
@register(name='core.stop')
def stop(**kwargs):
"""Stops current context is one is active.
Raises RuntimeConfigError if runtime is not active or initialized yet.
"""
if not CORE_MANAGER.started:
raise RuntimeConfigError('No runtime is active. Call start to create '
'a runtime')
CORE_MANAGER.stop()
return True
@register(name='core.reset_neighbor')
def reset_neighbor(ip_address):
neighs_conf = CORE_MANAGER.neighbors_conf
neigh_conf = neighs_conf.get_neighbor_conf(ip_address)
# Check if we have neighbor with given IP.
if not neigh_conf:
raise RuntimeConfigError('No neighbor configuration found for given'
' IP: %s' % ip_address)
# If neighbor is enabled, we disable it.
if neigh_conf.enabled:
# Disable neighbor to close existing session.
neigh_conf.enabled = False
        # Yield here so that we give the neighbor a chance to be disabled.
hub.sleep(NEIGHBOR_RESET_WAIT_TIME)
# Enable neighbor, so that we have a new session with it.
neigh_conf.enabled = True
else:
raise RuntimeConfigError('Neighbor %s is not enabled, hence cannot'
' reset.' % ip_address)
return True
# =============================================================================
# Common configuration related APIs
# =============================================================================
@register(name='comm_conf.get')
def get_common_conf():
comm_conf = CORE_MANAGER.common_conf
return comm_conf.settings
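# Illustrative sketch of how these registered APIs are usually reached through
# the call() dispatcher in ryu.services.protocols.bgp.api.base; the settings
# below (router id, AS number, neighbor address, waiter) are placeholders:
#
# from ryu.services.protocols.bgp.api.base import call
#
# call('core.start', router_id='10.0.0.1', local_as=65000, waiter=waiter)
# call('core.reset_neighbor', ip_address='192.168.1.1')
# call('comm_conf.get')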
| torufuru/oolhackathon | ryu/services/protocols/bgp/api/core.py | Python | apache-2.0 | 3,160 |
# -*- coding: utf-8 -*-
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from cms.api import copy_plugins_to_language
from cms.models import Title, Page
from cms.utils.i18n import get_language_list
class CopyLangCommand(BaseCommand):
args = '<language_from language_to>'
    help = u'duplicate the cms content from one lang to another (to bootstrap a new lang) using draft pages'
def handle(self, *args, **kwargs):
verbose = 'verbose' in args
only_empty = 'force-copy' not in args
site = [arg.split("=")[1] for arg in args if arg.startswith("site")]
if site:
site = site.pop()
else:
site = settings.SITE_ID
#test both langs
try:
assert len(args) >= 2
from_lang = args[0]
to_lang = args[1]
assert from_lang != to_lang
except AssertionError:
raise CommandError("Error: bad arguments -- Usage: manage.py cms copy-lang <lang_from> <lang_to>")
try:
assert from_lang in get_language_list(site)
assert to_lang in get_language_list(site)
except AssertionError:
raise CommandError("Both languages have to be present in settings.LANGUAGES and settings.CMS_LANGUAGES")
for page in Page.objects.on_site(site).drafts():
# copy title
if from_lang in page.get_languages():
try:
title = page.get_title_obj(to_lang, fallback=False)
except Title.DoesNotExist:
title = page.get_title_obj(from_lang)
if verbose:
self.stdout.write('copying title %s from language %s\n' % (title.title, from_lang))
title.id = None
title.language = to_lang
title.save()
# copy plugins using API
if verbose:
self.stdout.write('copying plugins for %s from %s\n' % (page.get_page_title(from_lang), from_lang))
copy_plugins_to_language(page, from_lang, to_lang, only_empty)
else:
if verbose:
self.stdout.write('Skipping page %s, language %s not defined\n' % (page, from_lang))
self.stdout.write(u"all done")
| SinnerSchraderMobileMirrors/django-cms | cms/management/commands/subcommands/copy_lang.py | Python | bsd-3-clause | 2,406 |
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class MemberList(ListResource):
def __init__(self, version, service_sid, channel_sid):
"""
Initialize the MemberList
:param Version version: Version that contains the resource
:param service_sid: The service_sid
:param channel_sid: The channel_sid
:returns: twilio.rest.ip_messaging.v1.service.channel.member.MemberList
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberList
"""
super(MemberList, self).__init__(version)
# Path Solution
self._solution = {
'service_sid': service_sid,
'channel_sid': channel_sid,
}
self._uri = '/Services/{service_sid}/Channels/{channel_sid}/Members'.format(**self._solution)
def create(self, identity, role_sid=values.unset):
"""
Create a new MemberInstance
:param unicode identity: The identity
:param unicode role_sid: The role_sid
:returns: Newly created MemberInstance
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance
"""
data = values.of({
'Identity': identity,
'RoleSid': role_sid,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return MemberInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
)
def stream(self, identity=values.unset, limit=None, page_size=None):
"""
Streams MemberInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode identity: The identity
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
identity=identity,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, identity=values.unset, limit=None, page_size=None):
"""
Lists MemberInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode identity: The identity
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance]
"""
return list(self.stream(
identity=identity,
limit=limit,
page_size=page_size,
))
def page(self, identity=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of MemberInstance records from the API.
Request is executed immediately
:param unicode identity: The identity
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of MemberInstance
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberPage
"""
params = values.of({
'Identity': identity,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return MemberPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a MemberContext
:param sid: The sid
:returns: twilio.rest.ip_messaging.v1.service.channel.member.MemberContext
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberContext
"""
return MemberContext(
self._version,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a MemberContext
:param sid: The sid
:returns: twilio.rest.ip_messaging.v1.service.channel.member.MemberContext
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberContext
"""
return MemberContext(
self._version,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V1.MemberList>'
class MemberPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the MemberPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param service_sid: The service_sid
:param channel_sid: The channel_sid
:returns: twilio.rest.ip_messaging.v1.service.channel.member.MemberPage
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberPage
"""
super(MemberPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of MemberInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance
"""
return MemberInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V1.MemberPage>'
class MemberContext(InstanceContext):
def __init__(self, version, service_sid, channel_sid, sid):
"""
Initialize the MemberContext
:param Version version: Version that contains the resource
:param service_sid: The service_sid
:param channel_sid: The channel_sid
:param sid: The sid
:returns: twilio.rest.ip_messaging.v1.service.channel.member.MemberContext
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberContext
"""
super(MemberContext, self).__init__(version)
# Path Solution
self._solution = {
'service_sid': service_sid,
'channel_sid': channel_sid,
'sid': sid,
}
self._uri = '/Services/{service_sid}/Channels/{channel_sid}/Members/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a MemberInstance
:returns: Fetched MemberInstance
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return MemberInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the MemberInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def update(self, role_sid=values.unset,
last_consumed_message_index=values.unset):
"""
Update the MemberInstance
:param unicode role_sid: The role_sid
:param unicode last_consumed_message_index: The last_consumed_message_index
:returns: Updated MemberInstance
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance
"""
data = values.of({
'RoleSid': role_sid,
'LastConsumedMessageIndex': last_consumed_message_index,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return MemberInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=self._solution['sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V1.MemberContext {}>'.format(context)
class MemberInstance(InstanceResource):
def __init__(self, version, payload, service_sid, channel_sid, sid=None):
"""
Initialize the MemberInstance
:returns: twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance
"""
super(MemberInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'account_sid': payload['account_sid'],
'channel_sid': payload['channel_sid'],
'service_sid': payload['service_sid'],
'identity': payload['identity'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'role_sid': payload['role_sid'],
'last_consumed_message_index': deserialize.integer(payload['last_consumed_message_index']),
'last_consumption_timestamp': deserialize.iso8601_datetime(payload['last_consumption_timestamp']),
'url': payload['url'],
}
# Context
self._context = None
self._solution = {
'service_sid': service_sid,
'channel_sid': channel_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: MemberContext for this MemberInstance
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberContext
"""
if self._context is None:
self._context = MemberContext(
self._version,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def channel_sid(self):
"""
:returns: The channel_sid
:rtype: unicode
"""
return self._properties['channel_sid']
@property
def service_sid(self):
"""
:returns: The service_sid
:rtype: unicode
"""
return self._properties['service_sid']
@property
def identity(self):
"""
:returns: The identity
:rtype: unicode
"""
return self._properties['identity']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def role_sid(self):
"""
:returns: The role_sid
:rtype: unicode
"""
return self._properties['role_sid']
@property
def last_consumed_message_index(self):
"""
:returns: The last_consumed_message_index
:rtype: unicode
"""
return self._properties['last_consumed_message_index']
@property
def last_consumption_timestamp(self):
"""
:returns: The last_consumption_timestamp
:rtype: datetime
"""
return self._properties['last_consumption_timestamp']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a MemberInstance
:returns: Fetched MemberInstance
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the MemberInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, role_sid=values.unset,
last_consumed_message_index=values.unset):
"""
Update the MemberInstance
:param unicode role_sid: The role_sid
:param unicode last_consumed_message_index: The last_consumed_message_index
:returns: Updated MemberInstance
:rtype: twilio.rest.ip_messaging.v1.service.channel.member.MemberInstance
"""
return self._proxy.update(
role_sid=role_sid,
last_consumed_message_index=last_consumed_message_index,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V1.MemberInstance {}>'.format(context)
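# Illustrative usage sketch (SIDs, credentials and identities are placeholders;
# assumes a twilio.rest.Client wired up with valid account credentials):
#
# from twilio.rest import Client
#
# client = Client(account_sid, auth_token)
# members = client.ip_messaging.v1.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#                                 .channels('CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#                                 .members
# member = members.create(identity='alice')
# for m in members.list(limit=20):
#     print(m.sid, m.identity)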
| angadpc/Alexa-Project- | twilio/rest/ip_messaging/v1/service/channel/member.py | Python | mit | 16,156 |
from moto import mock_cloudformation_deprecated, mock_ec2_deprecated
from moto import mock_cloudformation, mock_ec2
from tests import EXAMPLE_AMI_ID
from tests.test_cloudformation.fixtures import vpc_eni
import boto
import boto.ec2
import boto.cloudformation
import boto.vpc
import boto3
import json
import sure # noqa
@mock_ec2_deprecated
@mock_cloudformation_deprecated
def test_elastic_network_interfaces_cloudformation():
template = vpc_eni.template
template_json = json.dumps(template)
conn = boto.cloudformation.connect_to_region("us-west-1")
conn.create_stack("test_stack", template_body=template_json)
ec2_conn = boto.ec2.connect_to_region("us-west-1")
eni = ec2_conn.get_all_network_interfaces()[0]
eni.private_ip_addresses.should.have.length_of(1)
stack = conn.describe_stacks()[0]
resources = stack.describe_resources()
cfn_eni = [
resource
for resource in resources
if resource.resource_type == "AWS::EC2::NetworkInterface"
][0]
cfn_eni.physical_resource_id.should.equal(eni.id)
outputs = {output.key: output.value for output in stack.outputs}
outputs["ENIIpAddress"].should.equal(eni.private_ip_addresses[0].private_ip_address)
@mock_ec2
@mock_cloudformation
def test_volume_size_through_cloudformation():
ec2 = boto3.client("ec2", region_name="us-east-1")
cf = boto3.client("cloudformation", region_name="us-east-1")
volume_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"testInstance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "dummy",
"InstanceType": "t2.micro",
"BlockDeviceMappings": [
{"DeviceName": "/dev/sda2", "Ebs": {"VolumeSize": "50"}}
],
"Tags": [
{"Key": "foo", "Value": "bar"},
{"Key": "blah", "Value": "baz"},
],
},
}
},
}
template_json = json.dumps(volume_template)
cf.create_stack(StackName="test_stack", TemplateBody=template_json)
instances = ec2.describe_instances()
volume = instances["Reservations"][0]["Instances"][0]["BlockDeviceMappings"][0][
"Ebs"
]
volumes = ec2.describe_volumes(VolumeIds=[volume["VolumeId"]])
volumes["Volumes"][0]["Size"].should.equal(50)
@mock_ec2_deprecated
@mock_cloudformation_deprecated
def test_subnet_tags_through_cloudformation():
vpc_conn = boto.vpc.connect_to_region("us-west-1")
vpc = vpc_conn.create_vpc("10.0.0.0/16")
subnet_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"testSubnet": {
"Type": "AWS::EC2::Subnet",
"Properties": {
"VpcId": vpc.id,
"CidrBlock": "10.0.0.0/24",
"AvailabilityZone": "us-west-1b",
"Tags": [
{"Key": "foo", "Value": "bar"},
{"Key": "blah", "Value": "baz"},
],
},
}
},
}
cf_conn = boto.cloudformation.connect_to_region("us-west-1")
template_json = json.dumps(subnet_template)
cf_conn.create_stack("test_stack", template_body=template_json)
subnet = vpc_conn.get_all_subnets(filters={"cidrBlock": "10.0.0.0/24"})[0]
subnet.tags["foo"].should.equal("bar")
subnet.tags["blah"].should.equal("baz")
| william-richard/moto | tests/test_ec2/test_ec2_cloudformation.py | Python | apache-2.0 | 3,624 |
# Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
# All rights reserved; available under the terms of the BSD License.
"""
esky.bdist_esky: distutils command to freeze apps in esky format
Importing this module makes "bdist_esky" available as a distutils command.
This command will freeze the given scripts and package them into a zipfile
named with the application name, version and platform.
The resulting zipfile is conveniently in the format expected by the class
DefaultVersionFinder. It will be named "appname-version.platform.zip"
"""
from __future__ import with_statement
import os
import re
import sys
import shutil
import zipfile
import tempfile
import hashlib
import inspect
from glob import glob
import distutils.command
from distutils.core import Command
from distutils.util import convert_path
import esky.patch
from esky.util import get_platform, is_core_dependency, create_zipfile, \
split_app_version, join_app_version, ESKY_CONTROL_DIR, \
ESKY_APPDATA_DIR, really_rmtree
if sys.platform == "win32":
from esky import winres
from xml.dom import minidom
try:
from esky.bdist_esky import pypyc
except ImportError, e:
pypyc = None
PYPYC_ERROR = e
COMPILED_BOOTSTRAP_CACHE = None
else:
COMPILED_BOOTSTRAP_CACHE = os.path.dirname(__file__)
if not os.path.isdir(COMPILED_BOOTSTRAP_CACHE):
COMPILED_BOOTSTRAP_CACHE = None
# setuptools likes to be imported before anything else that
# might monkey-patch distutils. We don't actually use it,
# this is just to avoid errors with cx_Freeze.
try:
import setuptools
except ImportError:
pass
_FREEZERS = {}
try:
from esky.bdist_esky import f_py2exe
_FREEZERS["py2exe"] = f_py2exe
except ImportError:
_FREEZERS["py2exe"] = None
try:
from esky.bdist_esky import f_py2app
_FREEZERS["py2app"] = f_py2app
except ImportError:
_FREEZERS["py2app"] = None
try:
from esky.bdist_esky import f_bbfreeze
_FREEZERS["bbfreeze"] = f_bbfreeze
except ImportError:
_FREEZERS["bbfreeze"] = None
try:
from esky.bdist_esky import f_cxfreeze
_FREEZERS["cxfreeze"] = f_cxfreeze
_FREEZERS["cx_Freeze"] = f_cxfreeze
_FREEZERS["cx_freeze"] = f_cxfreeze
except ImportError:
_FREEZERS["cxfreeze"] = None
_FREEZERS["cx_Freeze"] = None
_FREEZERS["cx_freeze"] = None
class Executable(unicode):
"""Class to hold information about a specific executable.
This class provides a uniform way to specify extra meta-data about
a frozen executable. By setting various keyword arguments, you can
specify e.g. the icon, and whether it is a gui-only script.
Some freezer modules require all items in the "scripts" argument to
be strings naming real files. This is therefore a subclass of unicode,
and if it refers only to in-memory code then its string value will be
the path to this very file. I know it's ugly, but it works.
"""
def __new__(cls,script,**kwds):
if isinstance(script,basestring):
return unicode.__new__(cls,script)
else:
return unicode.__new__(cls,__file__)
def __init__(self,script,name=None,icon=None,gui_only=None,
include_in_bootstrap_env=True,**kwds):
unicode.__init__(self)
if isinstance(script,Executable):
script = script.script
if name is None:
name = script.name
if gui_only is None:
gui_only = script.gui_only
if not isinstance(script,basestring):
if name is None:
raise TypeError("Must specify name if script is not a file")
self.script = script
self.include_in_bootstrap_env = include_in_bootstrap_env
self.icon = icon
self._name = name
self._gui_only = gui_only
self._kwds = kwds
@property
def name(self):
if self._name is not None:
nm = self._name
else:
if not isinstance(self.script,basestring):
raise TypeError("Must specify name if script is not a file")
nm = os.path.basename(self.script)
if nm.endswith(".py"):
nm = nm[:-3]
elif nm.endswith(".pyw"):
nm = nm[:-4]
if sys.platform == "win32" and not nm.endswith(".exe"):
nm += ".exe"
return nm
@property
def gui_only(self):
if self._gui_only is None:
if not isinstance(self.script,basestring):
return False
else:
return self.script.endswith(".pyw")
else:
return self._gui_only
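# Illustrative setup.py sketch showing how Executable and the bdist_esky options
# documented below are typically combined; the project name, script, icon and
# freezer choice are placeholders rather than anything defined in this module:
#
# from distutils.core import setup
# from esky.bdist_esky import Executable
#
# setup(
#     name="myapp",
#     version="1.2.3",
#     scripts=[Executable("scripts/myapp.pyw", icon="myapp.ico", gui_only=True)],
#     options={"bdist_esky": {
#         "freezer_module": "cxfreeze",
#         "includes": ["myapp.plugins"],
#     }},
# )
#
# Running "python setup.py bdist_esky" would then produce a zipfile named like
# "myapp-1.2.3.<platform>.zip" in the dist directory.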
class bdist_esky(Command):
"""Create a frozen application in 'esky' format.
This distutils command can be used to freeze an application in the
format expected by esky. It interprets the following standard
distutils options:
scripts: list of scripts to freeze as executables;
to make a gui-only script, name it 'script.pyw'
data_files: copied into the frozen app directory
package_data: copied into library.zip alongside the module code
To further customize the behaviour of the bdist_esky command, you can
specify the following custom options:
includes: a list of modules to explicitly include in the freeze
excludes: a list of modules to explicitly exclude from the freeze
freezer_module: name of freezer module to use; currently py2exe,
py2app, bbfreeze and cx-freeze are supported.
freezer_options: dict of options to pass through to the underlying
freezer module.
bootstrap_module: a custom module to use for esky bootstrapping;
the default calls esky.bootstrap.bootstrap()
bootstrap_code: a custom code string to use for esky bootstrapping;
this precludes the use of the bootstrap_module option.
If a non-string object is given, its source is taken
using inspect.getsource().
compile_bootstrap_exes: whether to compile the bootstrapping code to a
stand-alone exe; this requires PyPy installed
and the bootstrap code to be valid RPython.
When false, the bootstrap env will use a
trimmed-down copy of the freezer module exe.
dont_run_startup_hooks: don't force all executables to call
esky.run_startup_hooks() on startup.
bundle_msvcrt: whether to bundle the MSVCRT DLLs, manifest files etc
as a private assembly. The default is False; only
                       those with a valid license to redistribute these files
should enable it.
pre_freeze_callback: function to call just before starting to freeze
the application; this is a good opportunity to
customize the bdist_esky instance.
pre_zip_callback: function to call just before starting to zip up
the frozen application; this is a good opportunity
to e.g. sign the resulting executables.
"""
description = "create a frozen app in 'esky' format"
user_options = [
('dist-dir=', 'd',
"directory to put final built distributions in"),
('freezer-module=', None,
"module to use for freezing the application"),
('freezer-options=', None,
"options to pass to the underlying freezer module"),
('bootstrap-module=', None,
"module to use for bootstrapping the application"),
('bootstrap-code=', None,
"code to use for bootstrapping the application"),
('compile-bootstrap-exes=', None,
"whether to compile the bootstrapping exes with pypy"),
('bundle-msvcrt=', None,
"whether to bundle MSVCRT as private assembly"),
('includes=', None,
"list of modules to specifically include"),
('excludes=', None,
"list of modules to specifically exclude"),
('dont-run-startup-hooks=', None,
"don't force execution of esky.run_startup_hooks()"),
('pre-freeze-callback=', None,
"function to call just before starting to freeze the app"),
('pre-zip-callback=', None,
"function to call just before starting to zip up the app"),
('enable-appdata-dir=', None,
"enable new 'appdata' directory layout (will go away after the 0.9.X series)"),
('detached-bootstrap-library=', None,
"By default Esky appends the library.zip to the bootstrap executable when using CX_Freeze, this will tell esky to not do that, but create a separate library.zip instead"),
]
boolean_options = ["bundle-msvcrt","dont-run-startup-hooks","compile-bootstrap-exes","enable-appdata-dir"]
def initialize_options(self):
self.dist_dir = None
self.includes = []
self.excludes = []
self.freezer_module = None
self.freezer_options = {}
self.bundle_msvcrt = False
self.dont_run_startup_hooks = False
self.bootstrap_module = None
self.bootstrap_code = None
self.compile_bootstrap_exes = False
self._compiled_exes = {}
self.pre_freeze_callback = None
self.pre_zip_callback = None
self.enable_appdata_dir = False
self.detached_bootstrap_library = False
def finalize_options(self):
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.compile_bootstrap_exes and pypyc is None:
raise PYPYC_ERROR
if self.freezer_module is None:
for freezer_module in ("py2exe","py2app","bbfreeze","cxfreeze"):
self.freezer_module = _FREEZERS[freezer_module]
if self.freezer_module is not None:
break
else:
err = "no supported freezer modules found"
err += " (try installing bbfreeze)"
raise RuntimeError(err)
else:
try:
freezer = _FREEZERS[self.freezer_module]
except KeyError:
err = "freezer module not supported: '%s'"
err = err % (self.freezer_module,)
raise RuntimeError(err)
else:
if freezer is None:
err = "freezer module not found: '%s'"
err = err % (self.freezer_module,)
raise RuntimeError(err)
self.freezer_module = freezer
if isinstance(self.pre_freeze_callback,basestring):
self.pre_freeze_callback = self._name2func(self.pre_freeze_callback)
if isinstance(self.pre_zip_callback,basestring):
self.pre_zip_callback = self._name2func(self.pre_zip_callback)
def _name2func(self,name):
"""Convert a dotted name into a function reference."""
if "." not in name:
return globals()[name]
modname,funcname = name.rsplit(".",1)
mod = __import__(modname,fromlist=[funcname])
return getattr(mod,funcname)
def run(self):
self.tempdir = tempfile.mkdtemp()
try:
self._run()
finally:
really_rmtree(self.tempdir)
def _run(self):
self._run_initialise_dirs()
if self.pre_freeze_callback is not None:
self.pre_freeze_callback(self)
self._run_freeze_scripts()
if self.pre_zip_callback is not None:
self.pre_zip_callback(self)
self._run_create_zipfile()
def _run_initialise_dirs(self):
"""Create the dirs into which to freeze the app."""
fullname = self.distribution.get_fullname()
platform = get_platform()
self.bootstrap_dir = os.path.join(self.dist_dir,
"%s.%s"%(fullname,platform,))
if self.enable_appdata_dir:
self.freeze_dir = os.path.join(self.bootstrap_dir,ESKY_APPDATA_DIR,
"%s.%s"%(fullname,platform,))
else:
self.freeze_dir = os.path.join(self.bootstrap_dir,
"%s.%s"%(fullname,platform,))
if os.path.exists(self.bootstrap_dir):
really_rmtree(self.bootstrap_dir)
os.makedirs(self.freeze_dir)
def _run_freeze_scripts(self):
"""Call the selected freezer module to freeze the scripts."""
fullname = self.distribution.get_fullname()
platform = get_platform()
self.freezer_module.freeze(self)
if platform != "win32":
lockfile = os.path.join(self.freeze_dir,ESKY_CONTROL_DIR,"lockfile.txt")
with open(lockfile,"w") as lf:
lf.write("this file is used by esky to lock the version dir\n")
def _run_create_zipfile(self):
"""Zip up the final distribution."""
print "zipping up the esky"
fullname = self.distribution.get_fullname()
platform = get_platform()
zfname = os.path.join(self.dist_dir,"%s.%s.zip"%(fullname,platform,))
if hasattr(self.freezer_module,"zipit"):
self.freezer_module.zipit(self,self.bootstrap_dir,zfname)
else:
create_zipfile(self.bootstrap_dir,zfname,compress=True)
really_rmtree(self.bootstrap_dir)
def _obj2code(self,obj):
"""Convert an object to some python source code.
Iterables are flattened, None is elided, strings are included verbatim,
open files are read and anything else is passed to inspect.getsource().
"""
if obj is None:
return ""
if isinstance(obj,basestring):
return obj
if hasattr(obj,"read"):
return obj.read()
try:
return "\n\n\n".join(self._obj2code(i) for i in obj)
except TypeError:
return inspect.getsource(obj)
def get_bootstrap_code(self):
"""Get any extra code to be executed by the bootstrapping exe.
This method interprets the bootstrap-code and bootstrap-module settings
to construct any extra bootstrapping code that must be executed by
the frozen bootstrap executable. It is returned as a string.
"""
bscode = self.bootstrap_code
if bscode is None:
if self.bootstrap_module is not None:
bscode = __import__(self.bootstrap_module)
for submod in self.bootstrap_module.split(".")[1:]:
bscode = getattr(bscode,submod)
bscode = self._obj2code(bscode)
return bscode
def get_executables(self,normalise=True):
"""Yield a normalised Executable instance for each script to be frozen.
If "normalise" is True (the default) then the user-provided scripts
will be rewritten to decode any non-filename items specified as part
of the script, and to include the esky startup code. If the freezer
has a better way of doing these things, it should pass normalise=False.
"""
if normalise:
if not os.path.exists(os.path.join(self.tempdir,"scripts")):
os.mkdir(os.path.join(self.tempdir,"scripts"))
if self.distribution.has_scripts():
for s in self.distribution.scripts:
if isinstance(s,Executable):
exe = s
else:
exe = Executable(s)
if normalise:
# Give the normalised script file a name matching that
# specified, since some freezers only take the filename.
name = exe.name
if sys.platform == "win32" and name.endswith(".exe"):
name = name[:-4]
if exe.endswith(".pyw"):
ext = ".pyw"
else:
ext = ".py"
script = os.path.join(self.tempdir,"scripts",name+ext)
# Get the code for the target script.
# If it's a single string then interpret it as a filename,
# otherwise feed it into the _obj2code logic.
if isinstance(exe.script,basestring):
with open(exe.script,"rt") as f:
code = f.read()
else:
code = self._obj2code(exe.script)
# Check that the code actually compiles - sometimes it
# can be hard to get a good message out of the freezer.
compile(code,"","exec")
# Augment the given code with special esky-related logic.
with open(script,"wt") as fOut:
lines = (ln+"\n" for ln in code.split("\n"))
# Keep any leading comments and __future__ imports
# at the start of the file.
for ln in lines:
if ln.strip():
if not ln.strip().startswith("#"):
if "__future__" not in ln:
break
fOut.write(ln)
# Run the startup hooks before any actual code.
if not self.dont_run_startup_hooks:
fOut.write("import esky\n")
fOut.write("esky.run_startup_hooks()\n")
fOut.write("\n")
# Then just include the rest of the script code.
fOut.write(ln)
for ln in lines:
fOut.write(ln)
new_exe = Executable(script)
new_exe.__dict__.update(exe.__dict__)
new_exe.script = script
exe = new_exe
yield exe
def get_data_files(self):
"""Yield (source,destination) tuples for data files.
This method generates the names of all data file to be included in
the frozen app. They should be placed directly into the freeze
directory as raw files.
"""
fdir = self.freeze_dir
if sys.platform == "win32" and self.bundle_msvcrt:
for (src,dst) in self.get_msvcrt_private_assembly_files():
yield (src,dst)
if self.distribution.data_files:
for datafile in self.distribution.data_files:
# Plain strings get placed in the root dist directory.
if isinstance(datafile,basestring):
datafile = ("",[datafile])
(dst,sources) = datafile
if os.path.isabs(dst):
err = "cant freeze absolute data_file paths (%s)"
err = err % (dst,)
raise ValueError(err)
dst = convert_path(dst)
for src in sources:
src = convert_path(src)
yield (src,os.path.join(dst,os.path.basename(src)))
def get_package_data(self):
"""Yield (source,destination) tuples for package data files.
This method generates the names of all package data files to be
included in the frozen app. They should be placed in the library.zip
or equivalent, alongside the python files for that package.
"""
if self.distribution.package_data:
for pkg,data in self.distribution.package_data.iteritems():
pkg_dir = self.get_package_dir(pkg)
pkg_path = pkg.replace(".","/")
if isinstance(data,basestring):
data = [data]
for dpattern in data:
dfiles = glob(os.path.join(pkg_dir,convert_path(dpattern)))
for nm in dfiles:
arcnm = pkg_path + nm[len(pkg_dir):]
yield (nm,arcnm)
def get_package_dir(self,pkg):
"""Return directory where the given package is located.
This was largely swiped from distutils, with some cleanups.
"""
inpath = pkg.split(".")
outpath = []
if not self.distribution.package_dir:
outpath = inpath
else:
while inpath:
try:
dir = self.distribution.package_dir[".".join(inpath)]
except KeyError:
outpath.insert(0, inpath[-1])
del inpath[-1]
else:
outpath.insert(0, dir)
break
else:
try:
dir = self.package_dir[""]
except KeyError:
pass
else:
outpath.insert(0, dir)
if outpath:
return os.path.join(*outpath)
else:
return ""
@staticmethod
def get_msvcrt_private_assembly_files():
"""Get (source,destination) tuples for the MSVCRT DLLs, manifest etc.
This method generates data_files tuples for the MSVCRT DLLs, manifest
and associated paraphernalia. Including these files is required for
newer Python versions if you want to run on machines that don't have
the latest C runtime installed *and* you don't want to run the special
"vcredist_x86.exe" program during your installation process.
        Bundling is only performed on win32 platforms, and only if you enable
it explicitly. Before doing so, carefully check whether you have a
license to distribute these files.
"""
cls = bdist_esky
msvcrt_info = cls._get_msvcrt_info()
if msvcrt_info is not None:
msvcrt_name = msvcrt_info[0]
# Find installed manifest file with matching info
for candidate in cls._find_msvcrt_manifest_files(msvcrt_name):
manifest_file, msvcrt_dir = candidate
try:
with open(manifest_file,"rb") as mf:
manifest_data = mf.read()
for info in msvcrt_info:
if info.encode() not in manifest_data:
break
else:
break
except EnvironmentError:
pass
else:
err = "manifest for %s not found" % (msvcrt_info,)
raise RuntimeError(err)
# Copy the manifest and matching directory into the freeze dir.
manifest_name = msvcrt_name + ".manifest"
yield (manifest_file,os.path.join(msvcrt_name,manifest_name))
for fnm in os.listdir(msvcrt_dir):
yield (os.path.join(msvcrt_dir,fnm),
os.path.join(msvcrt_name,fnm))
@staticmethod
def _get_msvcrt_info():
"""Get info about the MSVCRT in use by this python executable.
This parses the name, version and public key token out of the exe
manifest and returns them as a tuple.
"""
try:
manifest_str = winres.get_app_manifest()
except EnvironmentError:
return None
manifest = minidom.parseString(manifest_str)
for assembly in manifest.getElementsByTagName("assemblyIdentity"):
name = assembly.attributes["name"].value
if name.startswith("Microsoft") and name.endswith("CRT"):
version = assembly.attributes["version"].value
pubkey = assembly.attributes["publicKeyToken"].value
return (name,version,pubkey)
return None
@staticmethod
def _find_msvcrt_manifest_files(name):
"""Search the system for candidate MSVCRT manifest files.
This method yields (manifest_file,msvcrt_dir) tuples giving a candidate
manifest file for the given assembly name, and the directory in which
the actual assembly data files are found.
"""
cls = bdist_esky
# Search for redist files in a Visual Studio install
progfiles = os.path.expandvars("%PROGRAMFILES%")
for dnm in os.listdir(progfiles):
if dnm.lower().startswith("microsoft visual studio"):
dpath = os.path.join(progfiles,dnm,"VC","redist")
for (subdir,_,filenames) in os.walk(dpath):
for fnm in filenames:
if name.lower() in fnm.lower():
if fnm.lower().endswith(".manifest"):
mf = os.path.join(subdir,fnm)
md = cls._find_msvcrt_dir_for_manifest(name,mf)
if md is not None:
yield (mf,md)
# Search for manifests installed in the WinSxS directory
winsxs_m = os.path.expandvars("%WINDIR%\\WinSxS\\Manifests")
for fnm in os.listdir(winsxs_m):
if name.lower() in fnm.lower():
if fnm.lower().endswith(".manifest"):
mf = os.path.join(winsxs_m,fnm)
md = cls._find_msvcrt_dir_for_manifest(name,mf)
if md is not None:
yield (mf,md)
winsxs = os.path.expandvars("%WINDIR%\\WinSxS")
for fnm in os.listdir(winsxs):
if name.lower() in fnm.lower():
if fnm.lower().endswith(".manifest"):
mf = os.path.join(winsxs,fnm)
md = cls._find_msvcrt_dir_for_manifest(name,mf)
if md is not None:
yield (mf,md)
@staticmethod
def _find_msvcrt_dir_for_manifest(msvcrt_name,manifest_file):
"""Find the directory containing data files for the given manifest.
This searches a few common locations for the data files that go with
the given manifest file. If a suitable directory is found then it is
returned, otherwise None is returned.
"""
# The manifest file might be next to the dir, inside the dir, or
# in a subdir named "Manifests". Walk around till we find it.
msvcrt_dir = ".".join(manifest_file.split(".")[:-1])
if os.path.isdir(msvcrt_dir):
return msvcrt_dir
msvcrt_basename = os.path.basename(msvcrt_dir)
msvcrt_parent = os.path.dirname(os.path.dirname(msvcrt_dir))
msvcrt_dir = os.path.join(msvcrt_parent,msvcrt_basename)
if os.path.isdir(msvcrt_dir):
return msvcrt_dir
msvcrt_dir = os.path.join(msvcrt_parent,msvcrt_name)
if os.path.isdir(msvcrt_dir):
return msvcrt_dir
return None
def compile_to_bootstrap_exe(self,exe,source,relpath=None):
"""Compile the given sourcecode into a bootstrapping exe.
This method compiles the given sourcecode into a stand-alone exe using
PyPy, then stores that in the bootstrap env under the name of the given
Executable object. If the source has been previously compiled then a
cached version of the exe may be used.
"""
if not relpath:
relpath = exe.name
source = "__rpython__ = True\n" + source
cdir = os.path.join(self.tempdir,"compile")
if not os.path.exists(cdir):
os.mkdir(cdir)
source_hash = hashlib.md5(source).hexdigest()
outname = "bootstrap_%s.%s" % (source_hash,get_platform())
if exe.gui_only:
outname += ".gui"
if sys.platform == "win32":
outname += ".exe"
# First try to use a precompiled version.
if COMPILED_BOOTSTRAP_CACHE is not None:
outfile = os.path.join(COMPILED_BOOTSTRAP_CACHE,outname)
if os.path.exists(outfile):
return self.copy_to_bootstrap_env(outfile,relpath)
# Otherwise we have to compile it anew.
try:
outfile = self._compiled_exes[(source_hash,exe.gui_only)]
except KeyError:
infile = os.path.join(cdir,"bootstrap.py")
outfile = os.path.join(cdir,outname)
with open(infile,"wt") as f:
f.write(source)
opts = dict(gui_only=exe.gui_only)
pypyc.compile_rpython(infile,outfile,**opts)
self._compiled_exes[(source_hash,exe.gui_only)] = outfile
# Try to save the compiled exe for future use.
if COMPILED_BOOTSTRAP_CACHE is not None:
cachedfile = os.path.join(COMPILED_BOOTSTRAP_CACHE,outname)
try:
shutil.copy2(outfile,cachedfile)
except EnvironmentError:
pass
return self.copy_to_bootstrap_env(outfile,relpath)
def copy_to_bootstrap_env(self,src,dst=None):
"""Copy the named file into the bootstrap environment.
The filename is also added to the bootstrap manifest.
"""
if dst is None:
dst = src
srcpath = os.path.join(self.freeze_dir,src)
dstpath = os.path.join(self.bootstrap_dir,dst)
if os.path.isdir(srcpath):
self.copy_tree(srcpath,dstpath)
else:
if not os.path.isdir(os.path.dirname(dstpath)):
self.mkpath(os.path.dirname(dstpath))
self.copy_file(srcpath,dstpath)
self.add_to_bootstrap_manifest(dstpath)
return dstpath
def add_to_bootstrap_manifest(self,dstpath):
if not os.path.isdir(os.path.join(self.freeze_dir,ESKY_CONTROL_DIR)):
os.mkdir(os.path.join(self.freeze_dir,ESKY_CONTROL_DIR))
f_manifest = os.path.join(self.freeze_dir,ESKY_CONTROL_DIR,"bootstrap-manifest.txt")
with open(f_manifest,"at") as f_manifest:
f_manifest.seek(0,os.SEEK_END)
if os.path.isdir(dstpath):
for (dirnm,_,filenms) in os.walk(dstpath):
for fnm in filenms:
fpath = os.path.join(dirnm,fnm)
dpath = fpath[len(self.bootstrap_dir)+1:]
if os.sep != "/":
dpath = dpath.replace(os.sep,"/")
f_manifest.write(dpath)
f_manifest.write("\n")
else:
dst = dstpath[len(self.bootstrap_dir)+1:]
if os.sep != "/":
dst = dst.replace(os.sep,"/")
f_manifest.write(dst)
f_manifest.write("\n")
class bdist_esky_patch(Command):
"""Create a patch for a frozen application in 'esky' format.
This distutils command can be used to create a patch file between two
versions of an application frozen with esky. Such a patch can be used
for differential updates between application versions.
"""
user_options = [
('dist-dir=', 'd',
"directory to put final built distributions in"),
('from-version=', None,
"version against which to produce patch"),
]
def initialize_options(self):
self.dist_dir = None
self.from_version = None
def finalize_options(self):
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
def run(self):
fullname = self.distribution.get_fullname()
platform = get_platform()
vdir = "%s.%s" % (fullname,platform,)
appname = split_app_version(vdir)[0]
# Ensure we have current version's esky, as target for patch.
target_esky = os.path.join(self.dist_dir,vdir+".zip")
if not os.path.exists(target_esky):
self.run_command("bdist_esky")
# Generate list of source eskys to patch against.
if self.from_version:
source_vdir = join_app_version(appname,self.from_version,platform)
source_eskys = [os.path.join(self.dist_dir,source_vdir+".zip")]
else:
source_eskys = []
for nm in os.listdir(self.dist_dir):
if target_esky.endswith(nm):
continue
if nm.startswith(appname+"-") and nm.endswith(platform+".zip"):
source_eskys.append(os.path.join(self.dist_dir,nm))
# Write each patch, transparently unzipping the esky
for source_esky in source_eskys:
target_vdir = os.path.basename(source_esky)[:-4]
target_version = split_app_version(target_vdir)[1]
patchfile = vdir+".from-%s.patch" % (target_version,)
patchfile = os.path.join(self.dist_dir,patchfile)
print "patching", target_esky, "against", source_esky, "=>", patchfile
if not self.dry_run:
try:
esky.patch.main(["-Z","diff",source_esky,target_esky,patchfile])
except:
import traceback
traceback.print_exc()
raise
# Monkey-patch distutils to include our commands by default.
distutils.command.__all__.append("bdist_esky")
distutils.command.__all__.append("bdist_esky_patch")
sys.modules["distutils.command.bdist_esky"] = sys.modules["esky.bdist_esky"]
sys.modules["distutils.command.bdist_esky_patch"] = sys.modules["esky.bdist_esky"]
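# Illustrative usage sketch (added for exposition; not part of esky itself).
# A project would typically drive the two commands defined above from its
# setup.py.  The project name, version, script path and freezer choice below
# are hypothetical placeholders.
#
#     from distutils.core import setup
#     from esky.bdist_esky import Executable
#
#     setup(
#         name="myapp",
#         version="1.2.3",
#         scripts=[Executable("scripts/myapp.py", gui_only=False)],
#         options={"bdist_esky": {"freezer_module": "cxfreeze"}},
#     )
#
# With that in place, "python setup.py bdist_esky" builds the frozen zipfile
# and "python setup.py bdist_esky_patch" builds differential patches against
# any older zipfiles found in the dist directory.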
| Darkman/esky | esky/bdist_esky/__init__.py | Python | bsd-3-clause | 34,079 |
import sys
if "../.." not in sys.path: sys.path.insert(0,"../..")
from gSLLexer import *
import ply.lex as lex
code = """0
1.0
5.25
1024e2
1024E2
1024e+2
1024E+2
512e5
512E5
10000e-2
10000E-2
3.14e2
3.14E2
25.00e-2
25.00E-2
"""
gLexer = gSLLexer()
lex.runmain(data = code)
| roskoff/gSL | tests/token_tests/lex_06_numbers.py | Python | gpl-2.0 | 274 |
from .warning_window import WarningWindow
from .monitor_window import MonitorWindowBase
from .text_entry import TextEntry
from .text_entry import Colors
| aserebryakov/godville-monitor-console | monitor/core/__init__.py | Python | gpl-2.0 | 154 |
import weakref
from python2.shared.codec import BaseDecodingSession, BaseEncodingSession
class ServerCodec():
def __init__(self, server):
self.server = weakref.proxy(server)
def encoding_session(self):
return ServerEncodingSession(self.server)
def encode(self, obj, depth):
return self.encoding_session().encode(obj, depth)
def decoding_session(self):
return ServerDecodingSession(self.server)
def decode(self, obj):
return self.decoding_session().decode(obj)
class ServerEncodingSession(BaseEncodingSession):
""" Python 2 server object encoder. """
def __init__(self, server):
super(ServerEncodingSession, self).__init__()
self.server = server
def _enc_ref(self, obj):
""" Encode an object as a reference. """
self.server.cache_add(obj)
return dict(type='ref', id=id(obj))
class ServerDecodingSession(BaseDecodingSession):
def __init__(self, server):
super(ServerDecodingSession, self).__init__()
self.server = server
def _dec_ref(self, data):
""" Decode an object reference. """
return self.server.cache_get(data['id'])
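# Illustrative sketch (added for exposition; not part of the python2 package).
# The server object handed to ServerCodec is assumed to expose cache_add() and
# cache_get(), as used above; the one below is a hypothetical stand-in.  Note
# the server must be kept alive by the caller, since ServerCodec only holds a
# weak proxy to it.
#
#     class FakeServer(object):
#         def __init__(self):
#             self._cache = {}
#         def cache_add(self, obj):
#             self._cache[id(obj)] = obj
#         def cache_get(self, oid):
#             return self._cache[oid]
#
#     server = FakeServer()
#     codec = ServerCodec(server)
#     payload = codec.encode(some_object, depth=1)   # wire-format data
#     roundtripped = codec.decode(payload)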
| nickgaya/python2 | python2/server/codec.py | Python | mit | 1,188 |
'''
Local File Publisher
Abstract:
The class contained within this module allows python programs to
publish weather conditions to a local text file. The format of the file is:
field value [value ...]
field value [value ...]
field value [value ...]
...
...
Each 'field' will begin on a separate line. The 'field' parameter is always a
single word. Depending on the field, there may be multiple 'value'
parameters. All fields and values are separated by a single space. String
values will be surrounded by quotes.
This class does not define field names. The implementation assigns field names
from the keyword parameters passed to it through the set() method. Therefore it
is up to the user to define all field names using named parameters with the
'set()' method. If you desire to keep the TextFile.set() command compatible
with other set() publisher methods, please reference the other classes for
expected field names.
Usage:
>>> publisher = TextFile( 'file_name' )
>>> publisher.set( ... )
>>> publisher.publish()
Author: Patrick C. McGinty (pyweather@tuxcoder.com)
Date: Thursday, July 15 2010
'''
import io
import logging
log = logging.getLogger(__name__)
from . _base import *
class TextFile(object):
'''
Publishes weather data to a local file. See module
documentation for additional information and usage idioms.
'''
def __init__(self, file_name):
self.file_name = file_name
self.args = {}
def set( self, **kw):
'''
Store keyword args to be written to output file.
'''
self.args = kw
log.debug( self.args )
@staticmethod
def _append_vals( buf, val):
if isinstance(val,dict):
msg = 'unsupported %s type: %s' % (type(val),repr(val),)
log.error(msg)
if isinstance(val,(list,tuple)):
for i in val:
TextFile._append_vals(buf, i)
else:
buf.write(' ' + repr(val))
def publish(self):
'''
Write output file.
'''
with open( self.file_name, 'w') as fh:
for k,v in self.args.items():
buf = io.StringIO()
buf.write(k)
self._append_vals(buf,v)
fh.write(buf.getvalue() + '\n')
buf.close() # free string buffer
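# Illustrative example (added for exposition; the field names below are
# hypothetical, since this class leaves field naming entirely to the caller):
#
#     publisher = TextFile('/tmp/weather.txt')
#     publisher.set(temp_f=72.5, humidity=45, station='KSFO')
#     publisher.publish()
#
# would write a file along the lines of:
#
#     temp_f 72.5
#     humidity 45
#     station 'KSFO'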
| cmcginty/PyWeather | weather/services/file.py | Python | gpl-3.0 | 2,266 |
#===============================================================================
# Copyright 2014 NetApp, Inc. All Rights Reserved,
# contribution by Jorge Mora <mora@netapp.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#===============================================================================
"""
DNS constants module
Provide constant values and mapping dictionaries for the DNS layer.
RFC 1035 Domain Names - Implementation and Specification
RFC 2671 Extension Mechanisms for DNS (EDNS0)
RFC 4034 Resource Records for the DNS Security Extensions
RFC 4035 Protocol Modifications for the DNS Security Extensions
RFC 4255 Using DNS to Securely Publish Secure Shell (SSH) Key Fingerprints
"""
import nfstest_config as c
# Module constants
__author__ = "Jorge Mora (%s)" % c.NFSTEST_AUTHOR_EMAIL
__copyright__ = "Copyright (C) 2014 NetApp, Inc."
__license__ = "GPL v2"
__version__ = "1.0"
# Enum dns_query
QUERY = 0
REPLY = 1
dns_query = {
0 : "QUERY",
1 : "REPLY",
}
# Enum dns_opcode
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5
dns_opcode = {
0 : "QUERY",
1 : "IQUERY",
2 : "STATUS",
4 : "NOTIFY",
5 : "UPDATE",
}
# Enum dns_rcode
NOERROR = 0 # No Error [RFC1035]
DNSERR_FORMERR = 1 # Format Error [RFC1035]
DNSERR_SERVFAIL = 2 # Server Failure [RFC1035]
DNSERR_NXDOMAIN = 3 # Non-Existent Domain [RFC1035]
DNSERR_NOTIMP = 4 # Not Implemented [RFC1035]
DNSERR_REFUSED = 5 # Query Refused [RFC1035]
DNSERR_YXDOMAIN = 6 # Name Exists when it should not [RFC2136][RFC6672]
DNSERR_YXRRSET = 7 # RR Set Exists when it should not [RFC2136]
DNSERR_NXRRSET = 8 # RR Set that should exist does not [RFC2136]
DNSERR_NOTAUTH = 9 # Server Not Authoritative for zone [RFC2136]
# Not Authorized [RFC2845]
DNSERR_NOTZONE = 10 # Name not contained in zone [RFC2136]
DNSERR_BADVERS = 16 # Bad OPT Version [RFC6891]
# TSIG Signature Failure [RFC2845]
DNSERR_BADKEY = 17 # Key not recognized [RFC2845]
DNSERR_BADTIME = 18 # Signature out of time window [RFC2845]
DNSERR_BADMODE = 19 # Bad TKEY Mode [RFC2930]
DNSERR_BADNAME = 20 # Duplicate key name [RFC2930]
DNSERR_BADALG = 21 # Algorithm not supported [RFC2930]
DNSERR_BADTRUNC = 22 # Bad Truncation [RFC4635]
DNSERR_BADCOOKIE = 23 # Bad/missing Server Cookie [RFC7873]
dns_rcode = {
0 : "NOERROR",
1 : "DNSERR_FORMERR",
2 : "DNSERR_SERVFAIL",
3 : "DNSERR_NXDOMAIN",
4 : "DNSERR_NOTIMP",
5 : "DNSERR_REFUSED",
6 : "DNSERR_YXDOMAIN",
7 : "DNSERR_YXRRSET",
8 : "DNSERR_NXRRSET",
9 : "DNSERR_NOTAUTH",
10 : "DNSERR_NOTZONE",
16 : "DNSERR_BADVERS",
17 : "DNSERR_BADKEY",
18 : "DNSERR_BADTIME",
19 : "DNSERR_BADMODE",
20 : "DNSERR_BADNAME",
21 : "DNSERR_BADALG",
22 : "DNSERR_BADTRUNC",
23 : "DNSERR_BADCOOKIE",
}
# Enum dns_type
A = 1 # Host address
NS = 2 # Authoritative name server
MD = 3 # Mail destination (Obsolete - use MX)
MF = 4 # Mail forwarder (Obsolete - use MX)
CNAME = 5 # Canonical name for an alias
SOA = 6 # Marks the start of a zone of authority
MB = 7 # Mailbox domain name (EXPERIMENTAL)
MG = 8 # Mail group member (EXPERIMENTAL)
MR = 9 # Mail rename domain name (EXPERIMENTAL)
NULL = 10 # Null RR (EXPERIMENTAL)
WKS = 11 # Well known service description
PTR = 12 # Domain name pointer
HINFO = 13 # Host information
MINFO = 14 # Mailbox or mail list information
MX = 15 # Mail exchange
TXT = 16 # Text strings
RP = 17 # Responsible Person [RFC1183]
AFSDB = 18 # AFS Data Base location [RFC1183][RFC5864]
X25 = 19 # X.25 PSDN address [RFC1183]
ISDN = 20 # ISDN address [RFC1183]
RT = 21 # Route Through [RFC1183]
NSAP = 22 # NSAP address, NSAP style A record [RFC1706]
NSAPPTR = 23 # Domain name pointer, NSAP style [RFC1348][RFC1637][RFC1706]
SIG = 24 # Security signature [RFC4034][RFC3755][RFC2535][RFC2536][RFC2537][RFC2931][RFC3110][RFC3008]
KEY = 25 # Security key [RFC4034][RFC3755][RFC2535][RFC2536][RFC2537][RFC2539][RFC3008][RFC3110]
PX = 26 # X.400 mail mapping information [RFC2163]
GPOS = 27 # Geographical Position [RFC1712]
AAAA = 28 # IPv6 address
LOC = 29 # Location record
NXT = 30 # Next Domain (OBSOLETE) [RFC3755][RFC2535]
EID = 31 # Endpoint Identifier
NIMLOC = 32 # Nimrod Locator
SRV = 33 # Service locator
ATMA = 34 # ATM Address
NAPTR = 35 # Naming Authority Pointer [RFC2915][RFC2168][RFC3403]
KX = 36 # Key Exchanger [RFC2230]
CERT = 37 # CERT [RFC4398]
A6 = 38 # A6 (OBSOLETE - use AAAA) [RFC3226][RFC2874][RFC6563]
DNAME = 39 # DNAME [RFC6672]
SINK = 40 # SINK
OPT = 41 # OPT pseudo-RR [RFC6891][RFC3225][RFC2671]
APL = 42 # APL [RFC3123]
DS = 43 # Delegation Signer [RFC4034][RFC3658]
SSHFP = 44 # Secure shell fingerprint
IPSECKEY = 45 # IPSECKEY [RFC4025]
RRSIG = 46 # Resource record digital signature
NSEC = 47 # NSEC [RFC4034][RFC3755]
DNSKEY = 48 # DNSKEY [RFC4034][RFC3755]
DHCID = 49 # DHCID [RFC4701]
NSEC3 = 50 # NSEC3 [RFC5155]
NSEC3PARAM = 51 # NSEC3PARAM [RFC5155]
TLSA = 52 # TLSA [RFC6698]
SMIMEA = 53 # S/MIME cert association [draft-ietf-dane-smime]
HIP = 55 # Host Identity Protocol [RFC5205]
NINFO = 56 # NINFO [Jim_Reid] NINFO/ninfo-completed-template 2008-01-21
RKEY = 57 # RKEY [Jim_Reid] RKEY/rkey-completed-template 2008-01-21
TALINK = 58 # Trust Anchor LINK [Wouter_Wijngaards] TALINK/talink-completed-template 2010-02-17
CDS = 59 # Child DS [RFC7344] CDS/cds-completed-template 2011-06-06
CDNSKEY = 60 # DNSKEY(s) the Child wants reflected in DS [RFC7344] 2014-06-16
OPENPGPKEY = 61 # OpenPGP Key [RFC-ietf-dane-openpgpkey-12] OPENPGPKEY/openpgpkey-completed-template 2014-08-12
CSYNC = 62 # Child-To-Parent Synchronization [RFC7477] 2015-01-27
SPF = 99 # [RFC7208]
UINFO = 100 # [IANA-Reserved]
UID = 101 # [IANA-Reserved]
GID = 102 # [IANA-Reserved]
UNSPEC = 103 # [IANA-Reserved]
NID = 104 # [RFC6742] ILNP/nid-completed-template
L32 = 105 # [RFC6742] ILNP/l32-completed-template
L64 = 106 # [RFC6742] ILNP/l64-completed-template
LP = 107 # [RFC6742] ILNP/lp-completed-template
EUI48 = 108 # EUI-48 address [RFC7043] EUI48/eui48-completed-template 2013-03-27
EUI64 = 109 # EUI-64 address [RFC7043] EUI64/eui64-completed-template 2013-03-27
TKEY = 249 # Transaction Key [RFC2930]
TSIG = 250 # Transaction Signature [RFC2845]
IXFR = 251 # Incremental transfer [RFC1995]
AXFR = 252 # Transfer of an entire zone [RFC1035][RFC5936]
MAILB = 253 # Mailbox-related RRs (MB, MG or MR) [RFC1035]
MAILA = 254 # Mail agent RRs (OBSOLETE - see MX) [RFC1035]
ANY = 255 # Request all records
URI = 256 # URI [RFC7553]
CAA = 257 # Certification Authority Restriction [RFC6844]
AVC = 258 # Application Visibility and Control
TA = 32768 # DNSSEC Trust Authorities
DLV = 32769 # DNSSEC Lookaside Validation [RFC4431]
dns_type = {
1 : "A",
2 : "NS",
3 : "MD",
4 : "MF",
5 : "CNAME",
6 : "SOA",
7 : "MB",
8 : "MG",
9 : "MR",
10 : "NULL",
11 : "WKS",
12 : "PTR",
13 : "HINFO",
14 : "MINFO",
15 : "MX",
16 : "TXT",
17 : "RP",
18 : "AFSDB",
19 : "X25",
20 : "ISDN",
21 : "RT",
22 : "NSAP",
23 : "NSAPPTR",
24 : "SIG",
25 : "KEY",
26 : "PX",
27 : "GPOS",
28 : "AAAA",
29 : "LOC",
30 : "NXT",
31 : "EID",
32 : "NIMLOC",
33 : "SRV",
34 : "ATMA",
35 : "NAPTR",
36 : "KX",
37 : "CERT",
38 : "A6",
39 : "DNAME",
40 : "SINK",
41 : "OPT",
42 : "APL",
43 : "DS",
44 : "SSHFP",
45 : "IPSECKEY",
46 : "RRSIG",
47 : "NSEC",
48 : "DNSKEY",
49 : "DHCID",
50 : "NSEC3",
51 : "NSEC3PARAM",
52 : "TLSA",
53 : "SMIMEA",
55 : "HIP",
56 : "NINFO",
57 : "RKEY",
58 : "TALINK",
59 : "CDS",
60 : "CDNSKEY",
61 : "OPENPGPKEY",
62 : "CSYNC",
99 : "SPF",
100 : "UINFO",
101 : "UID",
102 : "GID",
103 : "UNSPEC",
104 : "NID",
105 : "L32",
106 : "L64",
107 : "LP",
108 : "EUI48",
109 : "EUI64",
249 : "TKEY",
250 : "TSIG",
251 : "IXFR",
252 : "AXFR",
253 : "MAILB",
254 : "MAILA",
255 : "ANY",
256 : "URI",
257 : "CAA",
258 : "AVC",
32768 : "TA",
32769 : "DLV",
}
# Enum dns_class
IN = 1 # Internet
CS = 2 # CSNET (Obsolete)
CH = 3 # Chaos
HS = 4 # Hesiod
NONE = 254 # QCLASS None
ANY = 255 # QCLASS Any
dns_class = {
1 : "IN",
2 : "CS",
3 : "CH",
4 : "HS",
254 : "NONE",
255 : "ANY",
}
# Enum dns_algorithm
RSA = 1 # RSA Algorithm [RFC4255]
DSS = 2 # DSS Algorithm [RFC4255]
ECDSA = 3 # Elliptic Curve Digital Signature Algorithm [RFC6594]
Ed25519 = 4 # Ed25519 Signature Algorithm [RFC7479]
dns_algorithm = {
1 : "RSA",
2 : "DSS",
3 : "ECDSA",
4 : "Ed25519",
}
# Enum dns_fptype
SHA1 = 1 # Secure Hash Algorithm 1
SHA256 = 2 # Secure Hash Algorithm 256
dns_fptype = {
1 : "SHA-1",
2 : "SHA-256",
}
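if __name__ == "__main__":
    # Illustrative lookups (added for exposition, not part of nfstest): the
    # dictionaries above map raw wire values to their symbolic names.
    print(dns_type.get(28, "UNKNOWN"))    # AAAA
    print(dns_class.get(1, "UNKNOWN"))    # IN
    print(dns_rcode.get(3, "UNKNOWN"))    # DNSERR_NXDOMAIN
    print(dns_opcode.get(0, "UNKNOWN"))   # QUERY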
| kofemann/nfstest | packet/application/dns_const.py | Python | gpl-2.0 | 10,472 |
# -*- coding: utf-8 -*-
import tempfile
import os
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.contrib import admin
from django.core.mail import EmailMessage
class Section(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
class Article(models.Model):
"""
A simple article to test admin views. Test backwards compatibility.
"""
title = models.CharField(max_length=100)
content = models.TextField()
date = models.DateTimeField()
section = models.ForeignKey(Section)
def __unicode__(self):
return self.title
def model_year(self):
return self.date.year
model_year.admin_order_field = 'date'
class Book(models.Model):
"""
A simple book that has chapters.
"""
name = models.CharField(max_length=100, verbose_name=u'¿Name?')
def __unicode__(self):
return self.name
class Promo(models.Model):
name = models.CharField(max_length=100, verbose_name=u'¿Name?')
book = models.ForeignKey(Book)
def __unicode__(self):
return self.name
class Chapter(models.Model):
title = models.CharField(max_length=100, verbose_name=u'¿Title?')
content = models.TextField()
book = models.ForeignKey(Book)
def __unicode__(self):
return self.title
class Meta:
verbose_name = u'¿Chapter?'
class ChapterXtra1(models.Model):
chap = models.OneToOneField(Chapter, verbose_name=u'¿Chap?')
xtra = models.CharField(max_length=100, verbose_name=u'¿Xtra?')
def __unicode__(self):
return u'¿Xtra1: %s' % self.xtra
class ChapterXtra2(models.Model):
chap = models.OneToOneField(Chapter, verbose_name=u'¿Chap?')
xtra = models.CharField(max_length=100, verbose_name=u'¿Xtra?')
def __unicode__(self):
return u'¿Xtra2: %s' % self.xtra
def callable_year(dt_value):
return dt_value.year
callable_year.admin_order_field = 'date'
class ArticleInline(admin.TabularInline):
model = Article
class ChapterInline(admin.TabularInline):
model = Chapter
class ArticleAdmin(admin.ModelAdmin):
list_display = ('content', 'date', callable_year, 'model_year', 'modeladmin_year')
list_filter = ('date',)
def changelist_view(self, request):
"Test that extra_context works"
return super(ArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
def modeladmin_year(self, obj):
return obj.date.year
modeladmin_year.admin_order_field = 'date'
class CustomArticle(models.Model):
content = models.TextField()
date = models.DateTimeField()
class CustomArticleAdmin(admin.ModelAdmin):
"""
Tests various hooks for using custom templates and contexts.
"""
change_list_template = 'custom_admin/change_list.html'
change_form_template = 'custom_admin/change_form.html'
object_history_template = 'custom_admin/object_history.html'
delete_confirmation_template = 'custom_admin/delete_confirmation.html'
def changelist_view(self, request):
"Test that extra_context works"
return super(CustomArticleAdmin, self).changelist_view(
request, extra_context={
'extra_var': 'Hello!'
}
)
class ModelWithStringPrimaryKey(models.Model):
id = models.CharField(max_length=255, primary_key=True)
def __unicode__(self):
return self.id
class Color(models.Model):
value = models.CharField(max_length=10)
warm = models.BooleanField()
def __unicode__(self):
return self.value
class Thing(models.Model):
title = models.CharField(max_length=20)
color = models.ForeignKey(Color, limit_choices_to={'warm': True})
def __unicode__(self):
return self.title
class ThingAdmin(admin.ModelAdmin):
list_filter = ('color',)
class Fabric(models.Model):
NG_CHOICES = (
('Textured', (
('x', 'Horizontal'),
('y', 'Vertical'),
)
),
('plain', 'Smooth'),
)
surface = models.CharField(max_length=20, choices=NG_CHOICES)
class FabricAdmin(admin.ModelAdmin):
list_display = ('surface',)
list_filter = ('surface',)
class Person(models.Model):
GENDER_CHOICES = (
(1, "Male"),
(2, "Female"),
)
name = models.CharField(max_length=100)
gender = models.IntegerField(choices=GENDER_CHOICES)
alive = models.BooleanField()
def __unicode__(self):
return self.name
class Meta:
ordering = ["id"]
class PersonAdmin(admin.ModelAdmin):
list_display = ('name', 'gender', 'alive')
list_editable = ('gender', 'alive')
list_filter = ('gender',)
search_fields = (u'name',)
ordering = ["id"]
save_as = True
class Persona(models.Model):
"""
A simple persona associated with accounts, to test inlining of related
accounts which inherit from a common accounts class.
"""
name = models.CharField(blank=False, max_length=80)
def __unicode__(self):
return self.name
class Account(models.Model):
"""
A simple, generic account encapsulating the information shared by all
types of accounts.
"""
username = models.CharField(blank=False, max_length=80)
persona = models.ForeignKey(Persona, related_name="accounts")
servicename = u'generic service'
def __unicode__(self):
return "%s: %s" % (self.servicename, self.username)
class FooAccount(Account):
"""A service-specific account of type Foo."""
servicename = u'foo'
class BarAccount(Account):
"""A service-specific account of type Bar."""
servicename = u'bar'
class FooAccountAdmin(admin.StackedInline):
model = FooAccount
extra = 1
class BarAccountAdmin(admin.StackedInline):
model = BarAccount
extra = 1
class PersonaAdmin(admin.ModelAdmin):
inlines = (
FooAccountAdmin,
BarAccountAdmin
)
class Subscriber(models.Model):
name = models.CharField(blank=False, max_length=80)
email = models.EmailField(blank=False, max_length=175)
def __unicode__(self):
return "%s (%s)" % (self.name, self.email)
class SubscriberAdmin(admin.ModelAdmin):
actions = ['mail_admin']
def mail_admin(self, request, selected):
EmailMessage(
'Greetings from a ModelAdmin action',
'This is the test email from a admin action',
'from@example.com',
['to@example.com']
).send()
class ExternalSubscriber(Subscriber):
pass
class OldSubscriber(Subscriber):
pass
def external_mail(modeladmin, request, selected):
EmailMessage(
'Greetings from a function action',
'This is the test email from a function action',
'from@example.com',
['to@example.com']
).send()
def redirect_to(modeladmin, request, selected):
from django.http import HttpResponseRedirect
return HttpResponseRedirect('/some-where-else/')
class ExternalSubscriberAdmin(admin.ModelAdmin):
actions = [external_mail, redirect_to]
class Media(models.Model):
name = models.CharField(max_length=60)
class Podcast(Media):
release_date = models.DateField()
class PodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'release_date')
list_editable = ('release_date',)
ordering = ('name',)
class Vodcast(Media):
media = models.OneToOneField(Media, primary_key=True, parent_link=True)
released = models.BooleanField(default=False)
class VodcastAdmin(admin.ModelAdmin):
list_display = ('name', 'released')
list_editable = ('released',)
ordering = ('name',)
class Parent(models.Model):
name = models.CharField(max_length=128)
class Child(models.Model):
parent = models.ForeignKey(Parent, editable=False)
name = models.CharField(max_length=30, blank=True)
class ChildInline(admin.StackedInline):
model = Child
class ParentAdmin(admin.ModelAdmin):
model = Parent
inlines = [ChildInline]
class EmptyModel(models.Model):
def __unicode__(self):
return "Primary key = %s" % self.id
class EmptyModelAdmin(admin.ModelAdmin):
def queryset(self, request):
return super(EmptyModelAdmin, self).queryset(request).filter(pk__gt=1)
class OldSubscriberAdmin(admin.ModelAdmin):
actions = None
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class Gallery(models.Model):
name = models.CharField(max_length=100)
class Picture(models.Model):
name = models.CharField(max_length=100)
image = models.FileField(storage=temp_storage, upload_to='test_upload')
gallery = models.ForeignKey(Gallery, related_name="pictures")
class PictureInline(admin.TabularInline):
model = Picture
extra = 1
class GalleryAdmin(admin.ModelAdmin):
inlines = [PictureInline]
class PictureAdmin(admin.ModelAdmin):
pass
class Language(models.Model):
iso = models.CharField(max_length=5, primary_key=True)
name = models.CharField(max_length=50)
english_name = models.CharField(max_length=50)
shortlist = models.BooleanField(default=False)
class Meta:
ordering = ('iso',)
class LanguageAdmin(admin.ModelAdmin):
list_display = ['iso', 'shortlist', 'english_name', 'name']
list_editable = ['shortlist']
# a base class for Recommender and Recommendation
class Title(models.Model):
pass
class TitleTranslation(models.Model):
title = models.ForeignKey(Title)
text = models.CharField(max_length=100)
class Recommender(Title):
pass
class Recommendation(Title):
recommender = models.ForeignKey(Recommender)
class RecommendationAdmin(admin.ModelAdmin):
search_fields = ('titletranslation__text', 'recommender__titletranslation__text',)
class Collector(models.Model):
name = models.CharField(max_length=100)
class Widget(models.Model):
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class DooHickey(models.Model):
code = models.CharField(max_length=10, primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Grommet(models.Model):
code = models.AutoField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Whatsit(models.Model):
index = models.IntegerField(primary_key=True)
owner = models.ForeignKey(Collector)
name = models.CharField(max_length=100)
class Doodad(models.Model):
name = models.CharField(max_length=100)
class FancyDoodad(Doodad):
owner = models.ForeignKey(Collector)
expensive = models.BooleanField(default=True)
class WidgetInline(admin.StackedInline):
model = Widget
class DooHickeyInline(admin.StackedInline):
model = DooHickey
class GrommetInline(admin.StackedInline):
model = Grommet
class WhatsitInline(admin.StackedInline):
model = Whatsit
class FancyDoodadInline(admin.StackedInline):
model = FancyDoodad
class Category(models.Model):
collector = models.ForeignKey(Collector)
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
def __unicode__(self):
return u'%s:o%s' % (self.id, self.order)
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'collector', 'order')
list_editable = ('order',)
class CategoryInline(admin.StackedInline):
model = Category
class CollectorAdmin(admin.ModelAdmin):
inlines = [WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline, FancyDoodadInline, CategoryInline]
admin.site.register(Article, ArticleAdmin)
admin.site.register(CustomArticle, CustomArticleAdmin)
admin.site.register(Section, save_as=True, inlines=[ArticleInline])
admin.site.register(ModelWithStringPrimaryKey)
admin.site.register(Color)
admin.site.register(Thing, ThingAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(Persona, PersonaAdmin)
admin.site.register(Subscriber, SubscriberAdmin)
admin.site.register(ExternalSubscriber, ExternalSubscriberAdmin)
admin.site.register(OldSubscriber, OldSubscriberAdmin)
admin.site.register(Podcast, PodcastAdmin)
admin.site.register(Vodcast, VodcastAdmin)
admin.site.register(Parent, ParentAdmin)
admin.site.register(EmptyModel, EmptyModelAdmin)
admin.site.register(Fabric, FabricAdmin)
admin.site.register(Gallery, GalleryAdmin)
admin.site.register(Picture, PictureAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(Recommendation, RecommendationAdmin)
admin.site.register(Recommender)
admin.site.register(Collector, CollectorAdmin)
admin.site.register(Category, CategoryAdmin)
# We intentionally register Promo and ChapterXtra1 but not Chapter nor ChapterXtra2.
# That way we cover all four cases:
# related ForeignKey object registered in admin
# related ForeignKey object not registered in admin
# related OneToOne object registered in admin
# related OneToOne object not registered in admin
# when deleting Book so as exercise all four troublesome (w.r.t escaping
# and calling force_unicode to avoid problems on Python 2.3) paths through
# contrib.admin.util's get_deleted_objects function.
admin.site.register(Book, inlines=[ChapterInline])
admin.site.register(Promo)
admin.site.register(ChapterXtra1)
| iguzu/gae-django | tests/regressiontests/admin_views/models.py | Python | bsd-3-clause | 13,438 |
from django import forms
from django.utils.translation import gettext_lazy as _
from TWLight.resources.models import Partner
from .models import Application
from ..users.helpers.authorizations import get_valid_partner_authorizations
"""
Lists and characterizes the types of information that partners can require as
part of access grants. See full comment at end of file and docs at
https://github.com/WikipediaLibrary/TWLight/blob/master/docs/developer.md#changing-the-data-collected-on-application-forms
"""
"""
Harvestable from user profile:
Username (all partnerships)
Email (all partnerships)
Required/nonharvestable:
Optional/universal:
Real name (many partners)
Country of residence (Pelican, Numerique)
Occupation (Elsevier)
Affiliation (Elsevier)
Optional/unique:
Questions/comments/concerns (free-text, all partnerships)
Title requested (McFarland, Pelican)
Agreement with Terms of Use (RSUK)
"""
# ~~~~~ Named constants ~~~~~ #
REAL_NAME = "real_name"
COUNTRY_OF_RESIDENCE = "country_of_residence"
OCCUPATION = "occupation"
AFFILIATION = "affiliation"
PARTNER = "partner"
RATIONALE = "rationale"
SPECIFIC_TITLE = "specific_title"
COMMENTS = "comments"
AGREEMENT_WITH_TERMS_OF_USE = "agreement_with_terms_of_use"
ACCOUNT_EMAIL = "account_email"
REQUESTED_ACCESS_DURATION = "requested_access_duration"
# ~~~~ Basic field names ~~~~ #
USER_FORM_FIELDS = [REAL_NAME, COUNTRY_OF_RESIDENCE, OCCUPATION, AFFILIATION]
# These fields are displayed for all partners.
PARTNER_FORM_BASE_FIELDS = [RATIONALE, COMMENTS]
# These fields are displayed only when a specific partner requires that
# information.
PARTNER_FORM_OPTIONAL_FIELDS = [
SPECIFIC_TITLE,
AGREEMENT_WITH_TERMS_OF_USE,
ACCOUNT_EMAIL,
REQUESTED_ACCESS_DURATION,
]
# ~~~~ Field information ~~~~ #
FIELD_TYPES = {
REAL_NAME: forms.CharField(max_length=128),
COUNTRY_OF_RESIDENCE: forms.CharField(max_length=128),
OCCUPATION: forms.CharField(max_length=128),
AFFILIATION: forms.CharField(max_length=128),
PARTNER: forms.ModelChoiceField(
queryset=Partner.objects.all(), widget=forms.HiddenInput
),
RATIONALE: forms.CharField(widget=forms.Textarea),
SPECIFIC_TITLE: forms.CharField(max_length=128),
COMMENTS: forms.CharField(widget=forms.Textarea, required=False),
AGREEMENT_WITH_TERMS_OF_USE: forms.BooleanField(),
ACCOUNT_EMAIL: forms.EmailField(),
REQUESTED_ACCESS_DURATION: forms.ChoiceField(
choices=Application.REQUESTED_ACCESS_DURATION_CHOICES
),
}
FIELD_LABELS = {
# Translators: When filling out an application, users may need to specify their name
REAL_NAME: _("Your real name"),
# Translators: When filling out an application, users may need to specify the country in which they currently live
COUNTRY_OF_RESIDENCE: _("Your country of residence"),
# Translators: When filling out an application, users may need to specify their current occupation
OCCUPATION: _("Your occupation"),
# Translators: When filling out an application, users may need to specify if they are affiliated with an institution (e.g. a university)
AFFILIATION: _("Your institutional affiliation"),
# Translators: When filling out an application, this labels the name of the publisher or database the user is applying to
PARTNER: _("Partner name"),
# Translators: When filling out an application, users must provide an explanation of why these resources would be useful to them
RATIONALE: _("Why do you want access to this resource?"),
# Translators: When filling out an application, users may need to specify a particular book they want access to
SPECIFIC_TITLE: _("Which book do you want?"),
# Translators: When filling out an application, users are given a text box where they can include any extra relevant information
COMMENTS: _("Anything else you want to say"),
# Translators: When filling out an application, users may be required to check a box to say they agree with the website's Terms of Use document, which is linked
AGREEMENT_WITH_TERMS_OF_USE: _("You must agree with the partner's terms of use"),
# Translators: When filling out an application, users may be required to enter an email they have used to register on the partner's website.
ACCOUNT_EMAIL: _("The email for your account on the partner's website"),
# fmt: off
# Translators: When filling out an application, users may be required to enter the length of the account (expiry) they wish to have for proxy partners.
REQUESTED_ACCESS_DURATION: _("The number of months you wish to have this access for before renewal is required"),
# fmt: on
}
SEND_DATA_FIELD_LABELS = {
# Translators: When sending application data to partners, this is the text labelling a user's real name
REAL_NAME: _("Real name"),
# Translators: When sending application data to partners, this is the text labelling a user's country of residence
COUNTRY_OF_RESIDENCE: _("Country of residence"),
# Translators: When sending application data to partners, this is the text labelling a user's occupation
OCCUPATION: _("Occupation"),
# Translators: When sending application data to partners, this is the text labelling a user's affiliation
AFFILIATION: _("Affiliation"),
# Translators: When sending application data to partners, this is the text labelling the specific title (e.g. a particular book) a user requested
SPECIFIC_TITLE: _("Title requested"),
# Translators: When sending application data to partners, this is the text labelling whether a user agreed with the partner's Terms of Use
AGREEMENT_WITH_TERMS_OF_USE: _("Agreed with terms of use"),
# Translators: When sending application data to partners, this is the text labelling the user's email on the partner's website, if they had to register in advance of applying.
ACCOUNT_EMAIL: _("Account email"),
}
def get_output_for_application(app):
"""
This collates the data that we need to send to publishers for a given
application. Since different publishers require different data and we don't
want to share personal data where not required, we construct this function
to fetch only the required data rather than displaying all of Application
plus Editor in the front end.
"""
# Translators: This labels a user's email address on a form for account coordinators
output = {_("Email"): {"label": "Email", "data": app.editor.user.email}}
for field in PARTNER_FORM_OPTIONAL_FIELDS:
# Since we directly mark applications made to proxy partners as 'sent', this function wouldn't be invoked.
        # But for tests, and on the off chance we stumble into this function when requested_access_duration is true
        # and the partner isn't a proxy, we don't want the data to be sent to partners, which is why it's not part
# of the SEND_DATA_FIELD_LABELS.
if field == "requested_access_duration":
break
if getattr(app.partner, field): # Will be True if required by Partner.
field_label = SEND_DATA_FIELD_LABELS[field]
output[field] = {"label": field_label, "data": getattr(app, field)}
for field in USER_FORM_FIELDS:
if getattr(app.partner, field): # Will be True if required by Partner.
field_label = SEND_DATA_FIELD_LABELS[field]
output[field] = {"label": field_label, "data": getattr(app.editor, field)}
return output
def count_valid_authorizations(partner_pk):
"""
Retrieves the numbers of valid authorizations using the
get_valid_partner_authorizations() method above.
"""
return get_valid_partner_authorizations(partner_pk).count()
def get_accounts_available(app):
"""
Because we allow number of accounts available on the partner level,
we base our calculations on the partner level.
"""
if app.partner.accounts_available is not None:
valid_authorizations = count_valid_authorizations(app.partner)
return app.partner.accounts_available - valid_authorizations
def is_proxy_and_application_approved(status, app):
if (
app.partner.authorization_method == Partner.PROXY
and status == Application.APPROVED
):
return True
else:
return False
def more_applications_than_accounts_available(app):
total_accounts_available_for_distribution = get_accounts_available(app)
if total_accounts_available_for_distribution is not None and app.status in [
Application.PENDING,
Application.QUESTION,
]:
total_pending_apps = Application.objects.filter(
partner=app.partner, status__in=[Application.PENDING, Application.QUESTION]
)
if (
app.partner.status != Partner.WAITLIST
and total_accounts_available_for_distribution > 0
and total_accounts_available_for_distribution - total_pending_apps.count()
< 0
):
return True
return False
def get_application_field_params_json_schema():
"""
JSON Schema for the field_params object that will be used to create the form
for the application
"""
JSON_SCHEMA_FIELD_PARAMS = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"properties": {
"partner": {
"$id": "#/properties/partner",
"type": "array",
"items": {
"$id": "#/properties/partner/items",
"type": "string",
"examples": ["specific_title", "agreement_with_terms_of_use"],
},
},
"partner_id": {
"$id": "#/properties/partner_id",
"type": "number",
},
"user": {
"$id": "#/properties/user",
"type": "object",
},
},
"additionalProperties": False,
"required": ["partner", "partner_id", "user"],
}
return JSON_SCHEMA_FIELD_PARAMS
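# Illustrative sketch (added for exposition; not part of TWLight): a payload of
# the shape accepted by the schema above could be checked with the third-party
# "jsonschema" package.  The field values are hypothetical.
#
#     import jsonschema
#
#     field_params = {
#         "partner": ["specific_title", "agreement_with_terms_of_use"],
#         "partner_id": 42,
#         "user": {"real_name": "Jane Doe"},
#     }
#     jsonschema.validate(field_params, get_application_field_params_json_schema())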
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
"""
Information comes in three types:
1) Information required for all access grants. (Example: wikipedia username)
2) Information that only some partners require, but which will be the same for
all partners in any given application. (Example: country of residence)
3) Information that only some partners require, and that may differ for
different partners. (Example: rationale for resource request)
These facts about required/optional status are used to generate application
forms. In particular, we can generate application forms which impose the
smallest possible data entry burden on users by:
* omitting optional fields if they aren't required by any of the requested
partners;
* asking for optional information only once per application, rather than once
per partner, if it will be the same for all partners.
Facts related to this file are hardcoded in three other places in the database:
1) In TWLight.resources.models.Partner, which tracks whether a given partner
requires the optional information;
2) In TWLight.applications.forms.Application, which has fields for all
possible partner-specific information (though any given application instance
may leave optional fields blank).
3) In TWLight.users.models.Editor, which records user data.
Why this hardcoding? Well, having defined database models lets us take advantage
of an awful lot of Django machinery. Also, dynamically generating everything on
the fly might be slow and lead to hard-to-read code.
applications.tests checks to make sure that these three sources are in agreement
about the optional data fields available: both their names and their types. It
also checks that the constructed application form fields match those types.
"""
| WikipediaLibrary/TWLight | TWLight/applications/helpers.py | Python | mit | 11,898 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib import request
from tests.integrated import base
class StatusTestCase(base.IntegrationTest):
def _get_config(self):
port = base.get_free_port()
self.url = "http://localhost:%s" % port
conf = {
"service": {
"name": "status",
"module": "rallyci.services.status",
"listen": ["localhost", port],
}
}
return [[conf], [port]]
def test_index(self):
r = request.urlopen(self.url)
self.assertIsNotNone(r)
| redixin/rally-ci | tests/integrated/test_services_status.py | Python | apache-2.0 | 1,144 |
# Snapshot Example
#
# Note: You will need an SD card to run this example.
#
# You can use your OpenMV Cam to save image files.
import sensor, image, pyb
RED_LED_PIN = 1
BLUE_LED_PIN = 3
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
pyb.LED(RED_LED_PIN).on()
sensor.skip_frames(time = 2000) # Give the user time to get ready.
pyb.LED(RED_LED_PIN).off()
pyb.LED(BLUE_LED_PIN).on()
print("You're on camera!")
sensor.snapshot().save("example.jpg") # or "example.bmp" (or others)
pyb.LED(BLUE_LED_PIN).off()
print("Done! Reset the camera to see the saved image.")
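# Optional variation (illustrative only, not part of the original example): give
# each snapshot a unique name so repeated runs don't overwrite "example.jpg".
# The naming scheme below is a hypothetical choice.
#
#     sensor.snapshot().save("snap-%d.jpg" % pyb.millis())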
| openmv/openmv | scripts/examples/OpenMV/05-Snapshot/snapshot.py | Python | mit | 754 |
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import unittest
from PySide2.QtCore import QObject, Signal
o = QObject()
class MyObject(QObject):
s = Signal(int)
class CheckSignalType(unittest.TestCase):
def testSignal(self):
self.assertTrue(isinstance(QObject.destroyed, Signal))
self.assertEqual(type(QObject.destroyed), Signal)
self.assertEqual(type(o.destroyed).__name__, "SignalInstance")
self.assertNotEqual(type(o.destroyed), Signal)
self.assertTrue(isinstance(o.destroyed, Signal))
self.assertTrue(isinstance(MyObject.s, Signal))
self.assertFalse(isinstance(int, Signal))
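# Illustrative sketch (added for exposition, not part of the test): the custom
# signal declared on MyObject above can be connected and emitted like any other
# Qt signal; the slot below is a hypothetical example.
#
#     def on_s(value):
#         print("received", value)
#
#     obj = MyObject()
#     obj.s.connect(on_s)
#     obj.s.emit(7)        # calls on_s(7)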
if __name__ == '__main__':
unittest.main()
| qtproject/pyside-pyside | tests/QtCore/bug_931.py | Python | lgpl-2.1 | 1,913 |
#!/usr/bin/env python
#The MIT License (MIT)
#Copyright (c) 2020 Massimiliano Patacchiola
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#Tested on Ubuntu 18.04.3 LTS, OpenCV 4.1.2, Python 3
#Comparison of 4 different methods on corner detection.
#Parameters set such that all the methods will find around 500 keypoints in the video.
#You need a video named "video.mp4" in your script folder for running the code.
#Videos sourced from: https://www.pexels.com/videos
import cv2
import numpy as np
from operator import itemgetter
#print(cv2.__version__)
video_capture = cv2.VideoCapture("./video.mp4")
fourcc = cv2.VideoWriter_fourcc(*'XVID')
#out = cv2.VideoWriter("./original.avi", fourcc, 24.0, (3840,2160))
out_harris = cv2.VideoWriter("./harris.avi", fourcc, 24.0, (3840,2160))
out_shitomasi = cv2.VideoWriter("./shitomasi.avi", fourcc, 24.0, (3840,2160))
out_fast = cv2.VideoWriter("./fast.avi", fourcc, 24.0, (3840,2160))
out_orb = cv2.VideoWriter("./orb.avi", fourcc, 24.0, (3840,2160))
while(True):
ret, frame = video_capture.read()
if(frame is None): break #check for empty frames (en of video)
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
##Harris
mask_harris = cv2.cornerHarris(np.float32(frame_gray), blockSize=2, ksize=3, k=0.04) #2, 3, 0.04 // 2, 5, 0.07
#mask_harris = cv2.dilate(mask_harris, None)
cutout = np.sort(mask_harris.flatten())[-500] #sort from smaller to higher, then take index for cutout
corners = np.where(mask_harris > cutout)
corners = zip(corners[0], corners[1])
kp = list()
for i in corners: kp.append(cv2.KeyPoint(i[1], i[0], 20))
frame_harris = cv2.drawKeypoints(frame_gray, kp, None, [0, 0, 255],
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
##Shi-Tomasi
#maxCorners: Maximum number of corners to return
#qualityLevel: Parameter characterizing the minimal accepted quality of image corners.
#minDistance: Minimum possible Euclidean distance between the returned corners.
#blockSize: Size of an average block for computing a derivative covariation matrix over each pixel neighborhood.
corners = cv2.goodFeaturesToTrack(frame_gray, maxCorners=500, qualityLevel=0.01, minDistance=10, blockSize=2)
corners = np.int0(corners)
kp = list()
for i in corners: kp.append(cv2.KeyPoint(i.ravel()[0], i.ravel()[1], 20))
frame_shitomasi = cv2.drawKeypoints(frame_gray, kp, None, [0, 0, 255],
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
##FAST
#It is several times faster than other existing corner detectors.
#But it is not robust to high levels of noise. It is dependent on a threshold.
#threshold: a threshold over the point to keep
#nonmaxSuppression: wheter non-maximum suppression is to be applied or not
#Neighborhood (three flags) cv.FAST_FEATURE_DETECTOR_TYPE_5_8 / 7_12 / 9_16
#frame_fast = np.copy(frame)
    #Here I choose the magic number 165 for the threshold so that it finds around 500 corners.
fast = cv2.FastFeatureDetector_create(threshold=165, nonmaxSuppression=True,
type=cv2.FAST_FEATURE_DETECTOR_TYPE_9_16) #default is TYPE_9_16
kp = fast.detect(frame_gray, None)
#print(len(kp)) #use this print to check how many keypoints are found by FAST
##Uncomment these two lines if you want to randomly pick 500 keypoints
#indices = np.random.choice(len(kp), 500, replace=False)
#kp = itemgetter(*indices.tolist())(kp)
for i in kp: i.size=20 #changing the diameter to make it coherent with the other methods
frame_fast = cv2.drawKeypoints(frame_gray, kp, None, [0, 0, 255],
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
##ORB (Oriented FAST and Rotated BRIEF)
    #First it uses FAST to find keypoints, then applies the Harris corner measure
#to find top N points among them.
#nFeatures: maximum number of features to be retained
#scoreType: whether Harris score or FAST score to rank the features (default: Harris)
orb = cv2.ORB_create(nfeatures=500)
kp = orb.detect(frame_gray, None)
kp, des = orb.compute(frame_gray, kp)
frame_orb = cv2.drawKeypoints(frame_gray, kp, None, [0, 0, 255],
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#Writing in the output file
#out.write(frame)
out_harris.write(frame_harris)
out_shitomasi.write(frame_shitomasi)
out_fast.write(frame_fast)
out_orb.write(frame_orb)
#Showing the frame and waiting for the exit command
#cv2.imshow('Original', frame) #show on window
cv2.imshow('Harris', frame_harris) #show on window
cv2.imshow('Shi-Tomasi', frame_shitomasi) #show on window
cv2.imshow('FAST', frame_fast) #show on window
cv2.imshow('ORB', frame_orb) #show on window
if cv2.waitKey(1) & 0xFF == ord('q'): break #Exit when Q is pressed
#Release the camera
video_capture.release()
print("Bye...")
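# Illustrative tuning sketch (added for exposition; "image.jpg" is a hypothetical
# file): the same detectors can be tried on a single still image to pick
# thresholds before processing a whole video.
#
#     img = cv2.imread("image.jpg")
#     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#     fast = cv2.FastFeatureDetector_create(threshold=100, nonmaxSuppression=True)
#     print("FAST keypoints: %d" % len(fast.detect(gray, None)))
#     orb = cv2.ORB_create(nfeatures=500)
#     print("ORB keypoints: %d" % len(orb.detect(gray, None)))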
| mpatacchiola/deepgaze | examples/ex_corner_detection_video/ex_corner_detection.py | Python | mit | 5,486 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""Use blog post test to test user permissions logic"""
import frappe
import frappe.defaults
import unittest
import json
from frappe.desk.doctype.event.event import get_events
from frappe.test_runner import make_test_objects
test_records = frappe.get_test_records('Event')
class TestEvent(unittest.TestCase):
def setUp(self):
frappe.db.sql('delete from tabEvent')
make_test_objects('Event', reset=True)
self.test_records = frappe.get_test_records('Event')
self.test_user = "test1@example.com"
def tearDown(self):
frappe.set_user("Administrator")
def test_allowed_public(self):
frappe.set_user(self.test_user)
doc = frappe.get_doc("Event", frappe.db.get_value("Event", {"subject":"_Test Event 1"}))
self.assertTrue(frappe.has_permission("Event", doc=doc))
def test_not_allowed_private(self):
frappe.set_user(self.test_user)
doc = frappe.get_doc("Event", frappe.db.get_value("Event", {"subject":"_Test Event 2"}))
self.assertFalse(frappe.has_permission("Event", doc=doc))
def test_allowed_private_if_in_event_user(self):
name = frappe.db.get_value("Event", {"subject":"_Test Event 3"})
frappe.share.add("Event", name, self.test_user, "read")
frappe.set_user(self.test_user)
doc = frappe.get_doc("Event", name)
self.assertTrue(frappe.has_permission("Event", doc=doc))
frappe.set_user("Administrator")
frappe.share.remove("Event", name, self.test_user)
def test_event_list(self):
frappe.set_user(self.test_user)
res = frappe.get_list("Event", filters=[["Event", "subject", "like", "_Test Event%"]], fields=["name", "subject"])
self.assertEqual(len(res), 1)
subjects = [r.subject for r in res]
self.assertTrue("_Test Event 1" in subjects)
self.assertFalse("_Test Event 3" in subjects)
self.assertFalse("_Test Event 2" in subjects)
def test_revert_logic(self):
ev = frappe.get_doc(self.test_records[0]).insert()
name = ev.name
frappe.delete_doc("Event", ev.name)
# insert again
ev = frappe.get_doc(self.test_records[0]).insert()
		# the name should be the same!
self.assertEqual(ev.name, name)
def test_assign(self):
from frappe.desk.form.assign_to import add
ev = frappe.get_doc(self.test_records[0]).insert()
add({
"assign_to": "test@example.com",
"doctype": "Event",
"name": ev.name,
"description": "Test Assignment"
})
ev = frappe.get_doc("Event", ev.name)
self.assertEqual(ev._assign, json.dumps(["test@example.com"]))
# add another one
add({
"assign_to": self.test_user,
"doctype": "Event",
"name": ev.name,
"description": "Test Assignment"
})
ev = frappe.get_doc("Event", ev.name)
self.assertEqual(set(json.loads(ev._assign)), set(["test@example.com", self.test_user]))
# close an assignment
todo = frappe.get_doc("ToDo", {"reference_type": ev.doctype, "reference_name": ev.name,
"owner": self.test_user})
todo.status = "Closed"
todo.save()
ev = frappe.get_doc("Event", ev.name)
self.assertEqual(ev._assign, json.dumps(["test@example.com"]))
# cleanup
ev.delete()
def test_recurring(self):
ev = frappe.get_doc({
"doctype":"Event",
"subject": "_Test Event",
"starts_on": "2014-02-01",
"event_type": "Public",
"repeat_this_event": 1,
"repeat_on": "Every Year"
})
ev.insert()
ev_list = get_events("2014-02-01", "2014-02-01", "Administrator", for_reminder=True)
self.assertTrue(bool(list(filter(lambda e: e.name==ev.name, ev_list))))
ev_list1 = get_events("2015-01-20", "2015-01-20", "Administrator", for_reminder=True)
self.assertFalse(bool(list(filter(lambda e: e.name==ev.name, ev_list1))))
ev_list2 = get_events("2014-02-20", "2014-02-20", "Administrator", for_reminder=True)
self.assertFalse(bool(list(filter(lambda e: e.name==ev.name, ev_list2))))
ev_list3 = get_events("2015-02-01", "2015-02-01", "Administrator", for_reminder=True)
self.assertTrue(bool(list(filter(lambda e: e.name==ev.name, ev_list3))))
| ESS-LLP/frappe | frappe/desk/doctype/event/test_event.py | Python | mit | 4,053 |
"""scan URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from backend.views import *
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'post/', post),
url(r'posts/', getPosts),
url(r'auth/', auth),
url(r'register/', register),
url(r'vote/', vote),
url(r'lookup/', lookup),
url(r'search/', search),
]
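# Minimal sketch of the docstring's other two recipes (class-based views and a
# nested URLconf); `Home` and `blog.urls` are hypothetical and not part of this
# project, so the lines are kept commented to leave the module importable:
#
# from django.conf.urls import include
# from other_app.views import Home
#
# urlpatterns += [
#     url(r'^$', Home.as_view(), name='home'),
#     url(r'^blog/', include('blog.urls')),
# ]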
| phat-plats/backend | scan/scan/urls.py | Python | mit | 984 |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import specs.fbthrift as fbthrift
import specs.fmt as fmt
import specs.folly as folly
import specs.gmock as gmock
import specs.sodium as sodium
from shell_quoting import ShellQuoted
def fbcode_builder_spec(builder):
builder.add_option('zeromq/libzmq:git_hash', 'v4.2.2')
return {
'depends_on': [fmt, folly, fbthrift, gmock, sodium],
'steps': [
builder.github_project_workdir('zeromq/libzmq', '.'),
builder.step('Build and install zeromq/libzmq', [
builder.run(ShellQuoted('./autogen.sh')),
builder.configure(),
builder.make_and_install(),
]),
builder.fb_github_project_workdir('fbzmq/_build', 'facebook'),
builder.step('Build and install fbzmq/', [
builder.cmake_configure('fbzmq/_build'),
# we need the pythonpath to find the thrift compiler
builder.run(ShellQuoted(
'PYTHONPATH="$PYTHONPATH:"{p}/lib/python2.7/site-packages '
'make -j {n}'
).format(p=builder.option('prefix'), n=builder.option('make_parallelism'))),
builder.run(ShellQuoted('make install')),
]),
],
}
| nodakai/watchman | build/fbcode_builder/specs/fbzmq.py | Python | apache-2.0 | 1,473 |
"""Test the Emulated Hue component."""
from unittest.mock import MagicMock, Mock, patch
from homeassistant.components.emulated_hue import Config
def test_config_google_home_entity_id_to_number():
"""Test config adheres to the type."""
mock_hass = Mock()
mock_hass.config.path = MagicMock("path", return_value="test_path")
conf = Config(mock_hass, {"type": "google_home"})
with patch(
"homeassistant.components.emulated_hue.load_json",
return_value={"1": "light.test2"},
) as json_loader:
with patch("homeassistant.components.emulated_hue" ".save_json") as json_saver:
number = conf.entity_id_to_number("light.test")
assert number == "2"
assert json_saver.mock_calls[0][1][1] == {
"1": "light.test2",
"2": "light.test",
}
assert json_saver.call_count == 1
assert json_loader.call_count == 1
number = conf.entity_id_to_number("light.test")
assert number == "2"
assert json_saver.call_count == 1
number = conf.entity_id_to_number("light.test2")
assert number == "1"
assert json_saver.call_count == 1
entity_id = conf.number_to_entity_id("1")
assert entity_id == "light.test2"
def test_config_google_home_entity_id_to_number_altered():
"""Test config adheres to the type."""
mock_hass = Mock()
mock_hass.config.path = MagicMock("path", return_value="test_path")
conf = Config(mock_hass, {"type": "google_home"})
with patch(
"homeassistant.components.emulated_hue.load_json",
return_value={"21": "light.test2"},
) as json_loader:
with patch("homeassistant.components.emulated_hue" ".save_json") as json_saver:
number = conf.entity_id_to_number("light.test")
assert number == "22"
assert json_saver.call_count == 1
assert json_loader.call_count == 1
assert json_saver.mock_calls[0][1][1] == {
"21": "light.test2",
"22": "light.test",
}
number = conf.entity_id_to_number("light.test")
assert number == "22"
assert json_saver.call_count == 1
number = conf.entity_id_to_number("light.test2")
assert number == "21"
assert json_saver.call_count == 1
entity_id = conf.number_to_entity_id("21")
assert entity_id == "light.test2"
def test_config_google_home_entity_id_to_number_empty():
"""Test config adheres to the type."""
mock_hass = Mock()
mock_hass.config.path = MagicMock("path", return_value="test_path")
conf = Config(mock_hass, {"type": "google_home"})
with patch(
"homeassistant.components.emulated_hue.load_json", return_value={}
) as json_loader:
with patch("homeassistant.components.emulated_hue" ".save_json") as json_saver:
number = conf.entity_id_to_number("light.test")
assert number == "1"
assert json_saver.call_count == 1
assert json_loader.call_count == 1
assert json_saver.mock_calls[0][1][1] == {"1": "light.test"}
number = conf.entity_id_to_number("light.test")
assert number == "1"
assert json_saver.call_count == 1
number = conf.entity_id_to_number("light.test2")
assert number == "2"
assert json_saver.call_count == 2
entity_id = conf.number_to_entity_id("2")
assert entity_id == "light.test2"
def test_config_alexa_entity_id_to_number():
"""Test config adheres to the type."""
conf = Config(None, {"type": "alexa"})
number = conf.entity_id_to_number("light.test")
assert number == "light.test"
number = conf.entity_id_to_number("light.test")
assert number == "light.test"
number = conf.entity_id_to_number("light.test2")
assert number == "light.test2"
entity_id = conf.number_to_entity_id("light.test")
assert entity_id == "light.test"
| leppa/home-assistant | tests/components/emulated_hue/test_init.py | Python | apache-2.0 | 4,112 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((1566.86, 2827.83, 5826.01), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((1608.32, 3103.53, 5779.84), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((1847.96, 3012.47, 5485.36), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((1870.1, 3300.96, 5791.83), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((1906.16, 3689.14, 6035.29), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((1942.17, 3633.96, 5474.97), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((1994.9, 3662.89, 4987.34), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2110.13, 3934.64, 5421.98), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((2230.37, 4273.74, 5809.41), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2159.9, 4212.09, 5405.84), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((1987.44, 4015.9, 5005.47), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((1721.58, 3694.45, 4570.62), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((1314.35, 3747.79, 4369.78), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((1005.87, 3877.15, 4163.74), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((565.496, 4018.71, 3956.13), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((-13.9147, 3883.07, 3806.88), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((-551.357, 3626.04, 3748.52), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((-711.893, 3345.94, 4019.03), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((-889.483, 3176.47, 4470.47), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((-1288.08, 3090.61, 5102.43), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((-721.898, 3102.72, 4844.55), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((-434.552, 3446.47, 4811.73), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((-311.948, 3827.15, 4967.55), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((-17.1215, 3801.73, 5114.47), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((-33.8019, 3553, 5258.83), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((-93.204, 3315.37, 5092.96), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((189.9, 3394.29, 4742.47), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((504.127, 3079.9, 4316.09), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((779.094, 2782.72, 3922.16), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((1167.35, 2954.8, 3962.25), (0.7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((1392.68, 3149.4, 3877.15), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((1085.04, 3131.15, 3879.74), (0.7, 0.7, 0.7), 156.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((1069.54, 2878.62, 4035.63), (0.7, 0.7, 0.7), 183.244)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((1043.58, 2640.63, 4170.39), (0.7, 0.7, 0.7), 181.382)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((967.867, 2455.69, 4122.13), (0.7, 0.7, 0.7), 101.943)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((690.203, 2249.77, 4267.11), (1, 0.7, 0), 138.913)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((1204.94, 2755.69, 5047.76), (0.7, 0.7, 0.7), 221.737)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((1730.32, 2891.25, 5672.62), (0.7, 0.7, 0.7), 256.38)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((2326.3, 2747.16, 5735.75), (0.7, 0.7, 0.7), 221.694)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((2607.75, 2833.57, 5128.6), (0.7, 0.7, 0.7), 259.341)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((2397.57, 3044.92, 4379.12), (0.7, 0.7, 0.7), 117.89)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((1793.09, 3025.04, 3778.32), (0.7, 0.7, 0.7), 116.071)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((1578.99, 2610.32, 3597.68), (0.7, 0.7, 0.7), 268.224)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((1807.29, 2367.07, 3661.13), (0.7, 0.7, 0.7), 386.918)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((2278.71, 1963.58, 3440.32), (0.7, 0.7, 0.7), 121.316)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((2673.98, 1847.74, 3639.42), (0.7, 0.7, 0.7), 138.363)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((2118.99, 2009.73, 4020.31), (1, 0.7, 0), 175.207)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((2729.47, 1688.37, 4154.2), (0.7, 0.7, 0.7), 131.468)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((3194.62, 1144.83, 4289.22), (0.7, 0.7, 0.7), 287.894)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((2919.85, 1203.57, 3834.31), (0.7, 0.7, 0.7), 88.1109)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((2543.66, 1626.93, 3580.58), (0.7, 0.7, 0.7), 145.385)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((2458.31, 1746.83, 3394.55), (0.7, 0.7, 0.7), 155.452)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((2901.32, 1304.88, 3306.72), (0.7, 0.7, 0.7), 145.512)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((3304.81, 1003.38, 3211.69), (0.7, 0.7, 0.7), 99.9972)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((3701.46, 835.813, 3104.07), (0.7, 0.7, 0.7), 327.529)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((3636.46, 1441.65, 2900.24), (0.7, 0.7, 0.7), 137.983)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((3219.66, 1721.63, 2824.82), (0.7, 0.7, 0.7), 83.3733)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2712.71, 1978.48, 2832.56), (0.7, 0.7, 0.7), 101.562)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2253.68, 2183.64, 2976.27), (0.7, 0.7, 0.7), 165.689)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((2169.72, 1935.3, 3108.25), (0.7, 0.7, 0.7), 136.925)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((2132.39, 1788.16, 3140), (0.7, 0.7, 0.7), 123.389)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2448.98, 1631.05, 2867.19), (0.7, 0.7, 0.7), 184.47)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((2947.59, 1265.24, 2353.97), (0.7, 0.7, 0.7), 148.473)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((3525.54, 739.349, 1750.76), (0.7, 0.7, 0.7), 241.406)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((3554.02, 1287.96, 2118.05), (0.7, 0.7, 0.7), 182.736)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((3454.41, 1546.57, 2486.2), (0.7, 0.7, 0.7), 166.62)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((3189.93, 1446.29, 2565.95), (0.7, 0.7, 0.7), 113.872)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((2971.86, 1552.22, 2779.91), (0.7, 0.7, 0.7), 110.065)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((2790.82, 1484.32, 3108.46), (0.7, 0.7, 0.7), 150.08)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((2634.03, 1284.41, 3509.56), (0.7, 0.7, 0.7), 118.525)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((2433.45, 883.163, 3813.74), (0.7, 0.7, 0.7), 163.955)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((2431.39, 500.87, 3678.8), (0.7, 0.7, 0.7), 170.131)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((2859.43, 493.175, 3056.76), (0.7, 0.7, 0.7), 78.2127)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((3376.42, 599.199, 2419.89), (0.7, 0.7, 0.7), 251.896)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((3844.17, 873.3, 2011.13), (0.7, 0.7, 0.7), 167.55)
if "particle_75 geometry" not in marker_sets:
s=new_marker_set('particle_75 geometry')
marker_sets["particle_75 geometry"]=s
s= marker_sets["particle_75 geometry"]
mark=s.place_marker((4082.81, 1209.5, 1912.67), (0.7, 0.7, 0.7), 167.846)
if "particle_76 geometry" not in marker_sets:
s=new_marker_set('particle_76 geometry')
marker_sets["particle_76 geometry"]=s
s= marker_sets["particle_76 geometry"]
mark=s.place_marker((3850.49, 947.357, 1594.13), (0.7, 0.7, 0.7), 259.68)
if "particle_77 geometry" not in marker_sets:
s=new_marker_set('particle_77 geometry')
marker_sets["particle_77 geometry"]=s
s= marker_sets["particle_77 geometry"]
mark=s.place_marker((3671.6, 561.739, 1759.96), (0.7, 0.7, 0.7), 80.2854)
if "particle_78 geometry" not in marker_sets:
s=new_marker_set('particle_78 geometry')
marker_sets["particle_78 geometry"]=s
s= marker_sets["particle_78 geometry"]
mark=s.place_marker((3795.75, 394.214, 1759.24), (0.7, 0.7, 0.7), 82.4427)
if "particle_79 geometry" not in marker_sets:
s=new_marker_set('particle_79 geometry')
marker_sets["particle_79 geometry"]=s
s= marker_sets["particle_79 geometry"]
mark=s.place_marker((3845.45, 245.206, 1417.21), (0.7, 0.7, 0.7), 212.811)
if "particle_80 geometry" not in marker_sets:
s=new_marker_set('particle_80 geometry')
marker_sets["particle_80 geometry"]=s
s= marker_sets["particle_80 geometry"]
mark=s.place_marker((3390.87, 705.972, 1051.64), (0.7, 0.7, 0.7), 176.391)
if "particle_81 geometry" not in marker_sets:
s=new_marker_set('particle_81 geometry')
marker_sets["particle_81 geometry"]=s
s= marker_sets["particle_81 geometry"]
mark=s.place_marker((2956.49, 1243.03, 1256.98), (0.7, 0.7, 0.7), 99.3204)
if "particle_82 geometry" not in marker_sets:
s=new_marker_set('particle_82 geometry')
marker_sets["particle_82 geometry"]=s
s= marker_sets["particle_82 geometry"]
mark=s.place_marker((2539.82, 1418.93, 1655.44), (0.7, 0.7, 0.7), 166.62)
if "particle_83 geometry" not in marker_sets:
s=new_marker_set('particle_83 geometry')
marker_sets["particle_83 geometry"]=s
s= marker_sets["particle_83 geometry"]
mark=s.place_marker((2221.53, 1405.97, 1706.83), (0.7, 0.7, 0.7), 102.831)
if "particle_84 geometry" not in marker_sets:
s=new_marker_set('particle_84 geometry')
marker_sets["particle_84 geometry"]=s
s= marker_sets["particle_84 geometry"]
mark=s.place_marker((2624.39, 994.39, 1057.32), (0.7, 0.7, 0.7), 65.0997)
if "particle_85 geometry" not in marker_sets:
s=new_marker_set('particle_85 geometry')
marker_sets["particle_85 geometry"]=s
s= marker_sets["particle_85 geometry"]
mark=s.place_marker((3069.1, 1084.75, 1385), (0.7, 0.7, 0.7), 92.1294)
if "particle_86 geometry" not in marker_sets:
s=new_marker_set('particle_86 geometry')
marker_sets["particle_86 geometry"]=s
s= marker_sets["particle_86 geometry"]
mark=s.place_marker((3269.73, 1344.34, 1906.93), (0.7, 0.7, 0.7), 194.791)
if "particle_87 geometry" not in marker_sets:
s=new_marker_set('particle_87 geometry')
marker_sets["particle_87 geometry"]=s
s= marker_sets["particle_87 geometry"]
mark=s.place_marker((3495.63, 1604.55, 2232.4), (0.7, 0.7, 0.7), 120.766)
if "particle_88 geometry" not in marker_sets:
s=new_marker_set('particle_88 geometry')
marker_sets["particle_88 geometry"]=s
s= marker_sets["particle_88 geometry"]
mark=s.place_marker((3982.13, 1569.36, 1928.1), (0.7, 0.7, 0.7), 217.803)
if "particle_89 geometry" not in marker_sets:
s=new_marker_set('particle_89 geometry')
marker_sets["particle_89 geometry"]=s
s= marker_sets["particle_89 geometry"]
mark=s.place_marker((3862.11, 1235.27, 2060.06), (0.7, 0.7, 0.7), 115.775)
if "particle_90 geometry" not in marker_sets:
s=new_marker_set('particle_90 geometry')
marker_sets["particle_90 geometry"]=s
s= marker_sets["particle_90 geometry"]
mark=s.place_marker((3636.22, 1044.61, 2352.58), (0.7, 0.7, 0.7), 115.648)
if "particle_91 geometry" not in marker_sets:
s=new_marker_set('particle_91 geometry')
marker_sets["particle_91 geometry"]=s
s= marker_sets["particle_91 geometry"]
mark=s.place_marker((3413.48, 1283.68, 2426.01), (0.7, 0.7, 0.7), 83.8386)
if "particle_92 geometry" not in marker_sets:
s=new_marker_set('particle_92 geometry')
marker_sets["particle_92 geometry"]=s
s= marker_sets["particle_92 geometry"]
mark=s.place_marker((3563.83, 1599.76, 2393.18), (0.7, 0.7, 0.7), 124.32)
if "particle_93 geometry" not in marker_sets:
s=new_marker_set('particle_93 geometry')
marker_sets["particle_93 geometry"]=s
s= marker_sets["particle_93 geometry"]
mark=s.place_marker((3802.6, 1896.64, 2562.91), (0.7, 0.7, 0.7), 185.993)
if "particle_94 geometry" not in marker_sets:
s=new_marker_set('particle_94 geometry')
marker_sets["particle_94 geometry"]=s
s= marker_sets["particle_94 geometry"]
mark=s.place_marker((4319.91, 1822.59, 2867.82), (0.7, 0.7, 0.7), 238.826)
if "particle_95 geometry" not in marker_sets:
s=new_marker_set('particle_95 geometry')
marker_sets["particle_95 geometry"]=s
s= marker_sets["particle_95 geometry"]
mark=s.place_marker((4614.4, 1424.27, 3086.3), (0.7, 0.7, 0.7), 128.465)
if "particle_96 geometry" not in marker_sets:
s=new_marker_set('particle_96 geometry')
marker_sets["particle_96 geometry"]=s
s= marker_sets["particle_96 geometry"]
mark=s.place_marker((4099.37, 1078.19, 3009.52), (0.7, 0.7, 0.7), 203.209)
if "particle_97 geometry" not in marker_sets:
s=new_marker_set('particle_97 geometry')
marker_sets["particle_97 geometry"]=s
s= marker_sets["particle_97 geometry"]
mark=s.place_marker((3685.32, 1197.17, 2763.58), (0.7, 0.7, 0.7), 160.486)
if "particle_98 geometry" not in marker_sets:
s=new_marker_set('particle_98 geometry')
marker_sets["particle_98 geometry"]=s
s= marker_sets["particle_98 geometry"]
mark=s.place_marker((3881.53, 1435.1, 2629.77), (0.7, 0.7, 0.7), 149.277)
if "particle_99 geometry" not in marker_sets:
s=new_marker_set('particle_99 geometry')
marker_sets["particle_99 geometry"]=s
s= marker_sets["particle_99 geometry"]
mark=s.place_marker((4305.33, 1104.04, 2518.86), (0.7, 0.7, 0.7), 35.7435)
if "particle_100 geometry" not in marker_sets:
s=new_marker_set('particle_100 geometry')
marker_sets["particle_100 geometry"]=s
s= marker_sets["particle_100 geometry"]
mark=s.place_marker((3337.01, 1176.56, 2627.14), (0.7, 0.7, 0.7), 98.3898)
if "particle_101 geometry" not in marker_sets:
s=new_marker_set('particle_101 geometry')
marker_sets["particle_101 geometry"]=s
s= marker_sets["particle_101 geometry"]
mark=s.place_marker((2357.91, 1499.62, 2690.65), (0.7, 0.7, 0.7), 188.404)
if "particle_102 geometry" not in marker_sets:
s=new_marker_set('particle_102 geometry')
marker_sets["particle_102 geometry"]=s
s= marker_sets["particle_102 geometry"]
mark=s.place_marker((2106.39, 1932.97, 2581.66), (0.7, 0.7, 0.7), 110.318)
if "particle_103 geometry" not in marker_sets:
s=new_marker_set('particle_103 geometry')
marker_sets["particle_103 geometry"]=s
s= marker_sets["particle_103 geometry"]
mark=s.place_marker((2389.23, 1826.55, 2339.87), (0.7, 0.7, 0.7), 127.534)
if "particle_104 geometry" not in marker_sets:
s=new_marker_set('particle_104 geometry')
marker_sets["particle_104 geometry"]=s
s= marker_sets["particle_104 geometry"]
mark=s.place_marker((2723.73, 1649.91, 2277.82), (0.7, 0.7, 0.7), 91.368)
if "particle_105 geometry" not in marker_sets:
s=new_marker_set('particle_105 geometry')
marker_sets["particle_105 geometry"]=s
s= marker_sets["particle_105 geometry"]
mark=s.place_marker((3084.94, 1480.91, 2341.81), (0.7, 0.7, 0.7), 131.045)
if "particle_106 geometry" not in marker_sets:
s=new_marker_set('particle_106 geometry')
marker_sets["particle_106 geometry"]=s
s= marker_sets["particle_106 geometry"]
mark=s.place_marker((3387.83, 1364.47, 2609.06), (0.7, 0.7, 0.7), 143.608)
if "particle_107 geometry" not in marker_sets:
s=new_marker_set('particle_107 geometry')
marker_sets["particle_107 geometry"]=s
s= marker_sets["particle_107 geometry"]
mark=s.place_marker((3679.86, 1572.79, 2709.34), (0.7, 0.7, 0.7), 135.783)
if "particle_108 geometry" not in marker_sets:
s=new_marker_set('particle_108 geometry')
marker_sets["particle_108 geometry"]=s
s= marker_sets["particle_108 geometry"]
mark=s.place_marker((3916.99, 1783.23, 2762.74), (0.7, 0.7, 0.7), 92.5947)
if "particle_109 geometry" not in marker_sets:
s=new_marker_set('particle_109 geometry')
marker_sets["particle_109 geometry"]=s
s= marker_sets["particle_109 geometry"]
mark=s.place_marker((3791.43, 1981.75, 2611.06), (0.7, 0.7, 0.7), 150.123)
if "particle_110 geometry" not in marker_sets:
s=new_marker_set('particle_110 geometry')
marker_sets["particle_110 geometry"]=s
s= marker_sets["particle_110 geometry"]
mark=s.place_marker((3631.49, 2058.01, 2396.97), (0.7, 0.7, 0.7), 121.57)
if "particle_111 geometry" not in marker_sets:
s=new_marker_set('particle_111 geometry')
marker_sets["particle_111 geometry"]=s
s= marker_sets["particle_111 geometry"]
mark=s.place_marker((3825.86, 2195.39, 2171.9), (0.7, 0.7, 0.7), 104.777)
if "particle_112 geometry" not in marker_sets:
s=new_marker_set('particle_112 geometry')
marker_sets["particle_112 geometry"]=s
s= marker_sets["particle_112 geometry"]
mark=s.place_marker((3455.62, 2391.48, 2179.61), (0.7, 0.7, 0.7), 114.844)
if "particle_113 geometry" not in marker_sets:
s=new_marker_set('particle_113 geometry')
marker_sets["particle_113 geometry"]=s
s= marker_sets["particle_113 geometry"]
mark=s.place_marker((3062.37, 2607.31, 2195.86), (0.7, 0.7, 0.7), 150.588)
if "particle_114 geometry" not in marker_sets:
s=new_marker_set('particle_114 geometry')
marker_sets["particle_114 geometry"]=s
s= marker_sets["particle_114 geometry"]
mark=s.place_marker((2713.84, 2366.33, 2243.99), (0.7, 0.7, 0.7), 103.55)
if "particle_115 geometry" not in marker_sets:
s=new_marker_set('particle_115 geometry')
marker_sets["particle_115 geometry"]=s
s= marker_sets["particle_115 geometry"]
mark=s.place_marker((2401.49, 1950.78, 2011.07), (0.7, 0.7, 0.7), 215.392)
if "particle_116 geometry" not in marker_sets:
s=new_marker_set('particle_116 geometry')
marker_sets["particle_116 geometry"]=s
s= marker_sets["particle_116 geometry"]
mark=s.place_marker((1973.86, 1629.56, 1778.7), (0.7, 0.7, 0.7), 99.9126)
if "particle_117 geometry" not in marker_sets:
s=new_marker_set('particle_117 geometry')
marker_sets["particle_117 geometry"]=s
s= marker_sets["particle_117 geometry"]
mark=s.place_marker((1883.95, 1260.33, 1225.27), (0.7, 0.7, 0.7), 99.7857)
if "particle_118 geometry" not in marker_sets:
s=new_marker_set('particle_118 geometry')
marker_sets["particle_118 geometry"]=s
s= marker_sets["particle_118 geometry"]
mark=s.place_marker((2007.72, 860.669, 885.783), (0.7, 0.7, 0.7), 109.98)
if "particle_119 geometry" not in marker_sets:
s=new_marker_set('particle_119 geometry')
marker_sets["particle_119 geometry"]=s
s= marker_sets["particle_119 geometry"]
mark=s.place_marker((2203.9, 1293.74, 1063.65), (0.7, 0.7, 0.7), 102.831)
if "particle_120 geometry" not in marker_sets:
s=new_marker_set('particle_120 geometry')
marker_sets["particle_120 geometry"]=s
s= marker_sets["particle_120 geometry"]
mark=s.place_marker((2379.72, 1477.27, 1383.91), (0.7, 0.7, 0.7), 103.593)
if "particle_121 geometry" not in marker_sets:
s=new_marker_set('particle_121 geometry')
marker_sets["particle_121 geometry"]=s
s= marker_sets["particle_121 geometry"]
mark=s.place_marker((2710.83, 1612.17, 1732.15), (0.7, 0.7, 0.7), 173.472)
if "particle_122 geometry" not in marker_sets:
s=new_marker_set('particle_122 geometry')
marker_sets["particle_122 geometry"]=s
s= marker_sets["particle_122 geometry"]
mark=s.place_marker((3231.83, 1450.24, 1896.05), (0.7, 0.7, 0.7), 113.575)
if "particle_123 geometry" not in marker_sets:
s=new_marker_set('particle_123 geometry')
marker_sets["particle_123 geometry"]=s
s= marker_sets["particle_123 geometry"]
mark=s.place_marker((3549.2, 1595.14, 2241.14), (0.7, 0.7, 0.7), 128.296)
if "particle_124 geometry" not in marker_sets:
s=new_marker_set('particle_124 geometry')
marker_sets["particle_124 geometry"]=s
s= marker_sets["particle_124 geometry"]
mark=s.place_marker((3872.39, 1746.22, 2471.42), (0.7, 0.7, 0.7), 145.004)
if "particle_125 geometry" not in marker_sets:
s=new_marker_set('particle_125 geometry')
marker_sets["particle_125 geometry"]=s
s= marker_sets["particle_125 geometry"]
mark=s.place_marker((4090.08, 2051.11, 2777.04), (0.7, 0.7, 0.7), 148.261)
if "particle_126 geometry" not in marker_sets:
s=new_marker_set('particle_126 geometry')
marker_sets["particle_126 geometry"]=s
s= marker_sets["particle_126 geometry"]
mark=s.place_marker((4628.77, 2117.79, 3033.32), (0.7, 0.7, 0.7), 127.704)
if "particle_127 geometry" not in marker_sets:
s=new_marker_set('particle_127 geometry')
marker_sets["particle_127 geometry"]=s
s= marker_sets["particle_127 geometry"]
mark=s.place_marker((5104.81, 1970, 3287.64), (0.7, 0.7, 0.7), 129.607)
if "particle_128 geometry" not in marker_sets:
s=new_marker_set('particle_128 geometry')
marker_sets["particle_128 geometry"]=s
s= marker_sets["particle_128 geometry"]
mark=s.place_marker((4835, 1579.11, 3060.92), (0.7, 0.7, 0.7), 139.759)
if "particle_129 geometry" not in marker_sets:
s=new_marker_set('particle_129 geometry')
marker_sets["particle_129 geometry"]=s
s= marker_sets["particle_129 geometry"]
mark=s.place_marker((4244.16, 1260.56, 2815.59), (0.7, 0.7, 0.7), 118.567)
if "particle_130 geometry" not in marker_sets:
s=new_marker_set('particle_130 geometry')
marker_sets["particle_130 geometry"]=s
s= marker_sets["particle_130 geometry"]
mark=s.place_marker((3998.31, 1224.11, 2411.23), (0.7, 0.7, 0.7), 136.164)
if "particle_131 geometry" not in marker_sets:
s=new_marker_set('particle_131 geometry')
marker_sets["particle_131 geometry"]=s
s= marker_sets["particle_131 geometry"]
mark=s.place_marker((3673.22, 1351.59, 2130.7), (0.7, 0.7, 0.7), 121.655)
if "particle_132 geometry" not in marker_sets:
s=new_marker_set('particle_132 geometry')
marker_sets["particle_132 geometry"]=s
s= marker_sets["particle_132 geometry"]
mark=s.place_marker((3507.42, 1618.56, 1879.49), (0.7, 0.7, 0.7), 127.492)
if "particle_133 geometry" not in marker_sets:
s=new_marker_set('particle_133 geometry')
marker_sets["particle_133 geometry"]=s
s= marker_sets["particle_133 geometry"]
mark=s.place_marker((3601.41, 1739.29, 1499.99), (0.7, 0.7, 0.7), 138.617)
if "particle_134 geometry" not in marker_sets:
s=new_marker_set('particle_134 geometry')
marker_sets["particle_134 geometry"]=s
s= marker_sets["particle_134 geometry"]
mark=s.place_marker((3382.34, 1991.17, 1358.6), (0.7, 0.7, 0.7), 120.766)
if "particle_135 geometry" not in marker_sets:
s=new_marker_set('particle_135 geometry')
marker_sets["particle_135 geometry"]=s
s= marker_sets["particle_135 geometry"]
mark=s.place_marker((3047.41, 1975.85, 1342.98), (0.7, 0.7, 0.7), 145.893)
if "particle_136 geometry" not in marker_sets:
s=new_marker_set('particle_136 geometry')
marker_sets["particle_136 geometry"]=s
s= marker_sets["particle_136 geometry"]
mark=s.place_marker((2804.91, 1871.6, 1753.03), (0.7, 0.7, 0.7), 185.02)
if "particle_137 geometry" not in marker_sets:
s=new_marker_set('particle_137 geometry')
marker_sets["particle_137 geometry"]=s
s= marker_sets["particle_137 geometry"]
mark=s.place_marker((2485.87, 1951.91, 2165.11), (0.7, 0.7, 0.7), 221.314)
if "particle_138 geometry" not in marker_sets:
s=new_marker_set('particle_138 geometry')
marker_sets["particle_138 geometry"]=s
s= marker_sets["particle_138 geometry"]
mark=s.place_marker((2314.15, 2247.41, 2506.24), (0.7, 0.7, 0.7), 165.139)
if "particle_139 geometry" not in marker_sets:
s=new_marker_set('particle_139 geometry')
marker_sets["particle_139 geometry"]=s
s= marker_sets["particle_139 geometry"]
mark=s.place_marker((2291.25, 2013.78, 2585.98), (0.7, 0.7, 0.7), 179.437)
if "particle_140 geometry" not in marker_sets:
s=new_marker_set('particle_140 geometry')
marker_sets["particle_140 geometry"]=s
s= marker_sets["particle_140 geometry"]
mark=s.place_marker((2279.01, 1708.57, 2333.66), (0.7, 0.7, 0.7), 137.898)
if "particle_141 geometry" not in marker_sets:
s=new_marker_set('particle_141 geometry')
marker_sets["particle_141 geometry"]=s
s= marker_sets["particle_141 geometry"]
mark=s.place_marker((2510.4, 1584.24, 2145.52), (0.7, 0.7, 0.7), 124.658)
if "particle_142 geometry" not in marker_sets:
s=new_marker_set('particle_142 geometry')
marker_sets["particle_142 geometry"]=s
s= marker_sets["particle_142 geometry"]
mark=s.place_marker((2779.86, 1687.67, 1928.22), (0.7, 0.7, 0.7), 97.7553)
if "particle_143 geometry" not in marker_sets:
s=new_marker_set('particle_143 geometry')
marker_sets["particle_143 geometry"]=s
s= marker_sets["particle_143 geometry"]
mark=s.place_marker((2997.25, 1724.7, 1697.09), (0.7, 0.7, 0.7), 92.9331)
if "particle_144 geometry" not in marker_sets:
s=new_marker_set('particle_144 geometry')
marker_sets["particle_144 geometry"]=s
s= marker_sets["particle_144 geometry"]
mark=s.place_marker((3210.25, 1627.11, 1449.07), (0.7, 0.7, 0.7), 123.135)
if "particle_145 geometry" not in marker_sets:
s=new_marker_set('particle_145 geometry')
marker_sets["particle_145 geometry"]=s
s= marker_sets["particle_145 geometry"]
mark=s.place_marker((2938.28, 1679.8, 1733.63), (0.7, 0.7, 0.7), 125.716)
if "particle_146 geometry" not in marker_sets:
s=new_marker_set('particle_146 geometry')
marker_sets["particle_146 geometry"]=s
s= marker_sets["particle_146 geometry"]
mark=s.place_marker((2890.42, 1843.61, 2019.07), (0.7, 0.7, 0.7), 127.534)
if "particle_147 geometry" not in marker_sets:
s=new_marker_set('particle_147 geometry')
marker_sets["particle_147 geometry"]=s
s= marker_sets["particle_147 geometry"]
mark=s.place_marker((3123.78, 1875.77, 2218.6), (0.7, 0.7, 0.7), 94.9212)
if "particle_148 geometry" not in marker_sets:
s=new_marker_set('particle_148 geometry')
marker_sets["particle_148 geometry"]=s
s= marker_sets["particle_148 geometry"]
mark=s.place_marker((2908.86, 2043.03, 2567.16), (0.7, 0.7, 0.7), 137.644)
if "particle_149 geometry" not in marker_sets:
s=new_marker_set('particle_149 geometry')
marker_sets["particle_149 geometry"]=s
s= marker_sets["particle_149 geometry"]
mark=s.place_marker((2844.64, 2240.64, 2859.3), (0.7, 0.7, 0.7), 149.277)
if "particle_150 geometry" not in marker_sets:
s=new_marker_set('particle_150 geometry')
marker_sets["particle_150 geometry"]=s
s= marker_sets["particle_150 geometry"]
mark=s.place_marker((2943.68, 2488.8, 2639.91), (0.7, 0.7, 0.7), 103.677)
if "particle_151 geometry" not in marker_sets:
s=new_marker_set('particle_151 geometry')
marker_sets["particle_151 geometry"]=s
s= marker_sets["particle_151 geometry"]
mark=s.place_marker((3262.23, 2670.91, 2343.59), (0.7, 0.7, 0.7), 99.6588)
if "particle_152 geometry" not in marker_sets:
s=new_marker_set('particle_152 geometry')
marker_sets["particle_152 geometry"]=s
s= marker_sets["particle_152 geometry"]
mark=s.place_marker((3512.19, 2812.75, 2113.99), (0.7, 0.7, 0.7), 134.133)
if "particle_153 geometry" not in marker_sets:
s=new_marker_set('particle_153 geometry')
marker_sets["particle_153 geometry"]=s
s= marker_sets["particle_153 geometry"]
mark=s.place_marker((3207.93, 2673.06, 2015.96), (0.7, 0.7, 0.7), 173.007)
if "particle_154 geometry" not in marker_sets:
s=new_marker_set('particle_154 geometry')
marker_sets["particle_154 geometry"]=s
s= marker_sets["particle_154 geometry"]
mark=s.place_marker((2759.78, 2535.74, 2345.49), (0.7, 0.7, 0.7), 141.028)
if "particle_155 geometry" not in marker_sets:
s=new_marker_set('particle_155 geometry')
marker_sets["particle_155 geometry"]=s
s= marker_sets["particle_155 geometry"]
mark=s.place_marker((2435.05, 2345.52, 2620.15), (0.7, 0.7, 0.7), 161.121)
if "particle_156 geometry" not in marker_sets:
s=new_marker_set('particle_156 geometry')
marker_sets["particle_156 geometry"]=s
s= marker_sets["particle_156 geometry"]
mark=s.place_marker((2626.82, 2172.59, 2853.52), (0.7, 0.7, 0.7), 119.582)
if "particle_157 geometry" not in marker_sets:
s=new_marker_set('particle_157 geometry')
marker_sets["particle_157 geometry"]=s
s= marker_sets["particle_157 geometry"]
mark=s.place_marker((2994.84, 2049.15, 2697.61), (0.7, 0.7, 0.7), 137.094)
if "particle_158 geometry" not in marker_sets:
s=new_marker_set('particle_158 geometry')
marker_sets["particle_158 geometry"]=s
s= marker_sets["particle_158 geometry"]
mark=s.place_marker((3270.64, 1779.31, 2372.46), (0.7, 0.7, 0.7), 149.234)
if "particle_159 geometry" not in marker_sets:
s=new_marker_set('particle_159 geometry')
marker_sets["particle_159 geometry"]=s
s= marker_sets["particle_159 geometry"]
mark=s.place_marker((2911.88, 1547.44, 2199.92), (0.7, 0.7, 0.7), 151.011)
if "particle_160 geometry" not in marker_sets:
s=new_marker_set('particle_160 geometry')
marker_sets["particle_160 geometry"]=s
s= marker_sets["particle_160 geometry"]
mark=s.place_marker((2361.8, 1626.88, 2207.34), (0.7, 0.7, 0.7), 184.216)
if "particle_161 geometry" not in marker_sets:
s=new_marker_set('particle_161 geometry')
marker_sets["particle_161 geometry"]=s
s= marker_sets["particle_161 geometry"]
mark=s.place_marker((2259.84, 1958.33, 1982.01), (0.7, 0.7, 0.7), 170.596)
if "particle_162 geometry" not in marker_sets:
s=new_marker_set('particle_162 geometry')
marker_sets["particle_162 geometry"]=s
s= marker_sets["particle_162 geometry"]
mark=s.place_marker((2713.59, 1806.75, 1539.71), (0.7, 0.7, 0.7), 215.603)
if "particle_163 geometry" not in marker_sets:
s=new_marker_set('particle_163 geometry')
marker_sets["particle_163 geometry"]=s
s= marker_sets["particle_163 geometry"]
mark=s.place_marker((3344.42, 1534.27, 942.484), (0.7, 0.7, 0.7), 79.0164)
if "particle_164 geometry" not in marker_sets:
s=new_marker_set('particle_164 geometry')
marker_sets["particle_164 geometry"]=s
s= marker_sets["particle_164 geometry"]
mark=s.place_marker((3514.34, 1816.98, 861.102), (0.7, 0.7, 0.7), 77.2821)
if "particle_165 geometry" not in marker_sets:
s=new_marker_set('particle_165 geometry')
marker_sets["particle_165 geometry"]=s
s= marker_sets["particle_165 geometry"]
mark=s.place_marker((3515.36, 2062.21, 1114.15), (0.7, 0.7, 0.7), 188.658)
if "particle_166 geometry" not in marker_sets:
s=new_marker_set('particle_166 geometry')
marker_sets["particle_166 geometry"]=s
s= marker_sets["particle_166 geometry"]
mark=s.place_marker((3345.59, 2288.99, 967.71), (0.7, 0.7, 0.7), 115.437)
if "particle_167 geometry" not in marker_sets:
s=new_marker_set('particle_167 geometry')
marker_sets["particle_167 geometry"]=s
s= marker_sets["particle_167 geometry"]
mark=s.place_marker((3019.27, 2220.63, 1476.86), (0.7, 0.7, 0.7), 88.4916)
if "particle_168 geometry" not in marker_sets:
s=new_marker_set('particle_168 geometry')
marker_sets["particle_168 geometry"]=s
s= marker_sets["particle_168 geometry"]
mark=s.place_marker((2676.92, 2143.97, 2001.43), (0.7, 0.7, 0.7), 108.88)
if "particle_169 geometry" not in marker_sets:
s=new_marker_set('particle_169 geometry')
marker_sets["particle_169 geometry"]=s
s= marker_sets["particle_169 geometry"]
mark=s.place_marker((2485.4, 1925.77, 2217.19), (0.7, 0.7, 0.7), 172.119)
if "particle_170 geometry" not in marker_sets:
s=new_marker_set('particle_170 geometry')
marker_sets["particle_170 geometry"]=s
s= marker_sets["particle_170 geometry"]
mark=s.place_marker((2726.18, 1789.98, 1819.18), (0.7, 0.7, 0.7), 139.505)
if "particle_171 geometry" not in marker_sets:
s=new_marker_set('particle_171 geometry')
marker_sets["particle_171 geometry"]=s
s= marker_sets["particle_171 geometry"]
mark=s.place_marker((2971.06, 1646.21, 1404.07), (0.7, 0.7, 0.7), 92.7639)
if "particle_172 geometry" not in marker_sets:
s=new_marker_set('particle_172 geometry')
marker_sets["particle_172 geometry"]=s
s= marker_sets["particle_172 geometry"]
mark=s.place_marker((2807.16, 1447.27, 1448.95), (0.7, 0.7, 0.7), 89.8452)
if "particle_173 geometry" not in marker_sets:
s=new_marker_set('particle_173 geometry')
marker_sets["particle_173 geometry"]=s
s= marker_sets["particle_173 geometry"]
mark=s.place_marker((2579.91, 1631.07, 1405.08), (0.7, 0.7, 0.7), 149.446)
if "particle_174 geometry" not in marker_sets:
s=new_marker_set('particle_174 geometry')
marker_sets["particle_174 geometry"]=s
s= marker_sets["particle_174 geometry"]
mark=s.place_marker((2549.09, 1828.28, 1134.19), (0.7, 0.7, 0.7), 126.858)
if "particle_175 geometry" not in marker_sets:
s=new_marker_set('particle_175 geometry')
marker_sets["particle_175 geometry"]=s
s= marker_sets["particle_175 geometry"]
mark=s.place_marker((2806.79, 1651.74, 1032.95), (0.7, 0.7, 0.7), 106.046)
if "particle_176 geometry" not in marker_sets:
s=new_marker_set('particle_176 geometry')
marker_sets["particle_176 geometry"]=s
s= marker_sets["particle_176 geometry"]
mark=s.place_marker((3015.98, 1248.56, 1260.99), (0.7, 0.7, 0.7), 156.298)
if "particle_177 geometry" not in marker_sets:
s=new_marker_set('particle_177 geometry')
marker_sets["particle_177 geometry"]=s
s= marker_sets["particle_177 geometry"]
mark=s.place_marker((3332.75, 842.258, 1567.32), (0.7, 0.7, 0.7), 231.212)
if "particle_178 geometry" not in marker_sets:
s=new_marker_set('particle_178 geometry')
marker_sets["particle_178 geometry"]=s
s= marker_sets["particle_178 geometry"]
mark=s.place_marker((3122.03, 634.902, 2023.73), (0.7, 0.7, 0.7), 88.4916)
if "particle_179 geometry" not in marker_sets:
s=new_marker_set('particle_179 geometry')
marker_sets["particle_179 geometry"]=s
s= marker_sets["particle_179 geometry"]
mark=s.place_marker((2762.91, 814.814, 2279.6), (0.7, 0.7, 0.7), 111.334)
if "particle_180 geometry" not in marker_sets:
s=new_marker_set('particle_180 geometry')
marker_sets["particle_180 geometry"]=s
s= marker_sets["particle_180 geometry"]
mark=s.place_marker((2457.19, 1334.14, 2394.55), (0.7, 0.7, 0.7), 127.619)
if "particle_181 geometry" not in marker_sets:
s=new_marker_set('particle_181 geometry')
marker_sets["particle_181 geometry"]=s
s= marker_sets["particle_181 geometry"]
mark=s.place_marker((2332.7, 1772.23, 2493.32), (0.7, 0.7, 0.7), 230.746)
if "particle_182 geometry" not in marker_sets:
s=new_marker_set('particle_182 geometry')
marker_sets["particle_182 geometry"]=s
s= marker_sets["particle_182 geometry"]
mark=s.place_marker((2693.75, 1707.42, 2278.7), (0.7, 0.7, 0.7), 124.573)
if "particle_183 geometry" not in marker_sets:
s=new_marker_set('particle_183 geometry')
marker_sets["particle_183 geometry"]=s
s= marker_sets["particle_183 geometry"]
mark=s.place_marker((3087, 1395.18, 1858.27), (0.7, 0.7, 0.7), 124.489)
if "particle_184 geometry" not in marker_sets:
s=new_marker_set('particle_184 geometry')
marker_sets["particle_184 geometry"]=s
s= marker_sets["particle_184 geometry"]
mark=s.place_marker((3143.76, 1557.24, 1521.77), (0.7, 0.7, 0.7), 196.61)
if "particle_185 geometry" not in marker_sets:
s=new_marker_set('particle_185 geometry')
marker_sets["particle_185 geometry"]=s
s= marker_sets["particle_185 geometry"]
mark=s.place_marker((2773.87, 1510.24, 1501.01), (0.7, 0.7, 0.7), 134.049)
if "particle_186 geometry" not in marker_sets:
s=new_marker_set('particle_186 geometry')
marker_sets["particle_186 geometry"]=s
s= marker_sets["particle_186 geometry"]
mark=s.place_marker((2527.39, 1271.55, 1433.97), (0.7, 0.7, 0.7), 141.493)
if "particle_187 geometry" not in marker_sets:
s=new_marker_set('particle_187 geometry')
marker_sets["particle_187 geometry"]=s
s= marker_sets["particle_187 geometry"]
mark=s.place_marker((2396.49, 907.242, 1232.75), (0.7, 0.7, 0.7), 172.203)
if "particle_188 geometry" not in marker_sets:
s=new_marker_set('particle_188 geometry')
marker_sets["particle_188 geometry"]=s
s= marker_sets["particle_188 geometry"]
mark=s.place_marker((2475.46, 1522.25, 1234.15), (0.7, 0.7, 0.7), 271.354)
if "particle_189 geometry" not in marker_sets:
s=new_marker_set('particle_189 geometry')
marker_sets["particle_189 geometry"]=s
s= marker_sets["particle_189 geometry"]
mark=s.place_marker((2792.56, 1855.72, 1391.06), (0.7, 0.7, 0.7), 97.0785)
if "particle_190 geometry" not in marker_sets:
s=new_marker_set('particle_190 geometry')
marker_sets["particle_190 geometry"]=s
s= marker_sets["particle_190 geometry"]
mark=s.place_marker((3149.64, 1932.82, 1523.41), (0.7, 0.7, 0.7), 151.857)
if "particle_191 geometry" not in marker_sets:
s=new_marker_set('particle_191 geometry')
marker_sets["particle_191 geometry"]=s
s= marker_sets["particle_191 geometry"]
mark=s.place_marker((3561.18, 2281.27, 1678.55), (0.7, 0.7, 0.7), 199.233)
if "particle_192 geometry" not in marker_sets:
s=new_marker_set('particle_192 geometry')
marker_sets["particle_192 geometry"]=s
s= marker_sets["particle_192 geometry"]
mark=s.place_marker((3391.28, 2639.86, 2121.06), (0.7, 0.7, 0.7), 118.863)
if "particle_193 geometry" not in marker_sets:
s=new_marker_set('particle_193 geometry')
marker_sets["particle_193 geometry"]=s
s= marker_sets["particle_193 geometry"]
mark=s.place_marker((3633.96, 2813.41, 2451.55), (0.7, 0.7, 0.7), 172.415)
if "particle_194 geometry" not in marker_sets:
s=new_marker_set('particle_194 geometry')
marker_sets["particle_194 geometry"]=s
s= marker_sets["particle_194 geometry"]
mark=s.place_marker((4163, 2875.45, 2573.36), (0.7, 0.7, 0.7), 134.26)
if "particle_195 geometry" not in marker_sets:
s=new_marker_set('particle_195 geometry')
marker_sets["particle_195 geometry"]=s
s= marker_sets["particle_195 geometry"]
mark=s.place_marker((5111.47, 2945.76, 2514.28), (0.7, 0.7, 0.7), 139.548)
if "particle_196 geometry" not in marker_sets:
s=new_marker_set('particle_196 geometry')
marker_sets["particle_196 geometry"]=s
s= marker_sets["particle_196 geometry"]
mark=s.place_marker((5112.05, 2440.55, 2603.85), (0.7, 0.7, 0.7), 196.526)
if "particle_197 geometry" not in marker_sets:
s=new_marker_set('particle_197 geometry')
marker_sets["particle_197 geometry"]=s
s= marker_sets["particle_197 geometry"]
mark=s.place_marker((4506.67, 2019.27, 2522.37), (0.7, 0.7, 0.7), 136.206)
if "particle_198 geometry" not in marker_sets:
s=new_marker_set('particle_198 geometry')
marker_sets["particle_198 geometry"]=s
s= marker_sets["particle_198 geometry"]
mark=s.place_marker((3584.87, 1842.82, 2700.99), (0.7, 0.7, 0.7), 152.322)
if "particle_199 geometry" not in marker_sets:
s=new_marker_set('particle_199 geometry')
marker_sets["particle_199 geometry"]=s
s= marker_sets["particle_199 geometry"]
mark=s.place_marker((2934.35, 1856.71, 2711.44), (0.7, 0.7, 0.7), 126.054)
if "particle_200 geometry" not in marker_sets:
s=new_marker_set('particle_200 geometry')
marker_sets["particle_200 geometry"]=s
s= marker_sets["particle_200 geometry"]
mark=s.place_marker((2887.16, 1696.44, 2309.17), (0.7, 0.7, 0.7), 164.378)
if "particle_201 geometry" not in marker_sets:
s=new_marker_set('particle_201 geometry')
marker_sets["particle_201 geometry"]=s
s= marker_sets["particle_201 geometry"]
mark=s.place_marker((2968.11, 1743.47, 1849.11), (0.7, 0.7, 0.7), 122.205)
if "particle_202 geometry" not in marker_sets:
s=new_marker_set('particle_202 geometry')
marker_sets["particle_202 geometry"]=s
s= marker_sets["particle_202 geometry"]
mark=s.place_marker((3024.65, 1994.08, 1486.54), (0.7, 0.7, 0.7), 134.979)
if "particle_203 geometry" not in marker_sets:
s=new_marker_set('particle_203 geometry')
marker_sets["particle_203 geometry"]=s
s= marker_sets["particle_203 geometry"]
mark=s.place_marker((2985.81, 2310.1, 1663.4), (0.7, 0.7, 0.7), 136.375)
if "particle_204 geometry" not in marker_sets:
s=new_marker_set('particle_204 geometry')
marker_sets["particle_204 geometry"]=s
s= marker_sets["particle_204 geometry"]
mark=s.place_marker((3240.81, 2223.71, 1812.91), (0.7, 0.7, 0.7), 151.688)
if "particle_205 geometry" not in marker_sets:
s=new_marker_set('particle_205 geometry')
marker_sets["particle_205 geometry"]=s
s= marker_sets["particle_205 geometry"]
mark=s.place_marker((3468.35, 2161.46, 1834.74), (0.7, 0.7, 0.7), 116.156)
if "particle_206 geometry" not in marker_sets:
s=new_marker_set('particle_206 geometry')
marker_sets["particle_206 geometry"]=s
s= marker_sets["particle_206 geometry"]
mark=s.place_marker((2904.31, 2281.87, 2268.25), (0.7, 0.7, 0.7), 122.839)
if "particle_207 geometry" not in marker_sets:
s=new_marker_set('particle_207 geometry')
marker_sets["particle_207 geometry"]=s
s= marker_sets["particle_207 geometry"]
mark=s.place_marker((2670.96, 2162.14, 2682.23), (0.7, 0.7, 0.7), 164.716)
if "particle_208 geometry" not in marker_sets:
s=new_marker_set('particle_208 geometry')
marker_sets["particle_208 geometry"]=s
s= marker_sets["particle_208 geometry"]
mark=s.place_marker((3430.03, 1764.98, 2530.27), (0.7, 0.7, 0.7), 303.672)
if "particle_209 geometry" not in marker_sets:
s=new_marker_set('particle_209 geometry')
marker_sets["particle_209 geometry"]=s
s= marker_sets["particle_209 geometry"]
mark=s.place_marker((4292.23, 1690.17, 1978.54), (0.7, 0.7, 0.7), 220.298)
if "particle_210 geometry" not in marker_sets:
s=new_marker_set('particle_210 geometry')
marker_sets["particle_210 geometry"]=s
s= marker_sets["particle_210 geometry"]
mark=s.place_marker((3928.6, 1742.99, 1452.18), (0.7, 0.7, 0.7), 175.883)
if "particle_211 geometry" not in marker_sets:
s=new_marker_set('particle_211 geometry')
marker_sets["particle_211 geometry"]=s
s= marker_sets["particle_211 geometry"]
mark=s.place_marker((3414.82, 1449.55, 1106.76), (0.7, 0.7, 0.7), 233.581)
if "particle_212 geometry" not in marker_sets:
s=new_marker_set('particle_212 geometry')
marker_sets["particle_212 geometry"]=s
s= marker_sets["particle_212 geometry"]
mark=s.place_marker((2886.99, 931.757, 1345.22), (0.7, 0.7, 0.7), 231.127)
if "particle_213 geometry" not in marker_sets:
s=new_marker_set('particle_213 geometry')
marker_sets["particle_213 geometry"]=s
s= marker_sets["particle_213 geometry"]
mark=s.place_marker((2328.18, 797.047, 1145.58), (0.7, 0.7, 0.7), 247.413)
if "particle_214 geometry" not in marker_sets:
s=new_marker_set('particle_214 geometry')
marker_sets["particle_214 geometry"]=s
s= marker_sets["particle_214 geometry"]
mark=s.place_marker((1930.68, 1001.53, 678.746), (0.7, 0.7, 0.7), 200.206)
if "particle_215 geometry" not in marker_sets:
s=new_marker_set('particle_215 geometry')
marker_sets["particle_215 geometry"]=s
s= marker_sets["particle_215 geometry"]
mark=s.place_marker((2074.05, 1356.45, 454.708), (0.7, 0.7, 0.7), 150.419)
if "particle_216 geometry" not in marker_sets:
s=new_marker_set('particle_216 geometry')
marker_sets["particle_216 geometry"]=s
s= marker_sets["particle_216 geometry"]
mark=s.place_marker((1923.01, 1464.76, 1033.48), (0.7, 0.7, 0.7), 140.14)
if "particle_217 geometry" not in marker_sets:
s=new_marker_set('particle_217 geometry')
marker_sets["particle_217 geometry"]=s
s= marker_sets["particle_217 geometry"]
mark=s.place_marker((1689.19, 1310.39, 1409.44), (0.7, 0.7, 0.7), 132.949)
if "particle_218 geometry" not in marker_sets:
s=new_marker_set('particle_218 geometry')
marker_sets["particle_218 geometry"]=s
s= marker_sets["particle_218 geometry"]
mark=s.place_marker((1520.52, 1339.88, 1776.44), (0.7, 0.7, 0.7), 141.113)
if "particle_219 geometry" not in marker_sets:
s=new_marker_set('particle_219 geometry')
marker_sets["particle_219 geometry"]=s
s= marker_sets["particle_219 geometry"]
mark=s.place_marker((1670.29, 1079.84, 1923.99), (0.7, 0.7, 0.7), 171.526)
if "particle_220 geometry" not in marker_sets:
s=new_marker_set('particle_220 geometry')
marker_sets["particle_220 geometry"]=s
s= marker_sets["particle_220 geometry"]
mark=s.place_marker((2103.28, 829.032, 1596.68), (0.7, 0.7, 0.7), 326.937)
if "particle_221 geometry" not in marker_sets:
s=new_marker_set('particle_221 geometry')
marker_sets["particle_221 geometry"]=s
s= marker_sets["particle_221 geometry"]
mark=s.place_marker((2613.93, 989.748, 1370.87), (0.7, 0.7, 0.7), 92.0871)
if "particle_222 geometry" not in marker_sets:
s=new_marker_set('particle_222 geometry')
marker_sets["particle_222 geometry"]=s
s= marker_sets["particle_222 geometry"]
mark=s.place_marker((2781.87, 1141.33, 1773.25), (0.7, 0.7, 0.7), 210.273)
if "particle_223 geometry" not in marker_sets:
s=new_marker_set('particle_223 geometry')
marker_sets["particle_223 geometry"]=s
s= marker_sets["particle_223 geometry"]
mark=s.place_marker((2381.04, 1448.1, 2311.71), (0.7, 0.7, 0.7), 122.628)
if "particle_224 geometry" not in marker_sets:
s=new_marker_set('particle_224 geometry')
marker_sets["particle_224 geometry"]=s
s= marker_sets["particle_224 geometry"]
mark=s.place_marker((2154.55, 1503.51, 2415.69), (0.7, 0.7, 0.7), 109.176)
if "particle_225 geometry" not in marker_sets:
s=new_marker_set('particle_225 geometry')
marker_sets["particle_225 geometry"]=s
s= marker_sets["particle_225 geometry"]
mark=s.place_marker((2285.19, 1532.51, 2162.99), (0.7, 0.7, 0.7), 142.213)
if "particle_226 geometry" not in marker_sets:
s=new_marker_set('particle_226 geometry')
marker_sets["particle_226 geometry"]=s
s= marker_sets["particle_226 geometry"]
mark=s.place_marker((2681.08, 1427.06, 2207.47), (0.7, 0.7, 0.7), 250.078)
if "particle_227 geometry" not in marker_sets:
s=new_marker_set('particle_227 geometry')
marker_sets["particle_227 geometry"]=s
s= marker_sets["particle_227 geometry"]
mark=s.place_marker((2702.42, 1854.35, 2033.98), (0.7, 0.7, 0.7), 123.558)
if "particle_228 geometry" not in marker_sets:
s=new_marker_set('particle_228 geometry')
marker_sets["particle_228 geometry"]=s
s= marker_sets["particle_228 geometry"]
mark=s.place_marker((2504.43, 2274.13, 2126.26), (0.7, 0.7, 0.7), 235.992)
if "particle_229 geometry" not in marker_sets:
s=new_marker_set('particle_229 geometry')
marker_sets["particle_229 geometry"]=s
s= marker_sets["particle_229 geometry"]
mark=s.place_marker((2440.75, 2744.72, 2134.01), (0.7, 0.7, 0.7), 172.373)
if "particle_230 geometry" not in marker_sets:
s=new_marker_set('particle_230 geometry')
marker_sets["particle_230 geometry"]=s
s= marker_sets["particle_230 geometry"]
mark=s.place_marker((2742.45, 2939.25, 1860.73), (0.7, 0.7, 0.7), 152.322)
if "particle_231 geometry" not in marker_sets:
s=new_marker_set('particle_231 geometry')
marker_sets["particle_231 geometry"]=s
s= marker_sets["particle_231 geometry"]
mark=s.place_marker((2899.3, 3017.01, 1591.6), (0.7, 0.7, 0.7), 196.653)
if "particle_232 geometry" not in marker_sets:
s=new_marker_set('particle_232 geometry')
marker_sets["particle_232 geometry"]=s
s= marker_sets["particle_232 geometry"]
mark=s.place_marker((2573.41, 2878.96, 1581.95), (0.7, 0.7, 0.7), 134.091)
if "particle_233 geometry" not in marker_sets:
s=new_marker_set('particle_233 geometry')
marker_sets["particle_233 geometry"]=s
s= marker_sets["particle_233 geometry"]
mark=s.place_marker((2253.5, 2968.85, 1643.55), (0.7, 0.7, 0.7), 180.325)
if "particle_234 geometry" not in marker_sets:
s=new_marker_set('particle_234 geometry')
marker_sets["particle_234 geometry"]=s
s= marker_sets["particle_234 geometry"]
mark=s.place_marker((2460.42, 2688.27, 1942.02), (0.7, 0.7, 0.7), 218.437)
if "particle_235 geometry" not in marker_sets:
s=new_marker_set('particle_235 geometry')
marker_sets["particle_235 geometry"]=s
s= marker_sets["particle_235 geometry"]
mark=s.place_marker((2746.59, 2331.8, 1877.54), (0.7, 0.7, 0.7), 148.008)
if "particle_236 geometry" not in marker_sets:
s=new_marker_set('particle_236 geometry')
marker_sets["particle_236 geometry"]=s
s= marker_sets["particle_236 geometry"]
mark=s.place_marker((3087.74, 2043.78, 1434.72), (0.7, 0.7, 0.7), 191.873)
if "particle_237 geometry" not in marker_sets:
s=new_marker_set('particle_237 geometry')
marker_sets["particle_237 geometry"]=s
s= marker_sets["particle_237 geometry"]
mark=s.place_marker((3124.73, 1666.4, 1044.16), (0.7, 0.7, 0.7), 138.575)
if "particle_238 geometry" not in marker_sets:
s=new_marker_set('particle_238 geometry')
marker_sets["particle_238 geometry"]=s
s= marker_sets["particle_238 geometry"]
mark=s.place_marker((3129.27, 1763.88, 628.518), (0.7, 0.7, 0.7), 161.205)
if "particle_239 geometry" not in marker_sets:
s=new_marker_set('particle_239 geometry')
marker_sets["particle_239 geometry"]=s
s= marker_sets["particle_239 geometry"]
mark=s.place_marker((3386, 1947.25, 1000.26), (0.7, 0.7, 0.7), 288.021)
if "particle_240 geometry" not in marker_sets:
s=new_marker_set('particle_240 geometry')
marker_sets["particle_240 geometry"]=s
s= marker_sets["particle_240 geometry"]
mark=s.place_marker((2964.83, 2488.02, 1262.99), (0.7, 0.7, 0.7), 227.405)
if "particle_241 geometry" not in marker_sets:
s=new_marker_set('particle_241 geometry')
marker_sets["particle_241 geometry"]=s
s= marker_sets["particle_241 geometry"]
mark=s.place_marker((2749.81, 2740.02, 1650.99), (0.7, 0.7, 0.7), 126.519)
if "particle_242 geometry" not in marker_sets:
s=new_marker_set('particle_242 geometry')
marker_sets["particle_242 geometry"]=s
s= marker_sets["particle_242 geometry"]
mark=s.place_marker((3044.25, 2725.29, 1561.02), (0.7, 0.7, 0.7), 117.975)
if "particle_243 geometry" not in marker_sets:
s=new_marker_set('particle_243 geometry')
marker_sets["particle_243 geometry"]=s
s= marker_sets["particle_243 geometry"]
mark=s.place_marker((2941.62, 2631.74, 1921.28), (0.7, 0.7, 0.7), 200.883)
if "particle_244 geometry" not in marker_sets:
s=new_marker_set('particle_244 geometry')
marker_sets["particle_244 geometry"]=s
s= marker_sets["particle_244 geometry"]
mark=s.place_marker((2556.42, 2560.13, 1959.72), (0.7, 0.7, 0.7), 158.794)
if "particle_245 geometry" not in marker_sets:
s=new_marker_set('particle_245 geometry')
marker_sets["particle_245 geometry"]=s
s= marker_sets["particle_245 geometry"]
mark=s.place_marker((2244.78, 2553.93, 1878.84), (0.7, 0.7, 0.7), 115.86)
if "particle_246 geometry" not in marker_sets:
s=new_marker_set('particle_246 geometry')
marker_sets["particle_246 geometry"]=s
s= marker_sets["particle_246 geometry"]
mark=s.place_marker((2048.62, 2547.26, 2030.91), (0.7, 0.7, 0.7), 133.034)
if "particle_247 geometry" not in marker_sets:
s=new_marker_set('particle_247 geometry')
marker_sets["particle_247 geometry"]=s
s= marker_sets["particle_247 geometry"]
mark=s.place_marker((2238.6, 2858.13, 2334.73), (0.7, 0.7, 0.7), 314.627)
if "particle_248 geometry" not in marker_sets:
s=new_marker_set('particle_248 geometry')
marker_sets["particle_248 geometry"]=s
s= marker_sets["particle_248 geometry"]
mark=s.place_marker((2488.12, 2816.6, 2074.18), (0.7, 0.7, 0.7), 115.352)
if "particle_249 geometry" not in marker_sets:
s=new_marker_set('particle_249 geometry')
marker_sets["particle_249 geometry"]=s
s= marker_sets["particle_249 geometry"]
mark=s.place_marker((2540.32, 2695.84, 1670.35), (0.7, 0.7, 0.7), 180.621)
if "particle_250 geometry" not in marker_sets:
s=new_marker_set('particle_250 geometry')
marker_sets["particle_250 geometry"]=s
s= marker_sets["particle_250 geometry"]
mark=s.place_marker((2228.9, 2525.07, 1574.34), (0.7, 0.7, 0.7), 126.265)
if "particle_251 geometry" not in marker_sets:
s=new_marker_set('particle_251 geometry')
marker_sets["particle_251 geometry"]=s
s= marker_sets["particle_251 geometry"]
mark=s.place_marker((1960.3, 2301.2, 1760.79), (0.7, 0.7, 0.7), 133.541)
if "particle_252 geometry" not in marker_sets:
s=new_marker_set('particle_252 geometry')
marker_sets["particle_252 geometry"]=s
s= marker_sets["particle_252 geometry"]
mark=s.place_marker((1711.81, 1938.96, 1774.75), (0.7, 0.7, 0.7), 171.019)
if "particle_253 geometry" not in marker_sets:
s=new_marker_set('particle_253 geometry')
marker_sets["particle_253 geometry"]=s
s= marker_sets["particle_253 geometry"]
mark=s.place_marker((1568.76, 1574.2, 1652.17), (0.7, 0.7, 0.7), 115.437)
if "particle_254 geometry" not in marker_sets:
s=new_marker_set('particle_254 geometry')
marker_sets["particle_254 geometry"]=s
s= marker_sets["particle_254 geometry"]
mark=s.place_marker((1570.79, 1864.13, 1538.45), (0.7, 0.7, 0.7), 158.583)
if "particle_255 geometry" not in marker_sets:
s=new_marker_set('particle_255 geometry')
marker_sets["particle_255 geometry"]=s
s= marker_sets["particle_255 geometry"]
mark=s.place_marker((1769.99, 2015.89, 1894.34), (0.7, 0.7, 0.7), 192)
if "particle_256 geometry" not in marker_sets:
s=new_marker_set('particle_256 geometry')
marker_sets["particle_256 geometry"]=s
s= marker_sets["particle_256 geometry"]
mark=s.place_marker((1933.63, 2319.56, 2146.54), (0.7, 0.7, 0.7), 150.165)
if "particle_257 geometry" not in marker_sets:
s=new_marker_set('particle_257 geometry')
marker_sets["particle_257 geometry"]=s
s= marker_sets["particle_257 geometry"]
mark=s.place_marker((1789.21, 2190.61, 2291.6), (0.7, 0.7, 0.7), 157.567)
if "particle_258 geometry" not in marker_sets:
s=new_marker_set('particle_258 geometry')
marker_sets["particle_258 geometry"]=s
s= marker_sets["particle_258 geometry"]
mark=s.place_marker((2043.48, 2340.03, 2422.62), (0.7, 0.7, 0.7), 199.36)
if "particle_259 geometry" not in marker_sets:
s=new_marker_set('particle_259 geometry')
marker_sets["particle_259 geometry"]=s
s= marker_sets["particle_259 geometry"]
mark=s.place_marker((2252.34, 2014.29, 2032.1), (0.7, 0.7, 0.7), 105.369)
if "particle_260 geometry" not in marker_sets:
s=new_marker_set('particle_260 geometry')
marker_sets["particle_260 geometry"]=s
s= marker_sets["particle_260 geometry"]
mark=s.place_marker((2466.08, 1875.91, 2006.68), (0.7, 0.7, 0.7), 118.651)
if "particle_261 geometry" not in marker_sets:
s=new_marker_set('particle_261 geometry')
marker_sets["particle_261 geometry"]=s
s= marker_sets["particle_261 geometry"]
mark=s.place_marker((2393.7, 2245.28, 2251), (0.7, 0.7, 0.7), 219.664)
if "particle_262 geometry" not in marker_sets:
s=new_marker_set('particle_262 geometry')
marker_sets["particle_262 geometry"]=s
s= marker_sets["particle_262 geometry"]
mark=s.place_marker((2130.91, 2615.46, 2637.3), (0.7, 0.7, 0.7), 196.018)
if "particle_263 geometry" not in marker_sets:
s=new_marker_set('particle_263 geometry')
marker_sets["particle_263 geometry"]=s
s= marker_sets["particle_263 geometry"]
mark=s.place_marker((1978.03, 2919.15, 3023.01), (0.7, 0.7, 0.7), 218.141)
if "particle_264 geometry" not in marker_sets:
s=new_marker_set('particle_264 geometry')
marker_sets["particle_264 geometry"]=s
s= marker_sets["particle_264 geometry"]
mark=s.place_marker((1913.98, 2674.6, 3267.73), (0.7, 0.7, 0.7), 181.636)
if "particle_265 geometry" not in marker_sets:
s=new_marker_set('particle_265 geometry')
marker_sets["particle_265 geometry"]=s
s= marker_sets["particle_265 geometry"]
mark=s.place_marker((2006.76, 2430.48, 3134.52), (0.7, 0.7, 0.7), 195.003)
if "particle_266 geometry" not in marker_sets:
s=new_marker_set('particle_266 geometry')
marker_sets["particle_266 geometry"]=s
s= marker_sets["particle_266 geometry"]
mark=s.place_marker((1854.14, 2612.26, 3202.94), (0.7, 0.7, 0.7), 139.209)
if "particle_267 geometry" not in marker_sets:
s=new_marker_set('particle_267 geometry')
marker_sets["particle_267 geometry"]=s
s= marker_sets["particle_267 geometry"]
mark=s.place_marker((1774.05, 2589.86, 3195.21), (0.7, 0.7, 0.7), 189.885)
if "particle_268 geometry" not in marker_sets:
s=new_marker_set('particle_268 geometry')
marker_sets["particle_268 geometry"]=s
s= marker_sets["particle_268 geometry"]
mark=s.place_marker((1686.58, 2541.08, 2851.5), (0.7, 0.7, 0.7), 267.674)
if "particle_269 geometry" not in marker_sets:
s=new_marker_set('particle_269 geometry')
marker_sets["particle_269 geometry"]=s
s= marker_sets["particle_269 geometry"]
mark=s.place_marker((1617.88, 2753.4, 2333.04), (0.7, 0.7, 0.7), 196.568)
if "particle_270 geometry" not in marker_sets:
s=new_marker_set('particle_270 geometry')
marker_sets["particle_270 geometry"]=s
s= marker_sets["particle_270 geometry"]
mark=s.place_marker((1404.26, 2643.13, 2428.83), (0.7, 0.7, 0.7), 192.423)
if "particle_271 geometry" not in marker_sets:
s=new_marker_set('particle_271 geometry')
marker_sets["particle_271 geometry"]=s
s= marker_sets["particle_271 geometry"]
mark=s.place_marker((1243.68, 2626.83, 2808.78), (1, 0.7, 0), 202.405)
if "particle_272 geometry" not in marker_sets:
s=new_marker_set('particle_272 geometry')
marker_sets["particle_272 geometry"]=s
s= marker_sets["particle_272 geometry"]
mark=s.place_marker((1454.73, 2661.18, 1958.52), (0.7, 0.7, 0.7), 135.529)
if "particle_273 geometry" not in marker_sets:
s=new_marker_set('particle_273 geometry')
marker_sets["particle_273 geometry"]=s
s= marker_sets["particle_273 geometry"]
mark=s.place_marker((1602.37, 2601.07, 965.1), (0.7, 0.7, 0.7), 114.21)
if "particle_274 geometry" not in marker_sets:
s=new_marker_set('particle_274 geometry')
marker_sets["particle_274 geometry"]=s
s= marker_sets["particle_274 geometry"]
mark=s.place_marker((1861.23, 2398.4, 982.016), (0.7, 0.7, 0.7), 159.133)
if "particle_275 geometry" not in marker_sets:
s=new_marker_set('particle_275 geometry')
marker_sets["particle_275 geometry"]=s
s= marker_sets["particle_275 geometry"]
mark=s.place_marker((2143.98, 2467.14, 1274.86), (0.7, 0.7, 0.7), 144.412)
if "particle_276 geometry" not in marker_sets:
s=new_marker_set('particle_276 geometry')
marker_sets["particle_276 geometry"]=s
s= marker_sets["particle_276 geometry"]
mark=s.place_marker((2360.44, 2555.39, 1493.72), (0.7, 0.7, 0.7), 70.8525)
if "particle_277 geometry" not in marker_sets:
s=new_marker_set('particle_277 geometry')
marker_sets["particle_277 geometry"]=s
s= marker_sets["particle_277 geometry"]
mark=s.place_marker((2209.13, 2541.23, 2109.34), (0.7, 0.7, 0.7), 141.874)
if "particle_278 geometry" not in marker_sets:
s=new_marker_set('particle_278 geometry')
marker_sets["particle_278 geometry"]=s
s= marker_sets["particle_278 geometry"]
mark=s.place_marker((1982.93, 2556.04, 2681.49), (0.7, 0.7, 0.7), 217.337)
if "particle_279 geometry" not in marker_sets:
s=new_marker_set('particle_279 geometry')
marker_sets["particle_279 geometry"]=s
s= marker_sets["particle_279 geometry"]
mark=s.place_marker((1967.27, 2606.91, 2692.17), (0.7, 0.7, 0.7), 237.641)
if "particle_280 geometry" not in marker_sets:
s=new_marker_set('particle_280 geometry')
marker_sets["particle_280 geometry"]=s
s= marker_sets["particle_280 geometry"]
mark=s.place_marker((2225.94, 2798.1, 2364.25), (0.7, 0.7, 0.7), 229.393)
if "particle_281 geometry" not in marker_sets:
s=new_marker_set('particle_281 geometry')
marker_sets["particle_281 geometry"]=s
s= marker_sets["particle_281 geometry"]
mark=s.place_marker((2154.36, 3256.6, 2778.16), (0.7, 0.7, 0.7), 349.906)
if "particle_282 geometry" not in marker_sets:
s=new_marker_set('particle_282 geometry')
marker_sets["particle_282 geometry"]=s
s= marker_sets["particle_282 geometry"]
mark=s.place_marker((1749.13, 3572.68, 3074.5), (0.7, 0.7, 0.7), 162.347)
if "particle_283 geometry" not in marker_sets:
s=new_marker_set('particle_283 geometry')
marker_sets["particle_283 geometry"]=s
s= marker_sets["particle_283 geometry"]
mark=s.place_marker((1631.81, 3723.78, 3083.1), (0.7, 0.7, 0.7), 194.072)
if "particle_284 geometry" not in marker_sets:
s=new_marker_set('particle_284 geometry')
marker_sets["particle_284 geometry"]=s
s= marker_sets["particle_284 geometry"]
mark=s.place_marker((1744.01, 3856.42, 3022.9), (0.7, 0.7, 0.7), 242.21)
if "particle_285 geometry" not in marker_sets:
s=new_marker_set('particle_285 geometry')
marker_sets["particle_285 geometry"]=s
s= marker_sets["particle_285 geometry"]
mark=s.place_marker((1514.28, 4171.65, 2624.94), (0.7, 0.7, 0.7), 320.93)
if "particle_286 geometry" not in marker_sets:
s=new_marker_set('particle_286 geometry')
marker_sets["particle_286 geometry"]=s
s= marker_sets["particle_286 geometry"]
mark=s.place_marker((1417.01, 4727.14, 2595.36), (0.7, 0.7, 0.7), 226.432)
if "particle_287 geometry" not in marker_sets:
s=new_marker_set('particle_287 geometry')
marker_sets["particle_287 geometry"]=s
s= marker_sets["particle_287 geometry"]
mark=s.place_marker((1599.28, 4740.84, 2944.67), (0.7, 0.7, 0.7), 125.208)
if "particle_288 geometry" not in marker_sets:
s=new_marker_set('particle_288 geometry')
marker_sets["particle_288 geometry"]=s
s= marker_sets["particle_288 geometry"]
mark=s.place_marker((1875.79, 4525.93, 3300.12), (0.7, 0.7, 0.7), 197.837)
if "particle_289 geometry" not in marker_sets:
s=new_marker_set('particle_289 geometry')
marker_sets["particle_289 geometry"]=s
s= marker_sets["particle_289 geometry"]
mark=s.place_marker((2353.09, 4877.07, 3517.33), (0.7, 0.7, 0.7), 167.804)
if "particle_290 geometry" not in marker_sets:
s=new_marker_set('particle_290 geometry')
marker_sets["particle_290 geometry"]=s
s= marker_sets["particle_290 geometry"]
mark=s.place_marker((2851.79, 5547.68, 3601.91), (0.7, 0.7, 0.7), 136.84)
if "particle_291 geometry" not in marker_sets:
s=new_marker_set('particle_291 geometry')
marker_sets["particle_291 geometry"]=s
s= marker_sets["particle_291 geometry"]
mark=s.place_marker((2964.36, 5599.05, 3158.08), (0.7, 0.7, 0.7), 85.7421)
if "particle_292 geometry" not in marker_sets:
s=new_marker_set('particle_292 geometry')
marker_sets["particle_292 geometry"]=s
s= marker_sets["particle_292 geometry"]
mark=s.place_marker((2006.33, 4623.23, 2803.49), (1, 0.7, 0), 256)
if "particle_293 geometry" not in marker_sets:
s=new_marker_set('particle_293 geometry')
marker_sets["particle_293 geometry"]=s
s= marker_sets["particle_293 geometry"]
mark=s.place_marker((2763.82, 4954.71, 3573.14), (0.7, 0.7, 0.7), 138.702)
if "particle_294 geometry" not in marker_sets:
s=new_marker_set('particle_294 geometry')
marker_sets["particle_294 geometry"]=s
s= marker_sets["particle_294 geometry"]
mark=s.place_marker((2977.09, 5073.16, 3978.7), (0.7, 0.7, 0.7), 140.732)
if "particle_295 geometry" not in marker_sets:
s=new_marker_set('particle_295 geometry')
marker_sets["particle_295 geometry"]=s
s= marker_sets["particle_295 geometry"]
mark=s.place_marker((2747.2, 5182.93, 3793), (0.7, 0.7, 0.7), 81.3006)
if "particle_296 geometry" not in marker_sets:
s=new_marker_set('particle_296 geometry')
marker_sets["particle_296 geometry"]=s
s= marker_sets["particle_296 geometry"]
mark=s.place_marker((2613.38, 5571.23, 3639.87), (0.7, 0.7, 0.7), 133.837)
if "particle_297 geometry" not in marker_sets:
s=new_marker_set('particle_297 geometry')
marker_sets["particle_297 geometry"]=s
s= marker_sets["particle_297 geometry"]
mark=s.place_marker((2249.33, 5128, 3355.43), (0.7, 0.7, 0.7), 98.3475)
if "particle_298 geometry" not in marker_sets:
s=new_marker_set('particle_298 geometry')
marker_sets["particle_298 geometry"]=s
s= marker_sets["particle_298 geometry"]
mark=s.place_marker((1888.01, 4387.55, 3277.2), (0.7, 0.7, 0.7), 297.623)
if "particle_299 geometry" not in marker_sets:
s=new_marker_set('particle_299 geometry')
marker_sets["particle_299 geometry"]=s
s= marker_sets["particle_299 geometry"]
mark=s.place_marker((1741.6, 4088.53, 3015.72), (0.7, 0.7, 0.7), 212.938)
if "particle_300 geometry" not in marker_sets:
s=new_marker_set('particle_300 geometry')
marker_sets["particle_300 geometry"]=s
s= marker_sets["particle_300 geometry"]
mark=s.place_marker((1556.69, 4156.26, 3126.21), (0.7, 0.7, 0.7), 154.183)
if "particle_301 geometry" not in marker_sets:
s=new_marker_set('particle_301 geometry')
marker_sets["particle_301 geometry"]=s
s= marker_sets["particle_301 geometry"]
mark=s.place_marker((1365.01, 4365.34, 2818.6), (0.7, 0.7, 0.7), 180.832)
if "particle_302 geometry" not in marker_sets:
s=new_marker_set('particle_302 geometry')
marker_sets["particle_302 geometry"]=s
s= marker_sets["particle_302 geometry"]
mark=s.place_marker((1404.18, 4466.14, 2446.02), (0.7, 0.7, 0.7), 122.332)
if "particle_303 geometry" not in marker_sets:
s=new_marker_set('particle_303 geometry')
marker_sets["particle_303 geometry"]=s
s= marker_sets["particle_303 geometry"]
mark=s.place_marker((1600.87, 4490.32, 2114.95), (0.7, 0.7, 0.7), 209.047)
if "particle_304 geometry" not in marker_sets:
s=new_marker_set('particle_304 geometry')
marker_sets["particle_304 geometry"]=s
s= marker_sets["particle_304 geometry"]
mark=s.place_marker((1213, 4583.84, 2043.68), (0.7, 0.7, 0.7), 126.985)
if "particle_305 geometry" not in marker_sets:
s=new_marker_set('particle_305 geometry')
marker_sets["particle_305 geometry"]=s
s= marker_sets["particle_305 geometry"]
mark=s.place_marker((995.579, 4930.98, 1958.38), (0.7, 0.7, 0.7), 122.205)
if "particle_306 geometry" not in marker_sets:
s=new_marker_set('particle_306 geometry')
marker_sets["particle_306 geometry"]=s
s= marker_sets["particle_306 geometry"]
mark=s.place_marker((934.263, 5133.49, 2130.55), (0.7, 0.7, 0.7), 107.95)
if "particle_307 geometry" not in marker_sets:
s=new_marker_set('particle_307 geometry')
marker_sets["particle_307 geometry"]=s
s= marker_sets["particle_307 geometry"]
mark=s.place_marker((1079.52, 4646.56, 2451.81), (0.7, 0.7, 0.7), 182.567)
if "particle_308 geometry" not in marker_sets:
s=new_marker_set('particle_308 geometry')
marker_sets["particle_308 geometry"]=s
s= marker_sets["particle_308 geometry"]
mark=s.place_marker((1416.26, 4209.56, 2759.84), (0.7, 0.7, 0.7), 185.274)
if "particle_309 geometry" not in marker_sets:
s=new_marker_set('particle_309 geometry')
marker_sets["particle_309 geometry"]=s
s= marker_sets["particle_309 geometry"]
mark=s.place_marker((1836.22, 3937.48, 2775.48), (0.7, 0.7, 0.7), 413.567)
if "particle_310 geometry" not in marker_sets:
s=new_marker_set('particle_310 geometry')
marker_sets["particle_310 geometry"]=s
s= marker_sets["particle_310 geometry"]
mark=s.place_marker((1793.81, 3798.47, 2922.96), (0.7, 0.7, 0.7), 240.01)
if "particle_311 geometry" not in marker_sets:
s=new_marker_set('particle_311 geometry')
marker_sets["particle_311 geometry"]=s
s= marker_sets["particle_311 geometry"]
mark=s.place_marker((1787.91, 3811.51, 2894.69), (0.7, 0.7, 0.7), 238.995)
if "particle_312 geometry" not in marker_sets:
s=new_marker_set('particle_312 geometry')
marker_sets["particle_312 geometry"]=s
s= marker_sets["particle_312 geometry"]
mark=s.place_marker((1618.94, 4040.08, 3244.47), (0.7, 0.7, 0.7), 203.674)
if "particle_313 geometry" not in marker_sets:
s=new_marker_set('particle_313 geometry')
marker_sets["particle_313 geometry"]=s
s= marker_sets["particle_313 geometry"]
mark=s.place_marker((1323.46, 4411.62, 3713.8), (0.7, 0.7, 0.7), 266.744)
if "particle_314 geometry" not in marker_sets:
s=new_marker_set('particle_314 geometry')
marker_sets["particle_314 geometry"]=s
s= marker_sets["particle_314 geometry"]
mark=s.place_marker((1101.05, 4322.26, 3548.45), (0.7, 0.7, 0.7), 147.585)
if "particle_315 geometry" not in marker_sets:
s=new_marker_set('particle_315 geometry')
marker_sets["particle_315 geometry"]=s
s= marker_sets["particle_315 geometry"]
mark=s.place_marker((1188.08, 4110.91, 3297.5), (0.7, 0.7, 0.7), 249.485)
if "particle_316 geometry" not in marker_sets:
s=new_marker_set('particle_316 geometry')
marker_sets["particle_316 geometry"]=s
s= marker_sets["particle_316 geometry"]
mark=s.place_marker((1418.32, 4129.72, 3608.95), (0.7, 0.7, 0.7), 119.371)
if "particle_317 geometry" not in marker_sets:
s=new_marker_set('particle_317 geometry')
marker_sets["particle_317 geometry"]=s
s= marker_sets["particle_317 geometry"]
mark=s.place_marker((1674.63, 4673.68, 3801.08), (0.7, 0.7, 0.7), 155.875)
if "particle_318 geometry" not in marker_sets:
s=new_marker_set('particle_318 geometry')
marker_sets["particle_318 geometry"]=s
s= marker_sets["particle_318 geometry"]
mark=s.place_marker((2214.4, 5259.49, 3625.18), (0.7, 0.7, 0.7), 189.419)
if "particle_319 geometry" not in marker_sets:
s=new_marker_set('particle_319 geometry')
marker_sets["particle_319 geometry"]=s
s= marker_sets["particle_319 geometry"]
mark=s.place_marker((2681.62, 5114.91, 3429.69), (0.7, 0.7, 0.7), 137.475)
if "particle_320 geometry" not in marker_sets:
s=new_marker_set('particle_320 geometry')
marker_sets["particle_320 geometry"]=s
s= marker_sets["particle_320 geometry"]
mark=s.place_marker((2837.11, 4707.1, 3512.91), (0.7, 0.7, 0.7), 176.179)
if "particle_321 geometry" not in marker_sets:
s=new_marker_set('particle_321 geometry')
marker_sets["particle_321 geometry"]=s
s= marker_sets["particle_321 geometry"]
mark=s.place_marker((3094.1, 4450.25, 3553.6), (0.7, 0.7, 0.7), 138.829)
if "particle_322 geometry" not in marker_sets:
s=new_marker_set('particle_322 geometry')
marker_sets["particle_322 geometry"]=s
s= marker_sets["particle_322 geometry"]
mark=s.place_marker((3394.8, 4506.83, 3551.07), (0.7, 0.7, 0.7), 148.727)
if "particle_323 geometry" not in marker_sets:
s=new_marker_set('particle_323 geometry')
marker_sets["particle_323 geometry"]=s
s= marker_sets["particle_323 geometry"]
mark=s.place_marker((3716.56, 4817.81, 3566.38), (0.7, 0.7, 0.7), 230.323)
if "particle_324 geometry" not in marker_sets:
s=new_marker_set('particle_324 geometry')
marker_sets["particle_324 geometry"]=s
s= marker_sets["particle_324 geometry"]
mark=s.place_marker((3299.59, 4705.88, 3068.03), (0.7, 0.7, 0.7), 175.376)
if "particle_325 geometry" not in marker_sets:
s=new_marker_set('particle_325 geometry')
marker_sets["particle_325 geometry"]=s
s= marker_sets["particle_325 geometry"]
mark=s.place_marker((2980.5, 4419.97, 2737.72), (0.7, 0.7, 0.7), 161.163)
if "particle_326 geometry" not in marker_sets:
s=new_marker_set('particle_326 geometry')
marker_sets["particle_326 geometry"]=s
s= marker_sets["particle_326 geometry"]
mark=s.place_marker((3251.57, 4241.33, 2357.82), (0.7, 0.7, 0.7), 125.885)
if "particle_327 geometry" not in marker_sets:
s=new_marker_set('particle_327 geometry')
marker_sets["particle_327 geometry"]=s
s= marker_sets["particle_327 geometry"]
mark=s.place_marker((3422.81, 4298.57, 1903.02), (0.7, 0.7, 0.7), 206.635)
if "particle_328 geometry" not in marker_sets:
s=new_marker_set('particle_328 geometry')
marker_sets["particle_328 geometry"]=s
s= marker_sets["particle_328 geometry"]
mark=s.place_marker((3477.53, 3941.37, 2204.33), (0.7, 0.7, 0.7), 151.392)
if "particle_329 geometry" not in marker_sets:
s=new_marker_set('particle_329 geometry')
marker_sets["particle_329 geometry"]=s
s= marker_sets["particle_329 geometry"]
mark=s.place_marker((3463.69, 3732.6, 2545.5), (0.7, 0.7, 0.7), 173.388)
if "particle_330 geometry" not in marker_sets:
s=new_marker_set('particle_330 geometry')
marker_sets["particle_330 geometry"]=s
s= marker_sets["particle_330 geometry"]
mark=s.place_marker((3602.19, 3896.14, 2823.39), (0.7, 0.7, 0.7), 135.825)
if "particle_331 geometry" not in marker_sets:
s=new_marker_set('particle_331 geometry')
marker_sets["particle_331 geometry"]=s
s= marker_sets["particle_331 geometry"]
mark=s.place_marker((3802.89, 4242.74, 2985), (0.7, 0.7, 0.7), 186.839)
if "particle_332 geometry" not in marker_sets:
s=new_marker_set('particle_332 geometry')
marker_sets["particle_332 geometry"]=s
s= marker_sets["particle_332 geometry"]
mark=s.place_marker((4022.57, 4650.73, 3065.07), (0.7, 0.7, 0.7), 121.189)
if "particle_333 geometry" not in marker_sets:
s=new_marker_set('particle_333 geometry')
marker_sets["particle_333 geometry"]=s
s= marker_sets["particle_333 geometry"]
mark=s.place_marker((3677.06, 4533.81, 2832.44), (0.7, 0.7, 0.7), 102.916)
if "particle_334 geometry" not in marker_sets:
s=new_marker_set('particle_334 geometry')
marker_sets["particle_334 geometry"]=s
s= marker_sets["particle_334 geometry"]
mark=s.place_marker((3117.98, 4342.8, 2603.03), (0.7, 0.7, 0.7), 212.769)
if "particle_335 geometry" not in marker_sets:
s=new_marker_set('particle_335 geometry')
marker_sets["particle_335 geometry"]=s
s= marker_sets["particle_335 geometry"]
mark=s.place_marker((2598.93, 3891.44, 2483.26), (0.7, 0.7, 0.7), 173.092)
if "particle_336 geometry" not in marker_sets:
s=new_marker_set('particle_336 geometry')
marker_sets["particle_336 geometry"]=s
s= marker_sets["particle_336 geometry"]
mark=s.place_marker((2153.8, 3760.51, 2258), (0.7, 0.7, 0.7), 264.502)
if "particle_337 geometry" not in marker_sets:
s=new_marker_set('particle_337 geometry')
marker_sets["particle_337 geometry"]=s
s= marker_sets["particle_337 geometry"]
mark=s.place_marker((1792.09, 4006.76, 1898.26), (0.7, 0.7, 0.7), 208.666)
if "particle_338 geometry" not in marker_sets:
s=new_marker_set('particle_338 geometry')
marker_sets["particle_338 geometry"]=s
s= marker_sets["particle_338 geometry"]
mark=s.place_marker((1454.92, 4368.7, 1805.55), (0.7, 0.7, 0.7), 186.797)
if "particle_339 geometry" not in marker_sets:
s=new_marker_set('particle_339 geometry')
marker_sets["particle_339 geometry"]=s
s= marker_sets["particle_339 geometry"]
mark=s.place_marker((1032.27, 4332.61, 2088.81), (0.7, 0.7, 0.7), 255.534)
if "particle_340 geometry" not in marker_sets:
s=new_marker_set('particle_340 geometry')
marker_sets["particle_340 geometry"]=s
s= marker_sets["particle_340 geometry"]
mark=s.place_marker((917.583, 4618.51, 2389.84), (0.7, 0.7, 0.7), 153.126)
if "particle_341 geometry" not in marker_sets:
s=new_marker_set('particle_341 geometry')
marker_sets["particle_341 geometry"]=s
s= marker_sets["particle_341 geometry"]
mark=s.place_marker((832.234, 4811.13, 2064.19), (0.7, 0.7, 0.7), 165.816)
if "particle_342 geometry" not in marker_sets:
s=new_marker_set('particle_342 geometry')
marker_sets["particle_342 geometry"]=s
s= marker_sets["particle_342 geometry"]
mark=s.place_marker((793.39, 4441.59, 1973.26), (0.7, 0.7, 0.7), 134.429)
if "particle_343 geometry" not in marker_sets:
s=new_marker_set('particle_343 geometry')
marker_sets["particle_343 geometry"]=s
s= marker_sets["particle_343 geometry"]
mark=s.place_marker((1096.24, 4220.27, 1896.12), (0.7, 0.7, 0.7), 178.971)
if "particle_344 geometry" not in marker_sets:
s=new_marker_set('particle_344 geometry')
marker_sets["particle_344 geometry"]=s
s= marker_sets["particle_344 geometry"]
mark=s.place_marker((1567.77, 4381.5, 1816.86), (0.7, 0.7, 0.7), 189.969)
if "particle_345 geometry" not in marker_sets:
s=new_marker_set('particle_345 geometry')
marker_sets["particle_345 geometry"]=s
s= marker_sets["particle_345 geometry"]
mark=s.place_marker((1820.59, 4762.51, 1399.26), (0.7, 0.7, 0.7), 121.359)
if "particle_346 geometry" not in marker_sets:
s=new_marker_set('particle_346 geometry')
marker_sets["particle_346 geometry"]=s
s= marker_sets["particle_346 geometry"]
mark=s.place_marker((2367.48, 4798.73, 1425.94), (0.7, 0.7, 0.7), 187.262)
if "particle_347 geometry" not in marker_sets:
s=new_marker_set('particle_347 geometry')
marker_sets["particle_347 geometry"]=s
s= marker_sets["particle_347 geometry"]
mark=s.place_marker((2844.6, 4440.88, 1717.1), (0.7, 0.7, 0.7), 164.335)
if "particle_348 geometry" not in marker_sets:
s=new_marker_set('particle_348 geometry')
marker_sets["particle_348 geometry"]=s
s= marker_sets["particle_348 geometry"]
mark=s.place_marker((3090.93, 4492.54, 2190.1), (0.7, 0.7, 0.7), 138.363)
if "particle_349 geometry" not in marker_sets:
s=new_marker_set('particle_349 geometry')
marker_sets["particle_349 geometry"]=s
s= marker_sets["particle_349 geometry"]
mark=s.place_marker((3375.82, 4555.41, 2461.2), (0.7, 0.7, 0.7), 138.49)
if "particle_350 geometry" not in marker_sets:
s=new_marker_set('particle_350 geometry')
marker_sets["particle_350 geometry"]=s
s= marker_sets["particle_350 geometry"]
mark=s.place_marker((3559.98, 4278.52, 2349.28), (0.7, 0.7, 0.7), 116.325)
if "particle_351 geometry" not in marker_sets:
s=new_marker_set('particle_351 geometry')
marker_sets["particle_351 geometry"]=s
s= marker_sets["particle_351 geometry"]
mark=s.place_marker((3219.37, 4120.93, 2088.26), (0.7, 0.7, 0.7), 106.511)
if "particle_352 geometry" not in marker_sets:
s=new_marker_set('particle_352 geometry')
marker_sets["particle_352 geometry"]=s
s= marker_sets["particle_352 geometry"]
mark=s.place_marker((2678.89, 4152.75, 1932.33), (0.7, 0.7, 0.7), 151.096)
if "particle_353 geometry" not in marker_sets:
s=new_marker_set('particle_353 geometry')
marker_sets["particle_353 geometry"]=s
s= marker_sets["particle_353 geometry"]
mark=s.place_marker((2063.61, 4406.92, 1751.96), (0.7, 0.7, 0.7), 240.856)
if "particle_354 geometry" not in marker_sets:
s=new_marker_set('particle_354 geometry')
marker_sets["particle_354 geometry"]=s
s= marker_sets["particle_354 geometry"]
mark=s.place_marker((1616.97, 4668.27, 1699.07), (0.7, 0.7, 0.7), 149.7)
if "particle_355 geometry" not in marker_sets:
s=new_marker_set('particle_355 geometry')
marker_sets["particle_355 geometry"]=s
s= marker_sets["particle_355 geometry"]
mark=s.place_marker((1300.25, 4566.03, 1707.43), (0.7, 0.7, 0.7), 165.943)
if "particle_356 geometry" not in marker_sets:
s=new_marker_set('particle_356 geometry')
marker_sets["particle_356 geometry"]=s
s= marker_sets["particle_356 geometry"]
mark=s.place_marker((1238.11, 4131.84, 2125.8), (0.7, 0.7, 0.7), 178.971)
if "particle_357 geometry" not in marker_sets:
s=new_marker_set('particle_357 geometry')
marker_sets["particle_357 geometry"]=s
s= marker_sets["particle_357 geometry"]
mark=s.place_marker((926.715, 3527.54, 2460.26), (0.7, 0.7, 0.7), 154.945)
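# Hand any surface models collected in surf_sets (presumably populated earlier
# in this generated script) over to Chimera so they become part of the open
# session.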
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| batxes/4c2vhic | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/SHH_WT_models_highres40612.py | Python | gpl-3.0 | 88,200 |
import fontaine
import os
import os.path
import re
import unicodedata
from fontaine.ext.base import BaseExt
UNICODE_VALUE_REGEX = re.compile('^(?P<bx>0x)?(?P<begr>[0-9a-f]+)(\-(?!0x)(?P<endr>[0-9a-f]+))?', re.I)
INCLUDE_REGEX = re.compile('include ([\w]+.orth)', re.I | re.U | re.S)
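# UNICODE_VALUE_REGEX matches one codepoint entry per line, written as bare hex
# ("00e5"), "0x"-prefixed hex, or a range such as "0041-005a"; INCLUDE_REGEX
# picks up "include xx.orth" directives so included orthographies can be merged
# into the including charset.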
dirname = os.path.dirname(fontaine.__file__)
ORTH_SOURCE_DIR = os.path.join(dirname, 'charsets', 'fontconfig', 'fc-lang')
class Extension(BaseExt):
extension_name = 'fontconfig'
description = 'FontConfig collection'
@staticmethod
def __getcharsets__():
for ext in Extension.iterate_orth():
unicodes, common_name, abbr = Extension.get_orth_charset(ext)
if not common_name:
continue
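# Each charset is exposed as a lightweight anonymous class whose attributes
# (glyphs, common_name, native_name, abbreviation, short_name) appear to mirror
# the charset interface the rest of fontaine expects.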
yield type('Charset', (object,),
dict(glyphs=unicodes, common_name=common_name,
native_name='', abbreviation=abbr,
short_name=unicodedata.normalize('NFKD', u'fontconfig-{}'.format(abbr))))
@staticmethod
def iterate_orth():
if not os.path.exists(ORTH_SOURCE_DIR):
return []
result = []
for fileorth in os.listdir(ORTH_SOURCE_DIR):
if os.path.splitext(fileorth)[1] == '.orth':
result.append(os.path.join(ORTH_SOURCE_DIR, fileorth))
return result
@staticmethod
def get_string_glyphlist(filename, content):
glyphs = []
fn, ext = os.path.splitext(os.path.basename(filename))
# unordinarynames = {
# ''
# }
common_name_regex = re.compile(u'#\s+([\u00E5\u00ED\u00E1,\s\(\)\'\w/-]+)\s*\(([\w_-]{2,6})\)', re.I | re.U | re.S)
content = content.replace('# Chinese (traditional) ZH-TW', '# Chinese traditional (ZH-TW)')
common_name_match = common_name_regex.search(content)
if common_name_match:
common_name = u'%s (fc-lang/%s.orth)'
common_name = common_name % (common_name_match.group(1), fn)
else:
# print(fn)
# print(content.decode('utf-8', 'ignore'))
return [], '', ''
for line in content.split('\n'):
unicode_match = UNICODE_VALUE_REGEX.match(line.strip())
if not unicode_match:
continue
value = '0x' + unicode_match.group('begr')
if unicode_match.group('endr'):
value = value + '-0x' + unicode_match.group('endr')
glyphs.append(value)
regex = INCLUDE_REGEX.search(content)
if regex:
include = os.path.join(ORTH_SOURCE_DIR, regex.group(1))
with open(include) as fp:
content = fp.read()
name, abbr, ng = Extension.get_string_glyphlist(include, content)
if name and ng:
glyphs += ng.split(',')
common_name += u' + %s' % name
return common_name, common_name_match.group(2), ','.join(glyphs)
@staticmethod
def get_orth_charset(orthfile):
with open(orthfile) as fp:
content = fp.read()
name, abbr, glyphlist = Extension.get_string_glyphlist(orthfile, content)
if not name:
return [], '', ''
return Extension.convert_to_list_of_unicodes(glyphlist), name, abbr
| davelab6/pyfontaine | fontaine/ext/fontconfig.py | Python | gpl-3.0 | 3,314 |
#!/usr/bin/python
"""
PDU
"""
import re
import socket
import struct
try:
import netifaces
except ImportError:
netifaces = None
from .settings import settings
from .debugging import ModuleLogger, bacpypes_debugging, btox, xtob
from .comm import PCI as _PCI, PDUData
# pack/unpack constants
_short_mask = 0xFFFF
_long_mask = 0xFFFFFFFF
# some debugging
_debug = 0
_log = ModuleLogger(globals())
#
# Address
#
_field_address = r"((?:\d+)|(?:0x(?:[0-9A-Fa-f][0-9A-Fa-f])+))"
_ip_address_port = r"(\d+\.\d+\.\d+\.\d+)(?::(\d+))?"
_ip_address_mask_port = r"(\d+\.\d+\.\d+\.\d+)(?:/(\d+))?(?::(\d+))?"
_net_ip_address_port = r"(\d+):" + _ip_address_port
_at_route = "(?:[@](?:" + _field_address + "|" + _ip_address_port + "))?"
field_address_re = re.compile("^" + _field_address + "$")
ip_address_port_re = re.compile("^" + _ip_address_port + "$")
ip_address_mask_port_re = re.compile("^" + _ip_address_mask_port + "$")
net_ip_address_port_re = re.compile("^" + _net_ip_address_port + "$")
net_ip_address_mask_port_re = re.compile("^" + _net_ip_address_port + "$")
ethernet_re = re.compile(r'^([0-9A-Fa-f][0-9A-Fa-f][:]){5}([0-9A-Fa-f][0-9A-Fa-f])$')
interface_re = re.compile(r'^(?:([\w]+))(?::(\d+))?$')
net_broadcast_route_re = re.compile("^([0-9])+:[*]" + _at_route + "$")
net_station_route_re = re.compile("^([0-9])+:" + _field_address + _at_route + "$")
net_ip_address_route_re = re.compile("^([0-9])+:" + _ip_address_port + _at_route + "$")
combined_pattern = re.compile("^(?:(?:([0-9]+)|([*])):)?(?:([*])|" + _field_address + "|" + _ip_address_mask_port + ")" + _at_route + "$")
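# combined_pattern accepts an optional "<net>:" or "*:" prefix, then either "*",
# a simple or hex field address, or an IPv4 address with optional /mask and
# :port, followed by an optional "@<router>" suffix. For example: "12",
# "0x1234", "2:5", "3:*", "*:*", "192.168.0.10/24:47809", "2:5@192.168.0.1".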
@bacpypes_debugging
class Address:
nullAddr = 0
localBroadcastAddr = 1
localStationAddr = 2
remoteBroadcastAddr = 3
remoteStationAddr = 4
globalBroadcastAddr = 5
def __init__(self, *args):
if _debug: Address._debug("__init__ %r", args)
self.addrType = Address.nullAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
self.addrRoute = None
if len(args) == 1:
self.decode_address(args[0])
elif len(args) == 2:
self.decode_address(args[1])
if self.addrType == Address.localStationAddr:
self.addrType = Address.remoteStationAddr
self.addrNet = args[0]
elif self.addrType == Address.localBroadcastAddr:
self.addrType = Address.remoteBroadcastAddr
self.addrNet = args[0]
else:
raise ValueError("unrecognized address ctor form")
def decode_address(self, addr):
"""Initialize the address from a string. Lots of different forms are supported."""
if _debug: Address._debug("decode_address %r (%s)", addr, type(addr))
# start out assuming this is a local station and didn't get routed
self.addrType = Address.localStationAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
self.addrRoute = None
if addr == "*":
if _debug: Address._debug(" - localBroadcast")
self.addrType = Address.localBroadcastAddr
elif addr == "*:*":
if _debug: Address._debug(" - globalBroadcast")
self.addrType = Address.globalBroadcastAddr
elif isinstance(addr, int):
if _debug: Address._debug(" - int")
if (addr < 0) or (addr >= 256):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
elif isinstance(addr, (bytes, bytearray)):
if _debug: Address._debug(" - bytes or bytearray")
self.addrAddr = bytes(addr)
self.addrLen = len(addr)
if self.addrLen == 6:
self.addrIP = struct.unpack('!L', addr[:4])[0]
self.addrMask = (1 << 32) - 1
self.addrHost = (self.addrIP & ~self.addrMask)
self.addrSubnet = (self.addrIP & self.addrMask)
self.addrPort = struct.unpack(">H", addr[4:])[0]
self.addrTuple = (socket.inet_ntoa(addr[:4]), self.addrPort)
self.addrBroadcastTuple = ('255.255.255.255', self.addrPort)
elif isinstance(addr, str):
if _debug: Address._debug(" - str")
m = combined_pattern.match(addr)
if m:
if _debug: Address._debug(" - combined pattern")
(net, global_broadcast,
local_broadcast,
local_addr,
local_ip_addr, local_ip_net, local_ip_port,
route_addr, route_ip_addr, route_ip_port
) = m.groups()
if global_broadcast and local_broadcast:
if _debug: Address._debug(" - global broadcast")
self.addrType = Address.globalBroadcastAddr
elif net and local_broadcast:
if _debug: Address._debug(" - remote broadcast")
net_addr = int(net)
if (net_addr >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteBroadcastAddr
self.addrNet = net_addr
elif local_broadcast:
if _debug: Address._debug(" - local broadcast")
self.addrType = Address.localBroadcastAddr
elif net:
if _debug: Address._debug(" - remote station")
net_addr = int(net)
if (net_addr >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net_addr
if local_addr:
if _debug: Address._debug(" - simple address")
if local_addr.startswith("0x"):
self.addrAddr = xtob(local_addr[2:])
self.addrLen = len(self.addrAddr)
else:
local_addr = int(local_addr)
if local_addr >= 256:
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', local_addr)
self.addrLen = 1
if local_ip_addr:
if _debug: Address._debug(" - ip address")
if not local_ip_port:
local_ip_port = '47808'
if not local_ip_net:
local_ip_net = '32'
self.addrPort = int(local_ip_port)
self.addrTuple = (local_ip_addr, self.addrPort)
if _debug: Address._debug(" - addrTuple: %r", self.addrTuple)
addrstr = socket.inet_aton(local_ip_addr)
self.addrIP = struct.unpack('!L', addrstr)[0]
self.addrMask = (_long_mask << (32 - int(local_ip_net))) & _long_mask
self.addrHost = (self.addrIP & ~self.addrMask)
self.addrSubnet = (self.addrIP & self.addrMask)
bcast = (self.addrSubnet | ~self.addrMask)
self.addrBroadcastTuple = (socket.inet_ntoa(struct.pack('!L', bcast & _long_mask)), self.addrPort)
if _debug: Address._debug(" - addrBroadcastTuple: %r", self.addrBroadcastTuple)
self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
self.addrLen = 6
if (not settings.route_aware) and (route_addr or route_ip_addr):
Address._warning("route provided but not route aware: %r", addr)
if route_addr:
if route_addr.startswith("0x"):
self.addrRoute = Address(xtob(route_addr[2:]))
else:
self.addrRoute = Address(int(route_addr))
if _debug: Address._debug(" - addrRoute: %r", self.addrRoute)
elif route_ip_addr:
if not route_ip_port:
route_ip_port = '47808'
self.addrRoute = Address((route_ip_addr, int(route_ip_port)))
if _debug: Address._debug(" - addrRoute: %r", self.addrRoute)
return
if ethernet_re.match(addr):
if _debug: Address._debug(" - ethernet")
self.addrAddr = xtob(addr, ':')
self.addrLen = len(self.addrAddr)
return
if re.match(r"^\d+$", addr):
if _debug: Address._debug(" - int")
addr = int(addr)
if (addr > 255):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
return
if re.match(r"^\d+:[*]$", addr):
if _debug: Address._debug(" - remote broadcast")
addr = int(addr[:-2])
if (addr >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteBroadcastAddr
self.addrNet = addr
self.addrAddr = None
self.addrLen = None
return
if re.match(r"^\d+:\d+$",addr):
if _debug: Address._debug(" - remote station")
net, addr = addr.split(':')
net = int(net)
addr = int(addr)
if (net >= 65535):
raise ValueError("network out of range")
if (addr > 255):
raise ValueError("address out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
return
if re.match(r"^0x([0-9A-Fa-f][0-9A-Fa-f])+$",addr):
if _debug: Address._debug(" - modern hex string")
self.addrAddr = xtob(addr[2:])
self.addrLen = len(self.addrAddr)
return
if re.match(r"^X'([0-9A-Fa-f][0-9A-Fa-f])+'$",addr):
if _debug: Address._debug(" - old school hex string")
self.addrAddr = xtob(addr[2:-1])
self.addrLen = len(self.addrAddr)
return
if re.match(r"^\d+:0x([0-9A-Fa-f][0-9A-Fa-f])+$",addr):
if _debug: Address._debug(" - remote station with modern hex string")
net, addr = addr.split(':')
net = int(net)
if (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrAddr = xtob(addr[2:])
self.addrLen = len(self.addrAddr)
return
if re.match(r"^\d+:X'([0-9A-Fa-f][0-9A-Fa-f])+'$",addr):
if _debug: Address._debug(" - remote station with old school hex string")
net, addr = addr.split(':')
net = int(net)
if (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrAddr = xtob(addr[2:-1])
self.addrLen = len(self.addrAddr)
return
if netifaces and interface_re.match(addr):
if _debug: Address._debug(" - interface name with optional port")
interface, port = interface_re.match(addr).groups()
if port is not None:
self.addrPort = int(port)
else:
self.addrPort = 47808
interfaces = netifaces.interfaces()
if interface not in interfaces:
raise ValueError("not an interface: %s" % (interface,))
if _debug: Address._debug(" - interfaces: %r", interfaces)
ifaddresses = netifaces.ifaddresses(interface)
if netifaces.AF_INET not in ifaddresses:
raise ValueError("interface does not support IPv4: %s" % (interface,))
ipv4addresses = ifaddresses[netifaces.AF_INET]
if len(ipv4addresses) > 1:
raise ValueError("interface supports multiple IPv4 addresses: %s" % (interface,))
ifaddress = ipv4addresses[0]
if _debug: Address._debug(" - ifaddress: %r", ifaddress)
addr = ifaddress['addr']
self.addrTuple = (addr, self.addrPort)
if _debug: Address._debug(" - addrTuple: %r", self.addrTuple)
addrstr = socket.inet_aton(addr)
self.addrIP = struct.unpack('!L', addrstr)[0]
if 'netmask' in ifaddress:
maskstr = socket.inet_aton(ifaddress['netmask'])
self.addrMask = struct.unpack('!L', maskstr)[0]
else:
self.addrMask = _long_mask
self.addrHost = (self.addrIP & ~self.addrMask)
self.addrSubnet = (self.addrIP & self.addrMask)
if 'broadcast' in ifaddress:
self.addrBroadcastTuple = (ifaddress['broadcast'], self.addrPort)
else:
self.addrBroadcastTuple = None
if _debug: Address._debug(" - addrBroadcastTuple: %r", self.addrBroadcastTuple)
self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
self.addrLen = 6
return
raise ValueError("unrecognized format")
elif isinstance(addr, tuple):
addr, port = addr
self.addrPort = int(port)
if isinstance(addr, str):
if not addr:
# when ('', n) is passed it is the local host address, but there
# could be more than one on a multihomed machine; the empty string
# means "any".
addrstr = b'\0\0\0\0'
else:
addrstr = socket.inet_aton(addr)
self.addrTuple = (addr, self.addrPort)
elif isinstance(addr, int):
addrstr = struct.pack('!L', addr & _long_mask)
self.addrTuple = (socket.inet_ntoa(addrstr), self.addrPort)
else:
raise TypeError("tuple must be (string, port) or (long, port)")
if _debug: Address._debug(" - addrstr: %r", addrstr)
self.addrIP = struct.unpack('!L', addrstr)[0]
self.addrMask = _long_mask
self.addrHost = None
self.addrSubnet = None
self.addrBroadcastTuple = self.addrTuple
self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
self.addrLen = 6
else:
raise TypeError("integer, string or tuple required")
def __str__(self):
if self.addrType == Address.nullAddr:
rslt = 'Null'
elif self.addrType == Address.localBroadcastAddr:
rslt = '*'
elif self.addrType == Address.localStationAddr:
rslt = ''
if self.addrLen == 1:
rslt += str(self.addrAddr[0])
else:
port = struct.unpack('!H', self.addrAddr[-2:])[0]
if (len(self.addrAddr) == 6) and (port >= 47808) and (port <= 47823):
rslt += '.'.join(["%d" % (x) for x in self.addrAddr[0:4]])
if port != 47808:
rslt += ':' + str(port)
else:
rslt += '0x' + btox(self.addrAddr)
elif self.addrType == Address.remoteBroadcastAddr:
rslt = '%d:*' % (self.addrNet,)
elif self.addrType == Address.remoteStationAddr:
rslt = '%d:' % (self.addrNet,)
if self.addrLen == 1:
rslt += str(self.addrAddr[0])
else:
port = struct.unpack('!H', self.addrAddr[-2:])[0]
if (len(self.addrAddr) == 6) and (port >= 47808) and (port <= 47823):
rslt += '.'.join(["%d" % (x) for x in self.addrAddr[0:4]])
if port != 47808:
rslt += ':' + str(port)
else:
rslt += '0x' + btox(self.addrAddr)
elif self.addrType == Address.globalBroadcastAddr:
rslt = "*:*"
else:
raise TypeError("unknown address type %d" % self.addrType)
if self.addrRoute:
rslt += "@" + str(self.addrRoute)
return rslt
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__str__())
def _tuple(self):
if (not settings.route_aware) or (self.addrRoute is None):
return (self.addrType, self.addrNet, self.addrAddr, None)
else:
return (self.addrType, self.addrNet, self.addrAddr, self.addrRoute._tuple())
def __hash__(self):
return hash(self._tuple())
def __eq__(self, arg):
# try to coerce it into an address
if not isinstance(arg, Address):
arg = Address(arg)
# basic components must match
rslt = (self.addrType == arg.addrType)
rslt = rslt and (self.addrNet == arg.addrNet)
rslt = rslt and (self.addrAddr == arg.addrAddr)
# if both have routes they must match
if rslt and self.addrRoute and arg.addrRoute:
rslt = rslt and (self.addrRoute == arg.addrRoute)
return rslt
def __ne__(self, arg):
return not self.__eq__(arg)
def __lt__(self, arg):
return self._tuple() < arg._tuple()
def dict_contents(self, use_dict=None, as_class=None):
"""Return the contents of an object as a dict."""
if _debug: _log.debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
# exception to the rule of returning a dict
return str(self)
#
# pack_ip_addr, unpack_ip_addr
#
def pack_ip_addr(addr):
"""Given an IP address tuple like ('1.2.3.4', 47808) return the six-octet string
useful for a BACnet address."""
addr, port = addr
return socket.inet_aton(addr) + struct.pack('!H', port & _short_mask)
def unpack_ip_addr(addr):
"""Given a six-octet BACnet address, return an IP address tuple."""
if isinstance(addr, bytearray):
addr = bytes(addr)
return (socket.inet_ntoa(addr[0:4]), struct.unpack('!H', addr[4:6])[0])
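# For example, pack_ip_addr(('192.168.0.10', 47808)) yields
# b'\xc0\xa8\x00\x0a\xba\xc0', and unpack_ip_addr() reverses it.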
#
# LocalStation
#
class LocalStation(Address):
def __init__(self, addr, route=None):
self.addrType = Address.localStationAddr
self.addrNet = None
self.addrRoute = route
if isinstance(addr, int):
if (addr < 0) or (addr >= 256):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
elif isinstance(addr, (bytes, bytearray)):
if _debug: Address._debug(" - bytes or bytearray")
self.addrAddr = bytes(addr)
self.addrLen = len(addr)
else:
raise TypeError("integer, bytes or bytearray required")
#
# RemoteStation
#
class RemoteStation(Address):
def __init__(self, net, addr, route=None):
if not isinstance(net, int):
raise TypeError("integer network required")
if (net < 0) or (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrRoute = route
if isinstance(addr, int):
if (addr < 0) or (addr >= 256):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
elif isinstance(addr, (bytes, bytearray)):
if _debug: Address._debug(" - bytes or bytearray")
self.addrAddr = bytes(addr)
self.addrLen = len(addr)
else:
raise TypeError("integer, bytes or bytearray required")
#
# LocalBroadcast
#
class LocalBroadcast(Address):
def __init__(self, route=None):
self.addrType = Address.localBroadcastAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
self.addrRoute = route
#
# RemoteBroadcast
#
class RemoteBroadcast(Address):
def __init__(self, net, route=None):
if not isinstance(net, int):
raise TypeError("integer network required")
if (net < 0) or (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteBroadcastAddr
self.addrNet = net
self.addrAddr = None
self.addrLen = None
self.addrRoute = route
#
# GlobalBroadcast
#
class GlobalBroadcast(Address):
def __init__(self, route=None):
self.addrType = Address.globalBroadcastAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
self.addrRoute = route
#
# PCI
#
@bacpypes_debugging
class PCI(_PCI):
_debug_contents = ('pduExpectingReply', 'pduNetworkPriority')
def __init__(self, *args, **kwargs):
if _debug: PCI._debug("__init__ %r %r", args, kwargs)
# split out the keyword arguments that belong to this class
my_kwargs = {}
other_kwargs = {}
for element in ('expectingReply', 'networkPriority'):
if element in kwargs:
my_kwargs[element] = kwargs[element]
for kw in kwargs:
if kw not in my_kwargs:
other_kwargs[kw] = kwargs[kw]
if _debug: PCI._debug(" - my_kwargs: %r", my_kwargs)
if _debug: PCI._debug(" - other_kwargs: %r", other_kwargs)
# call some superclass, if there is one
super(PCI, self).__init__(*args, **other_kwargs)
# set the attribute/property values for the ones provided
self.pduExpectingReply = my_kwargs.get('expectingReply', 0) # see 6.2.2 (1 or 0)
self.pduNetworkPriority = my_kwargs.get('networkPriority', 0) # see 6.2.2 (0..3)
def update(self, pci):
"""Copy the PCI fields."""
_PCI.update(self, pci)
# now do the BACnet PCI fields
self.pduExpectingReply = pci.pduExpectingReply
self.pduNetworkPriority = pci.pduNetworkPriority
def pci_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: PCI._debug("pci_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
# call the parent class
_PCI.pci_contents(self, use_dict=use_dict, as_class=as_class)
# save the values
use_dict.__setitem__('expectingReply', self.pduExpectingReply)
use_dict.__setitem__('networkPriority', self.pduNetworkPriority)
# return what we built/updated
return use_dict
def dict_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: PCI._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
return self.pci_contents(use_dict=use_dict, as_class=as_class)
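# Illustrative note (not part of the original module): PCI.__init__ splits out
# its own keyword arguments, so a call such as
#   PCI(expectingReply=1, networkPriority=2)
# sets pduExpectingReply and pduNetworkPriority while any remaining keyword
# arguments are forwarded unchanged to the superclass.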
#
# PDU
#
@bacpypes_debugging
class PDU(PCI, PDUData):
def __init__(self, *args, **kwargs):
if _debug: PDU._debug("__init__ %r %r", args, kwargs)
super(PDU, self).__init__(*args, **kwargs)
def __str__(self):
return '<%s %s -> %s : %s>' % (self.__class__.__name__, self.pduSource, self.pduDestination, btox(self.pduData,'.'))
def dict_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: PDUData._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
# call into the two base classes
self.pci_contents(use_dict=use_dict, as_class=as_class)
self.pdudata_contents(use_dict=use_dict, as_class=as_class)
# return what we built/updated
return use_dict
| JoelBender/bacpypes | py34/bacpypes/pdu.py | Python | mit | 24,712 |
# Copyright Alexander Baranin 2016
Logging = None
def onLoad(core):
global Logging
Logging = core.loaded_modules['engine.Logging']
Logging.logMessage('testmodules.TestBootstrap.onLoad()')
def onUnload():
Logging.logMessage('testmodules.TestBootstrap.onUnload()') | Boris-Barboris/PySubs | engine/testmodules/TestBootstrap.py | Python | apache-2.0 | 283 |
# Written by Patricia Suriana, MIT ca. 2013
import tasks
import ground
__all__ = ["ParseError", "parse"]
class ParseError(Exception):
pass
def parse(domain_file, problem_file):
domain_line = parse_nested_list(file(domain_file))
problem_line = parse_nested_list(file(problem_file))
domain = tasks.parse_domain(domain_line)
problem = tasks.parse_task(problem_line)
task = ground.ground(domain, problem)
return task
# Basic functions for parsing PDDL files.
def parse_nested_list(input_file):
tokens = tokenize(input_file)
next_token = tokens.next()
if next_token != "(":
raise ParseError("Expected '(', got %s." % next_token)
result = list(parse_list_helper(tokens))
for tok in tokens: # Check that generator is exhausted.
raise ParseError("Unexpected token: %s." % tok)
return result
def tokenize(input):
for line in input:
line = line.split(";", 1)[0] # Strip comments.
line = line.replace("(", " ( ").replace(")", " ) ").replace("?", " ?")
for token in line.split():
yield token.lower()
def parse_list_helper(tokenstream):
# Leading "(" has already been swallowed.
while True:
try:
token = tokenstream.next()
except StopIteration:
raise ParseError()
if token == ")":
return
elif token == "(":
yield list(parse_list_helper(tokenstream))
else:
yield token
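# Illustrative note (not part of the original module): tokenize() lower-cases
# every token, so parse_nested_list() turns the s-expression text
#   (define (domain BLOCKS))
# into the nested list ['define', ['domain', 'blocks']].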
if __name__ == "__main__":
import strips
import ground
domain_file = 'domain.pddl'
task_file = 'p0.pddl'
domain_line = parse_nested_list(file(domain_file))
task_line = parse_nested_list(file(task_file))
domain = tasks.parse_domain(domain_line)
predicates, actions = domain
task = tasks.parse_task(task_line)
[problem_name, objects, init, goal] = task
print "Problem Name: " + problem_name
#print "Predicates:"
#for e in predicates:
# print '\t', e, '\n'
#print "Actions:"
#for e in actions:
# print '\t', e, '\n'
statics = ground.get_static_predicates(predicates, actions)
#print "Statics Predicate:"
#for e in statics:
# print '\t', e, '\n'
assignment = {'?obj': 'blockA', '?newcol': 'red', '?origcol': 'none', \
'?paintingtool': 'redsprayer'}
#op = ground.create_operator(actions[4], assignment, statics, init)
#print op
grounded = ground.ground_action(actions[4], objects, statics, init)
for e in grounded:
print '\t', e, '\n'
| LYZhelloworld/Courses | 50.021/02/code/planner/pddl_parser.py | Python | mit | 2,404 |
#!/usr/bin/python
import logging
from logging.handlers import RotatingFileHandler
from app import app
# Configuration
HOST = '0.0.0.0'
PORT = 5000
LOG_FILENAME = "dylansawesome.log"
# Logger
formatter = logging.Formatter(
"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=10000000, backupCount=5)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
app.logger.addHandler(handler)
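# Illustrative note (not part of the original script): with maxBytes=10000000
# and backupCount=5 the handler keeps the active log plus up to five ~10 MB
# backups (dylansawesome.log.1 ... dylansawesome.log.5) before discarding data.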
# Run the app
app.run(host=HOST, port=PORT)
| zinglax/SPA-BoilerPlate2017 | testing/watchdog/examplefiles/dylansawesome/run.py | Python | mit | 517 |
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kfp.components.structures."""
import textwrap
import unittest
from unittest import mock
import pydantic
from absl.testing import parameterized
from kfp.components import structures
V1_YAML_IF_PLACEHOLDER = textwrap.dedent("""\
name: component_if
inputs:
- {name: optional_input_1, type: String, optional: true}
implementation:
container:
image: alpine
args:
- if:
cond:
isPresent: optional_input_1
then:
- --arg1
- {inputValue: optional_input_1}
else:
- --arg2
- default
""")
V2_YAML_IF_PLACEHOLDER = textwrap.dedent("""\
name: component_if
inputs:
optional_input_1: {type: String, default: null}
implementation:
container:
image: alpine
args:
- ifPresent:
inputName: optional_input_1
then:
- --arg1
- {inputValue: optional_input_1}
else: [--arg2, default]
""")
V2_COMPONENT_SPEC_IF_PLACEHOLDER = structures.ComponentSpec(
name='component_if',
implementation=structures.Implementation(
container=structures.ContainerSpec(
image='alpine',
args=[
structures.IfPresentPlaceholder(
if_structure=structures.IfPresentPlaceholderStructure(
input_name='optional_input_1',
then=[
'--arg1',
structures.InputValuePlaceholder(
input_name='optional_input_1'),
],
otherwise=[
'--arg2',
'default',
]))
])),
inputs={
'optional_input_1': structures.InputSpec(type='String', default=None)
},
)
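# Illustrative note (not part of the original tests): V1_YAML_IF_PLACEHOLDER and
# V2_YAML_IF_PLACEHOLDER describe the same component, so both are expected to
# load into V2_COMPONENT_SPEC_IF_PLACEHOLDER above; the parameterized load
# tests further down rely on that equivalence.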
V1_YAML_CONCAT_PLACEHOLDER = textwrap.dedent("""\
name: component_concat
inputs:
- {name: input_prefix, type: String}
implementation:
container:
image: alpine
args:
- concat: ['--arg1', {inputValue: input_prefix}]
""")
V2_YAML_CONCAT_PLACEHOLDER = textwrap.dedent("""\
name: component_concat
inputs:
input_prefix: {type: String}
implementation:
container:
image: alpine
args:
- concat:
- --arg1
- {inputValue: input_prefix}
""")
V2_COMPONENT_SPEC_CONCAT_PLACEHOLDER = structures.ComponentSpec(
name='component_concat',
implementation=structures.Implementation(
container=structures.ContainerSpec(
image='alpine',
args=[
structures.ConcatPlaceholder(concat=[
'--arg1',
structures.InputValuePlaceholder(input_name='input_prefix'),
])
])),
inputs={'input_prefix': structures.InputSpec(type='String')},
)
V2_YAML_NESTED_PLACEHOLDER = textwrap.dedent("""\
name: component_nested
inputs:
input_prefix: {type: String}
implementation:
container:
image: alpine
args:
- concat:
- --arg1
- ifPresent:
inputName: input_prefix
then:
- --arg1
- {inputValue: input_prefix}
else:
- --arg2
- default
- concat:
- --arg1
- {inputValue: input_prefix}
""")
V2_COMPONENT_SPEC_NESTED_PLACEHOLDER = structures.ComponentSpec(
name='component_nested',
implementation=structures.Implementation(
container=structures.ContainerSpec(
image='alpine',
args=[
structures.ConcatPlaceholder(concat=[
'--arg1',
structures.IfPresentPlaceholder(
if_structure=structures.IfPresentPlaceholderStructure(
input_name='input_prefix',
then=[
'--arg1',
structures.InputValuePlaceholder(
input_name='input_prefix'),
],
otherwise=[
'--arg2',
'default',
structures.ConcatPlaceholder(concat=[
'--arg1',
structures.InputValuePlaceholder(
input_name='input_prefix'),
]),
])),
])
])),
inputs={'input_prefix': structures.InputSpec(type='String')},
)
class StructuresTest(parameterized.TestCase):
def test_component_spec_with_placeholder_referencing_nonexisting_input_output(
self):
with self.assertRaisesRegex(
pydantic.ValidationError, 'Argument "input_name=\'input000\'" '
'references non-existing input.'):
structures.ComponentSpec(
name='component_1',
implementation=structures.Implementation(
container=structures.ContainerSpec(
image='alpine',
command=[
'sh',
'-c',
'set -ex\necho "$0" > "$1"',
structures.InputValuePlaceholder(
input_name='input000'),
structures.OutputPathPlaceholder(
output_name='output1'),
],
)),
inputs={'input1': structures.InputSpec(type='String')},
outputs={'output1': structures.OutputSpec(type='String')},
)
with self.assertRaisesRegex(
pydantic.ValidationError,
'Argument "output_name=\'output000\'" '
'references non-existing output.'):
structures.ComponentSpec(
name='component_1',
implementation=structures.Implementation(
container=structures.ContainerSpec(
image='alpine',
command=[
'sh',
'-c',
'set -ex\necho "$0" > "$1"',
structures.InputValuePlaceholder(
input_name='input1'),
structures.OutputPathPlaceholder(
output_name='output000'),
],
)),
inputs={'input1': structures.InputSpec(type='String')},
outputs={'output1': structures.OutputSpec(type='String')},
)
def test_simple_component_spec_save_to_component_yaml(self):
open_mock = mock.mock_open()
expected_yaml = textwrap.dedent("""\
name: component_1
inputs:
input1: {type: String}
outputs:
output1: {type: String}
implementation:
container:
image: alpine
command:
- sh
- -c
- 'set -ex
echo "$0" > "$1"'
- {inputValue: input1}
- {outputPath: output1}
""")
with mock.patch("builtins.open", open_mock, create=True):
structures.ComponentSpec(
name='component_1',
implementation=structures.Implementation(
container=structures.ContainerSpec(
image='alpine',
command=[
'sh',
'-c',
'set -ex\necho "$0" > "$1"',
structures.InputValuePlaceholder(
input_name='input1'),
structures.OutputPathPlaceholder(
output_name='output1'),
],
)),
inputs={
'input1': structures.InputSpec(type='String')
},
outputs={
'output1': structures.OutputSpec(type='String')
},
).save_to_component_yaml('test_save_file.txt')
open_mock.assert_called_with('test_save_file.txt', 'a')
open_mock.return_value.write.assert_called_once_with(expected_yaml)
@parameterized.parameters(
{
'expected_yaml': V2_YAML_IF_PLACEHOLDER,
'component': V2_COMPONENT_SPEC_IF_PLACEHOLDER
},
{
'expected_yaml': V2_YAML_CONCAT_PLACEHOLDER,
'component': V2_COMPONENT_SPEC_CONCAT_PLACEHOLDER
},
{
'expected_yaml': V2_YAML_NESTED_PLACEHOLDER,
'component': V2_COMPONENT_SPEC_NESTED_PLACEHOLDER
},
)
def test_component_spec_placeholder_save_to_component_yaml(
self, expected_yaml, component):
open_mock = mock.mock_open()
with mock.patch("builtins.open", open_mock, create=True):
component.save_to_component_yaml('test_save_file.txt')
open_mock.assert_called_with('test_save_file.txt', 'a')
open_mock.return_value.write.assert_called_once_with(expected_yaml)
def test_simple_component_spec_load_from_v2_component_yaml(self):
component_yaml_v2 = textwrap.dedent("""\
name: component_1
inputs:
input1:
type: String
outputs:
output1:
type: String
implementation:
container:
image: alpine
command:
- sh
- -c
- 'set -ex
echo "$0" > "$1"'
- inputValue: input1
- outputPath: output1
""")
generated_spec = structures.ComponentSpec.load_from_component_yaml(
component_yaml_v2)
expected_spec = structures.ComponentSpec(
name='component_1',
implementation=structures.Implementation(
container=structures.ContainerSpec(
image='alpine',
command=[
'sh',
'-c',
'set -ex\necho "$0" > "$1"',
structures.InputValuePlaceholder(input_name='input1'),
structures.OutputPathPlaceholder(output_name='output1'),
],
)),
inputs={'input1': structures.InputSpec(type='String')},
outputs={'output1': structures.OutputSpec(type='String')})
self.assertEqual(generated_spec, expected_spec)
@parameterized.parameters(
{
'yaml': V1_YAML_IF_PLACEHOLDER,
'expected_component': V2_COMPONENT_SPEC_IF_PLACEHOLDER
},
{
'yaml': V2_YAML_IF_PLACEHOLDER,
'expected_component': V2_COMPONENT_SPEC_IF_PLACEHOLDER
},
{
'yaml': V1_YAML_CONCAT_PLACEHOLDER,
'expected_component': V2_COMPONENT_SPEC_CONCAT_PLACEHOLDER
},
{
'yaml': V2_YAML_CONCAT_PLACEHOLDER,
'expected_component': V2_COMPONENT_SPEC_CONCAT_PLACEHOLDER
},
{
'yaml': V2_YAML_NESTED_PLACEHOLDER,
'expected_component': V2_COMPONENT_SPEC_NESTED_PLACEHOLDER
},
)
def test_component_spec_placeholder_load_from_v2_component_yaml(
self, yaml, expected_component):
generated_spec = structures.ComponentSpec.load_from_component_yaml(yaml)
self.assertEqual(generated_spec, expected_component)
def test_component_spec_load_from_v1_component_yaml(self):
component_yaml_v1 = textwrap.dedent("""\
name: Component with 2 inputs and 2 outputs
inputs:
- {name: Input parameter, type: String}
- {name: Input artifact}
outputs:
- {name: Output 1}
- {name: Output 2}
implementation:
container:
image: busybox
command: [sh, -c, '
mkdir -p $(dirname "$2")
mkdir -p $(dirname "$3")
echo "$0" > "$2"
cp "$1" "$3"
'
]
args:
- {inputValue: Input parameter}
- {inputPath: Input artifact}
- {outputPath: Output 1}
- {outputPath: Output 2}
""")
generated_spec = structures.ComponentSpec.load_from_component_yaml(
component_yaml_v1)
expected_spec = structures.ComponentSpec(
name='Component with 2 inputs and 2 outputs',
implementation=structures.Implementation(
container=structures.ContainerSpec(
image='busybox',
command=[
'sh',
'-c',
(' mkdir -p $(dirname "$2") mkdir -p $(dirname "$3") '
'echo "$0" > "$2" cp "$1" "$3" '),
],
args=[
structures.InputValuePlaceholder(
input_name='input_parameter'),
structures.InputPathPlaceholder(
input_name='input_artifact'),
structures.OutputPathPlaceholder(
output_name='output_1'),
structures.OutputPathPlaceholder(
output_name='output_2'),
],
env={},
)),
inputs={
'input_parameter': structures.InputSpec(type='String'),
'input_artifact': structures.InputSpec(type='Artifact')
},
outputs={
'output_1': structures.OutputSpec(type='Artifact'),
'output_2': structures.OutputSpec(type='Artifact'),
})
self.assertEqual(generated_spec, expected_spec)
if __name__ == '__main__':
unittest.main()
| kubeflow/pipelines | sdk/python/kfp/components/structures_test.py | Python | apache-2.0 | 14,973 |
#!/usr/bin/python
from pycbc.scheme import *
from pycbc.types import *
from pycbc.waveform import *
import pycbc
from optparse import OptionParser
from math import sin, log
import gc
parser = OptionParser()
parser.add_option('--scheme','-s', type = 'choice',
choices = ('cpu','cuda','opencl'),
default = 'cpu', dest = 'scheme',
help = 'specifies processing scheme, can be cpu [default], cuda, or opencl')
parser.add_option('--device-num','-d', action='store', type = 'int',
dest = 'devicenum', default=0,
help = 'specifies a GPU device to use for CUDA or OpenCL, 0 by default')
parser.add_option('--approximant', type=str, default="TaylorF2")
parser.add_option('--deltaf',type=float, help='frequency step')
parser.add_option('--iterations', type=int, help='Number of iterations to perform')
(options, args) = parser.parse_args()
#Changing the optvalues to a dict makes them easier to read
_options = vars(options)
if _options['scheme'] == 'cpu':
ctx = CPUScheme()
if _options['scheme'] == 'cuda':
ctx = CUDAScheme(device_num=_options['devicenum'])
if _options['scheme'] == 'opencl':
ctx = OpenCLScheme(device_num=_options['devicenum'])
niter = options.iterations
if type(ctx) is CUDAScheme:
print("RUNNING ON ", ctx.device.name())
else:
print("RUNNING ON CPU")
with ctx:
wf_taylor = get_fd_waveform(mass1=1, mass2=1, f_lower=14,
approximant=options.approximant, delta_f=options.deltaf)
def taylorf2():
with ctx:
for i in range(0,niter):
wf_taylor = get_fd_waveform(mass1=1, mass2=1, f_lower=14,
approximant=options.approximant, delta_f=options.deltaf)
import timeit
gt = timeit.Timer(taylorf2)
t = (1000 * gt.timeit(number=1)/niter)
print("Waveform Generation %.2f msec" % t, " %5.1f gen/min " % (1000 *60 /t))
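# Illustrative note (not part of the original script): timeit reports seconds
# for the whole batch of niter generations, so (1000 * elapsed / niter) is the
# per-waveform cost in milliseconds and (1000 * 60 / t) the generations per minute.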
if type(ctx) is CUDAScheme:
def SPAtmplt():
with ctx:
n = int(1.0 / options.deltaf * 4096)
out = zeros(n, dtype=complex64)
for i in range(0,niter):
wf_taylor = get_fd_waveform(mass1=1, mass2=1, f_lower=14,
approximant="SPAtmplt", delta_f=options.deltaf, out=out, amplitude_order=0)
gt = timeit.Timer(SPAtmplt)
t = (1000 * gt.timeit(number=1)/niter)
print("SPAtmplt Generation %.2f msec" % t, " %5.1f gen/min " % (1000 *60 /t))
| stevereyes01/pycbc | tools/timing/wav_perf.py | Python | gpl-3.0 | 2,498 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_vrf
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VRFs on Arista EOS network devices
description:
- This module provides declarative management of VRFs
on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
name:
description:
- Name of the VRF.
required: true
rd:
description:
- Route distinguisher of the VRF
interfaces:
description:
- Identifies the set of interfaces that
should be configured in the VRF. Interfaces must be routed
interfaces in order to be placed into a VRF. The name of interface
should be in expanded format and not abbreviated.
associated_interfaces:
description:
      - This is an intent option and checks the operational state of the given vrf C(name)
        for associated interfaces. If the value in C(associated_interfaces) does not match
        the operational state of the vrf interfaces on the device, it will result in failure.
version_added: "2.5"
aggregate:
    description: List of VRF definitions
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
default: no
type: bool
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state arguments.
default: 10
state:
description:
- State of the VRF configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: Create vrf
eos_vrf:
name: test
rd: 1:200
interfaces:
- Ethernet2
state: present
- name: Delete VRFs
eos_vrf:
name: test
state: absent
- name: Create aggregate of VRFs with purge
eos_vrf:
aggregate:
- { name: test4, rd: "1:204" }
- { name: test5, rd: "1:205" }
state: present
purge: yes
- name: Delete aggregate of VRFs
eos_vrf:
aggregate:
- name: test2
- name: test3
- name: test4
- name: test5
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- vrf definition test
- rd 1:100
- interface Ethernet1
- vrf forwarding test
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.eos.eos import load_config, run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
purge = module.params['purge']
for w in want:
name = w['name']
rd = w['rd']
obj_in_have = search_obj_in_list(name, have)
if state == 'absent':
if obj_in_have:
commands.append('no vrf definition %s' % name)
elif state == 'present':
if not obj_in_have:
commands.append('vrf definition %s' % name)
if rd is not None:
commands.append('rd %s' % rd)
if w['interfaces']:
for i in w['interfaces']:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
else:
if w['rd'] is not None and w['rd'] != obj_in_have['rd']:
commands.append('vrf definition %s' % w['name'])
commands.append('rd %s' % w['rd'])
if w['interfaces']:
if not obj_in_have['interfaces']:
for i in w['interfaces']:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
elif set(w['interfaces']) != obj_in_have['interfaces']:
missing_interfaces = list(set(w['interfaces']) - set(obj_in_have['interfaces']))
for i in missing_interfaces:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['name'], want)
if not obj_in_want:
commands.append('no vrf definition %s' % h['name'])
return commands
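# Illustrative sketch (not part of the original module): for a desired VRF such
# as {'name': 'test', 'rd': '1:200', 'interfaces': ['ethernet2']} that is absent
# from the device, map_obj_to_commands() would emit roughly
#   ['vrf definition test', 'rd 1:200', 'interface ethernet2',
#    'vrf forwarding test']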
def map_config_to_obj(module):
objs = []
output = run_commands(module, {'command': 'show vrf', 'output': 'text'})
lines = output[0].strip().splitlines()[3:]
out_len = len(lines)
index = 0
while out_len > index:
line = lines[index]
if not line:
            index += 1
            continue
splitted_line = re.split(r'\s{2,}', line.strip())
if len(splitted_line) == 1:
index += 1
continue
else:
obj = dict()
obj['name'] = splitted_line[0]
obj['rd'] = splitted_line[1]
obj['interfaces'] = []
if len(splitted_line) > 4:
obj['interfaces'] = []
interfaces = splitted_line[4]
if interfaces.endswith(','):
while interfaces.endswith(','):
# gather all comma separated interfaces
if out_len <= index:
break
index += 1
line = lines[index]
vrf_line = re.split(r'\s{2,}', line.strip())
interfaces += vrf_line[-1]
for i in interfaces.split(','):
obj['interfaces'].append(i.strip().lower())
index += 1
objs.append(obj)
return objs
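# Illustrative sketch (not part of the original module): map_config_to_obj()
# parses a 'show vrf' row such as
#   test   1:200   ipv4,ipv6   incomplete   Ethernet2
# into {'name': 'test', 'rd': '1:200', 'interfaces': ['ethernet2']}, with
# interface names lower-cased for later comparison.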
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
if item.get('interfaces'):
item['interfaces'] = [intf.replace(" ", "").lower() for intf in item.get('interfaces') if intf]
if item.get('associated_interfaces'):
item['associated_interfaces'] = [intf.replace(" ", "").lower() for intf in item.get('associated_interfaces') if intf]
obj.append(item.copy())
else:
obj.append({
'name': module.params['name'],
'state': module.params['state'],
'rd': module.params['rd'],
'interfaces': [intf.replace(" ", "").lower() for intf in module.params['interfaces']] if module.params['interfaces'] else [],
'associated_interfaces': [intf.replace(" ", "").lower() for intf in
module.params['associated_interfaces']] if module.params['associated_interfaces'] else []
})
return obj
def check_declarative_intent_params(want, module, result):
have = None
is_delay = False
for w in want:
if w.get('associated_interfaces') is None:
continue
if result['changed'] and not is_delay:
time.sleep(module.params['delay'])
is_delay = True
if have is None:
have = map_config_to_obj(module)
for i in w['associated_interfaces']:
obj_in_have = search_obj_in_list(w['name'], have)
if obj_in_have:
interfaces = obj_in_have.get('interfaces')
if interfaces is not None and i not in interfaces:
module.fail_json(msg="Interface %s not configured on vrf %s" % (i, w['name']))
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
interfaces=dict(type='list'),
associated_interfaces=dict(type='list'),
delay=dict(default=10, type='int'),
rd=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(eos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
check_declarative_intent_params(want, module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| hyperized/ansible | lib/ansible/modules/network/eos/eos_vrf.py | Python | gpl-3.0 | 10,723 |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from optparse import make_option
import sys
class GenericCommand(BaseCommand):
''' paloma postfix management
'''
args = ''
help = ''
model = None
option_list = BaseCommand.option_list + (
make_option(
'--id',
action='store',
dest='id',
default=None,
            help=u'entity id (message, user, ...)'
),
make_option(
'-s',
'--sync',
action='store_true',
dest='sync',
default=False,
help=u'Synchronous Call'),
make_option(
'--file',
action='store',
dest='file',
default='stdin',
            help=u'file'),
make_option(
'--description',
action='store',
dest='description',
default=None,
help=u'Description'),
make_option(
'--eta',
action='store',
dest='eta',
default=None,
help=u'Estimated Time of Arrival'),
make_option(
'--encoding',
action='store',
dest='encoding',
default='utf-8',
help=u'encoding'),
make_option(
'-d', '--dryrun',
action='store_true',
dest='dryrun', default=False,
help=u'''False(default): modify data on storages,
True: print data to console out
'''),
make_option(
'--async',
action='store',
dest='async',
default=True,
            help=u'Asynchronous Execution'),
)
''' Command Option '''
def open_file(self, options):
fp = sys.stdin if options['file'] == 'stdin' else open(options['file'])
return fp
def handle_count(self, *args, **option):
if self.model:
print self.model, self.model.objects.count()
def handle_help(self, *args, **options):
''' help
'''
import re
for i in dir(self):
m = re.search('^handle_(.*)$', i)
if m is None:
continue
print "subcommand:", m.group(1)
print args
print options
def handle(self, *args, **options):
''' command main '''
if len(args) < 1:
self.handle_help(*args, **options)
            return "a sub command must be specified"
self.command = args[0]
getattr(self,
'handle_%s' % self.command,
GenericCommand.handle_help)(*args[1:], **options)
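    # Illustrative note (not part of the original module): handle() dispatches
    # the first positional argument to a handle_<subcommand> method, falling
    # back to handle_help, so a new sub-command only needs another handle_* method.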
| hdknr/paloma | src/paloma/management/commands/__init__.py | Python | bsd-2-clause | 2,692 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.compiler.jit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.compiler import jit
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
_REGISTERED_OPS = op_def_registry.get_registered_ops()
def enable_jit_nonstateful(node_def):
try:
return not _REGISTERED_OPS[node_def.op].is_stateful
except KeyError:
raise ValueError("Unregistered op being created: %s" % node_def)
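# Illustrative note (not part of the original tests): enable_jit_nonstateful is
# passed to jit.experimental_jit_scope() as a per-node predicate, so stateless
# ops receive the _XlaCompile attribute while stateful ops (such as seeded
# random initializers) are left uncompiled.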
class JITTest(test.TestCase):
def compute(self, use_jit, compute_fn):
random_seed.set_random_seed(1234)
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(use_jit):
r = compute_fn()
sess.run(variables.global_variables_initializer())
return (r, sess.run(r))
def testJITCreateOpsLambda(self):
"""Test several ways of customizing the compilation attribute."""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = random_ops.random_uniform((1,), seed=1)
return inputs
v_false_1_t, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
v_true_1_t, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
v_all_true_t, _ = self.compute(True, create_ops)
self.assertFalse(v_false_1_t.op.get_attr("_XlaCompile"))
v_true_1_t_sampler_op = v_true_1_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
v_all_true_t_sampler_op = v_all_true_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
self.assertFalse(v_true_1_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_true_1_t.op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t.op.get_attr("_XlaCompile"))
# Additionally ensure that where no JIT compilation happens on the
# random_uniform op, the output values are identical to the case
# where no JIT compilation happens anywhere.
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
def testJITXlaScope(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True):
# XlaScope 0
a1 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope 1
a2 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 1
a3 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 2
a4 = constant_op.constant(1)
# XlaScope still 1, depth 1
a5 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope now 2, depth 0
a6 = constant_op.constant(1)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a3.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a4.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a5.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_2", a6.op.get_attr("_XlaScope"))
def testJITVariableSeed(self):
"""Test that the stateful initializer is not marked for compilation.
XLA does not currently support seeded initialization and XLA initializers
therefore return different values than non-XLA counterparts. Here
    we ensure that if we disable JIT compilation for the initializers, we
    get the same variable values as if no JIT compilation had happened.
"""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = variable_scope.get_variable("var", (1,))
return inputs
_, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
_, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
def testDefunNoJitScope(self):
with self.session(graph=ops.Graph()):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
# No enclosing jit scope so function sets its own value for _XlaScope.
self.assertEqual(b"function_mulop", func_attrs["_XlaScope"].s)
def testDefunInheritsJitScope(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
# Ensure _XlaScope is inherited from enclosing context.
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
class CompilationEnabledInGradientTest(test.TestCase):
def testCompilationInGradient(self):
with self.cached_session():
x = constant_op.constant([[3.]])
y_nc = math_ops.matmul(x, x, name="not_compiled")
with jit.experimental_jit_scope():
y_c = math_ops.matmul(y_nc, y_nc, name="compiled")
x_grads = gradients.gradients([y_c], [x])[0]
operations = x.graph.get_operations()
c_grad_ops = [
op for op in operations if "gradients/compiled" in op.name]
nc_grad_ops = [
op for op in operations if "gradients/not_compiled" in op.name]
self.assertGreater(len(c_grad_ops), 0)
self.assertGreater(len(nc_grad_ops), 0)
for cg in c_grad_ops:
self.assertTrue(cg.get_attr("_XlaCompile"))
for ncg in nc_grad_ops:
with self.assertRaisesRegexp(ValueError, "[Nn]o attr named"):
ncg.get_attr("_XlaCompile")
# d/dx (x ** 4) = 4 * (x ** 3)
self.assertAllClose([[108]], x_grads.eval())
def testCompilationGradientScopeNames(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope():
# XlaScope 0
a1 = constant_op.constant([[1.]])
a1t = math_ops.matmul(a1, a1)
with jit.experimental_jit_scope():
# XlaScope 1
a2 = constant_op.constant([[1.]])
a2t = math_ops.matmul(a2, a2)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0", grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", grad_a2.op.get_attr("_XlaScope"))
def testCompilationSeparateGradientScopeNames(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 0
a1 = constant_op.constant([[1.]])
a1t = math_ops.matmul(a1, a1)
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 1
a2 = constant_op.constant([[1.]])
a2t = math_ops.matmul(a2, a2)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0_grad_GA",
grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1_grad_GB",
grad_a2.op.get_attr("_XlaScope"))
def testPlaysNicelyWithDefun(self):
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with the same
# _XlaScope as the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0", grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
def testPlaysNicelyWithDefunSeparateGradientScope(self):
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True):
@function.Defun(
compiled=True, noinline=True, separate_compiled_gradients=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with a different
# _XlaScope from the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0_grad_GA",
grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
if __name__ == "__main__":
test.main()
| kobejean/tensorflow | tensorflow/contrib/compiler/jit_test.py | Python | apache-2.0 | 12,205 |
# encoding: utf-8
def _unicode_truncate(ustr, length, encoding="UTF-8"):
"Truncate @ustr to specific encoded byte length"
bstr = ustr.encode(encoding)[:length]
return bstr.decode(encoding, 'ignore')
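# Illustrative note (not part of the original module): truncation works on the
# encoded byte length, so wide characters count for more than one; e.g.
# _unicode_truncate(u"執筆方針", 7) keeps only u"執筆" because each character
# occupies three bytes in UTF-8 and the trailing partial byte is dropped.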
def extract_title_body(text, maxtitlelen=60):
"""Prepare @text: Return a (title, body) tuple
    @text: A user-submitted paragraph or other snippet of text. We
try to detect an obvious title and then return the title and the
following body. Otherwise we extract a title from the first words,
and return the full text as body.
@maxtitlelen: A unitless measure of approximate length of title.
The default value yields a resulting title of approximately 60 ascii
characters, or 20 asian characters.
>>> extract_title_body("Short Text")
('Short Text', '')
>>> title, body = extract_title_body(u"執筆方針については、項目名の付け方、"
... "フォーマットや表記上の諸問題に関して多くの方針が存在している。")
>>> print(title)
執筆方針については、項目名の付け方、フォ
>>> print(body) # doctest: +ELLIPSIS
執筆方針については、項目名の付け方、フォ...して多くの方針が存在している。
"""
    # if you don't make real tests, it's not worth doing it at all.
if not text.strip():
return text, ""
def split_first_line(text):
"""Take first non-empty line of text"""
lines = iter(text.splitlines())
for l in lines:
l = l.strip()
if not l:
continue
rest = "\n".join(lines)
return l, rest
# We use the UTF-8 encoding and truncate due to it:
# this is a good heuristic for ascii vs "wide characters"
# it results in taking fewer characters if they are asian, which
# is exactly what we want
def split_first_words(text, maxlen):
text = text.lstrip()
first_text = _unicode_truncate(text, maxlen)
words = first_text.split()
if len(words) > 3:
words = words[:-1]
first_words = " ".join(words[:-1])
if text.startswith(first_words):
first_text = first_words
rest_text = text[len(first_text):]
return first_text, rest_text
firstline, rest = split_first_line(text)
if len(firstline.encode("UTF-8")) > maxtitlelen:
firstline, rest = split_first_words(text, maxtitlelen)
else:
return firstline, rest
if rest.strip():
return firstline, text
else:
return text, ""
if __name__ == '__main__':
import doctest
doctest.testmod()
| engla/kupfer | kupfer/textutils.py | Python | gpl-3.0 | 2,681 |
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
.. deprecated:: 1.9
This module has been deprecated. Please use `cf_units
<https://github.com/SciTools/cf_units>`_ instead.
Units of measure.
Provision of a wrapper class to support Unidata/UCAR UDUNITS-2, and the
netcdftime calendar functionality.
See also: `UDUNITS-2
<http://www.unidata.ucar.edu/software/udunits/udunits-2/udunits2.html>`_.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from contextlib import contextmanager
import copy
import ctypes
import ctypes.util
import os.path
import sys
import warnings
import netcdftime
import numpy as np
import iris.config
import iris.util
warnings.warn('iris.unit is deprecated in Iris v1.9. Please use cf_units '
'(https://github.com/SciTools/cf_units) instead.')
__all__ = ['Unit', 'date2num', 'decode_time', 'encode_clock', 'encode_date',
'encode_time', 'num2date']
########################################################################
#
# module level constants
#
########################################################################
#
# default constants
#
IRIS_EPOCH = '1970-01-01 00:00:00'
_STRING_BUFFER_DEPTH = 128
_UNKNOWN_UNIT_STRING = 'unknown'
_UNKNOWN_UNIT_SYMBOL = '?'
_UNKNOWN_UNIT = [_UNKNOWN_UNIT_STRING, _UNKNOWN_UNIT_SYMBOL, '???', '']
_NO_UNIT_STRING = 'no_unit'
_NO_UNIT_SYMBOL = '-'
_NO_UNIT = [_NO_UNIT_STRING, _NO_UNIT_SYMBOL, 'no unit', 'no-unit', 'nounit']
_UNIT_DIMENSIONLESS = '1'
_OP_SINCE = ' since '
_CATEGORY_UNKNOWN, _CATEGORY_NO_UNIT, _CATEGORY_UDUNIT = range(3)
#
# libudunits2 constants
#
# ut_status enumerations
_UT_STATUS = ['UT_SUCCESS', 'UT_BAD_ARG', 'UT_EXISTS', 'UT_NO_UNIT',
'UT_OS', 'UT_NOT_SAME_NAME', 'UT_MEANINGLESS', 'UT_NO_SECOND',
'UT_VISIT_ERROR', 'UT_CANT_FORMAT', 'UT_SYNTAX', 'UT_UNKNOWN',
'UT_OPEN_ARG', 'UT_OPEN_ENV', 'UT_OPEN_DEFAULT', 'UT_PARSE']
# explicit function names
_UT_HANDLER = 'ut_set_error_message_handler'
_UT_IGNORE = 'ut_ignore'
# ut_encoding enumerations
UT_ASCII = 0
UT_ISO_8859_1 = 1
UT_LATIN1 = 1
UT_UTF8 = 2
UT_NAMES = 4
UT_DEFINITION = 8
UT_FORMATS = [UT_ASCII, UT_ISO_8859_1, UT_LATIN1, UT_UTF8, UT_NAMES,
UT_DEFINITION]
#
# netcdftime constants
#
CALENDAR_STANDARD = 'standard'
CALENDAR_GREGORIAN = 'gregorian'
CALENDAR_PROLEPTIC_GREGORIAN = 'proleptic_gregorian'
CALENDAR_NO_LEAP = 'noleap'
CALENDAR_JULIAN = 'julian'
CALENDAR_ALL_LEAP = 'all_leap'
CALENDAR_365_DAY = '365_day'
CALENDAR_366_DAY = '366_day'
CALENDAR_360_DAY = '360_day'
CALENDARS = [CALENDAR_STANDARD, CALENDAR_GREGORIAN,
CALENDAR_PROLEPTIC_GREGORIAN, CALENDAR_NO_LEAP, CALENDAR_JULIAN,
CALENDAR_ALL_LEAP, CALENDAR_365_DAY, CALENDAR_366_DAY,
CALENDAR_360_DAY]
#
# ctypes types
#
FLOAT32 = ctypes.c_float
FLOAT64 = ctypes.c_double
########################################################################
#
# module level variables
#
########################################################################
# cache for ctypes foreign shared library handles
_lib_c = None
_lib_ud = None
_ud_system = None
# cache for libc shared library functions
_strerror = None
# class cache for libudunits2 shared library functions
_cv_convert_float = None
_cv_convert_floats = None
_cv_convert_double = None
_cv_convert_doubles = None
_cv_free = None
_ut_are_convertible = None
_ut_clone = None
_ut_compare = None
_ut_decode_time = None
_ut_divide = None
_ut_encode_clock = None
_ut_encode_date = None
_ut_encode_time = None
_ut_format = None
_ut_free = None
_ut_get_converter = None
_ut_get_status = None
_ut_get_unit_by_name = None
_ut_ignore = None
_ut_invert = None
_ut_is_dimensionless = None
_ut_log = None
_ut_multiply = None
_ut_offset = None
_ut_offset_by_time = None
_ut_parse = None
_ut_raise = None
_ut_read_xml = None
_ut_root = None
_ut_scale = None
_ut_set_error_message_handler = None
########################################################################
#
# module level statements
#
########################################################################
#
# load the libc shared library
#
if _lib_c is None:
if sys.platform == 'win32':
_lib_c = ctypes.cdll.msvcrt
else:
_lib_c = ctypes.CDLL(ctypes.util.find_library('libc'))
#
# cache common shared library functions
#
_strerror = _lib_c.strerror
_strerror.restype = ctypes.c_char_p
#
# load the libudunits2 shared library
#
if _lib_ud is None:
_lib_ud = iris.config.get_option(
'System', 'udunits2_path',
default=ctypes.util.find_library('udunits2'))
_lib_ud = ctypes.CDLL(_lib_ud, use_errno=True)
#
# cache common shared library functions
#
_cv_convert_float = _lib_ud.cv_convert_float
_cv_convert_float.argtypes = [ctypes.c_void_p, ctypes.c_float]
_cv_convert_float.restype = ctypes.c_float
_cv_convert_floats = _lib_ud.cv_convert_floats
_cv_convert_floats.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_ulong, ctypes.c_void_p]
_cv_convert_floats.restype = ctypes.c_void_p
_cv_convert_double = _lib_ud.cv_convert_double
_cv_convert_double.argtypes = [ctypes.c_void_p, ctypes.c_double]
_cv_convert_double.restype = ctypes.c_double
_cv_convert_doubles = _lib_ud.cv_convert_doubles
_cv_convert_doubles.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_ulong, ctypes.c_void_p]
_cv_convert_doubles.restype = ctypes.c_void_p
_cv_free = _lib_ud.cv_free
_cv_free.argtypes = [ctypes.c_void_p]
_ut_are_convertible = _lib_ud.ut_are_convertible
_ut_are_convertible.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_clone = _lib_ud.ut_clone
_ut_clone.argtypes = [ctypes.c_void_p]
_ut_clone.restype = ctypes.c_void_p
_ut_compare = _lib_ud.ut_compare
_ut_compare.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_compare.restype = ctypes.c_int
_ut_decode_time = _lib_ud.ut_decode_time
_ut_decode_time.restype = None
_ut_divide = _lib_ud.ut_divide
_ut_divide.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_divide.restype = ctypes.c_void_p
_ut_encode_clock = _lib_ud.ut_encode_clock
_ut_encode_clock.restype = ctypes.c_double
_ut_encode_date = _lib_ud.ut_encode_date
_ut_encode_date.restype = ctypes.c_double
_ut_encode_time = _lib_ud.ut_encode_time
_ut_encode_time.restype = ctypes.c_double
_ut_format = _lib_ud.ut_format
_ut_format.argtypes = [ctypes.c_void_p, ctypes.c_char_p,
ctypes.c_ulong, ctypes.c_uint]
_ut_free = _lib_ud.ut_free
_ut_free.argtypes = [ctypes.c_void_p]
_ut_free.restype = None
_ut_get_converter = _lib_ud.ut_get_converter
_ut_get_converter.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_get_converter.restype = ctypes.c_void_p
_ut_get_status = _lib_ud.ut_get_status
_ut_get_unit_by_name = _lib_ud.ut_get_unit_by_name
_ut_get_unit_by_name.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
_ut_get_unit_by_name.restype = ctypes.c_void_p
_ut_invert = _lib_ud.ut_invert
_ut_invert.argtypes = [ctypes.c_void_p]
_ut_invert.restype = ctypes.c_void_p
_ut_is_dimensionless = _lib_ud.ut_is_dimensionless
_ut_is_dimensionless.argtypes = [ctypes.c_void_p]
_ut_log = _lib_ud.ut_log
_ut_log.argtypes = [ctypes.c_double, ctypes.c_void_p]
_ut_log.restype = ctypes.c_void_p
_ut_multiply = _lib_ud.ut_multiply
_ut_multiply.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
_ut_multiply.restype = ctypes.c_void_p
_ut_offset = _lib_ud.ut_offset
_ut_offset.argtypes = [ctypes.c_void_p, ctypes.c_double]
_ut_offset.restype = ctypes.c_void_p
_ut_offset_by_time = _lib_ud.ut_offset_by_time
_ut_offset_by_time.argtypes = [ctypes.c_void_p, ctypes.c_double]
_ut_offset_by_time.restype = ctypes.c_void_p
_ut_parse = _lib_ud.ut_parse
_ut_parse.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int]
_ut_parse.restype = ctypes.c_void_p
_ut_raise = _lib_ud.ut_raise
_ut_raise.argtypes = [ctypes.c_void_p, ctypes.c_int]
_ut_raise.restype = ctypes.c_void_p
_ut_read_xml = _lib_ud.ut_read_xml
_ut_read_xml.argtypes = [ctypes.c_char_p]
_ut_read_xml.restype = ctypes.c_void_p
_ut_root = _lib_ud.ut_root
_ut_root.argtypes = [ctypes.c_void_p, ctypes.c_int]
_ut_root.restype = ctypes.c_void_p
_ut_scale = _lib_ud.ut_scale
_ut_scale.argtypes = [ctypes.c_double, ctypes.c_void_p]
_ut_scale.restype = ctypes.c_void_p
# convenience dictionary for the Unit convert method
_cv_convert_scalar = {FLOAT32: _cv_convert_float,
FLOAT64: _cv_convert_double}
_cv_convert_array = {FLOAT32: _cv_convert_floats,
FLOAT64: _cv_convert_doubles}
_numpy2ctypes = {np.float32: FLOAT32, np.float64: FLOAT64}
_ctypes2numpy = {v: k for k, v in _numpy2ctypes.items()}
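# Illustrative note (not part of the original module): these lookup tables let
# the Unit convert method pick the UDUNITS-2 converter that matches a numpy
# dtype, e.g. np.float32 -> FLOAT32 -> _cv_convert_float/_cv_convert_floats.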
#
# load the UDUNITS-2 xml-formatted unit-database
#
if not _ud_system:
_func_type = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p,
use_errno=True)
_set_handler_type = ctypes.CFUNCTYPE(_func_type, _func_type)
_ut_set_error_message_handler = _set_handler_type((_UT_HANDLER, _lib_ud))
_ut_ignore = _func_type((_UT_IGNORE, _lib_ud))
# ignore standard UDUNITS-2 start-up preamble redirected to stderr stream
_default_handler = _ut_set_error_message_handler(_ut_ignore)
# Load the unit-database from the default location (modified via
# the UDUNITS2_XML_PATH environment variable) and if that fails look
# relative to sys.prefix to support environments such as conda.
_ud_system = _ut_read_xml(None)
if _ud_system is None:
_alt_xml_path = os.path.join(sys.prefix, 'share',
'udunits', 'udunits2.xml')
_ud_system = _ut_read_xml(_alt_xml_path.encode())
# reinstate old error handler
_ut_set_error_message_handler(_default_handler)
del _func_type
if not _ud_system:
_status_msg = 'UNKNOWN'
_error_msg = ''
_status = _ut_get_status()
try:
_status_msg = _UT_STATUS[_status]
except IndexError:
pass
_errno = ctypes.get_errno()
if _errno != 0:
_error_msg = ': "%s"' % _strerror(_errno)
ctypes.set_errno(0)
raise OSError('[%s] Failed to open UDUNITS-2 XML unit database %s' % (
_status_msg, _error_msg))
########################################################################
#
# module level function definitions
#
########################################################################
def encode_time(year, month, day, hour, minute, second):
"""
Return date/clock time encoded as a double precision value.
Encoding performed using UDUNITS-2 hybrid Gregorian/Julian calendar.
Dates on or after 1582-10-15 are assumed to be Gregorian dates;
dates before that are assumed to be Julian dates. In particular, the
year 1 BCE is immediately followed by the year 1 CE.
Args:
* year (int):
Year value to be encoded.
* month (int):
Month value to be encoded.
* day (int):
Day value to be encoded.
* hour (int):
Hour value to be encoded.
* minute (int):
Minute value to be encoded.
* second (int):
Second value to be encoded.
Returns:
float.
For example:
>>> import cf_units as unit
>>> unit.encode_time(1970, 1, 1, 0, 0, 0)
-978307200.0
"""
return _ut_encode_time(ctypes.c_int(year), ctypes.c_int(month),
ctypes.c_int(day), ctypes.c_int(hour),
ctypes.c_int(minute), ctypes.c_double(second))
def encode_date(year, month, day):
"""
Return date encoded as a double precision value.
    Encoding performed using UDUNITS-2 hybrid Gregorian/Julian calendar.
Dates on or after 1582-10-15 are assumed to be Gregorian dates;
dates before that are assumed to be Julian dates. In particular, the
year 1 BCE is immediately followed by the year 1 CE.
Args:
* year (int):
Year value to be encoded.
* month (int):
Month value to be encoded.
* day (int):
Day value to be encoded.
Returns:
float.
For example:
>>> import cf_units as unit
>>> unit.encode_date(1970, 1, 1)
-978307200.0
"""
return _ut_encode_date(ctypes.c_int(year), ctypes.c_int(month),
ctypes.c_int(day))
def encode_clock(hour, minute, second):
"""
Return clock time encoded as a double precision value.
Args:
* hour (int):
Hour value to be encoded.
* minute (int):
Minute value to be encoded.
* second (int):
Second value to be encoded.
Returns:
float.
For example:
>>> import cf_units as unit
>>> unit.encode_clock(0, 0, 0)
0.0
"""
return _ut_encode_clock(ctypes.c_int(hour), ctypes.c_int(minute),
ctypes.c_double(second))
def decode_time(time):
"""
Decode a double precision date/clock time value into its component
parts and return as tuple.
    Decode time into its year, month, day, hour, minute, second, and
resolution component parts. Where resolution is the uncertainty of
the time in seconds.
Args:
* time (float): Date/clock time encoded as a double precision value.
Returns:
tuple of (year, month, day, hour, minute, second, resolution).
For example:
>>> import cf_units as unit
>>> unit.decode_time(unit.encode_time(1970, 1, 1, 0, 0, 0))
(1970, 1, 1, 0, 0, 0.0, 1.086139178596568e-07)
"""
year = ctypes.c_int()
month = ctypes.c_int()
day = ctypes.c_int()
hour = ctypes.c_int()
minute = ctypes.c_int()
second = ctypes.c_double()
resolution = ctypes.c_double()
_ut_decode_time(ctypes.c_double(time), ctypes.pointer(year),
ctypes.pointer(month), ctypes.pointer(day),
ctypes.pointer(hour), ctypes.pointer(minute),
ctypes.pointer(second), ctypes.pointer(resolution))
return (year.value, month.value, day.value, hour.value, minute.value,
second.value, resolution.value)
def julian_day2date(julian_day, calendar):
"""
Return a netcdftime datetime-like object representing the Julian day.
If calendar is 'standard' or 'gregorian', Julian day follows
Julian calendar on and before 1582-10-5, Gregorian calendar after
1582-10-15.
If calendar is 'proleptic_gregorian', Julian Day follows Gregorian
calendar.
If calendar is 'julian', Julian Day follows Julian calendar.
The datetime object is a 'real' datetime object if the date falls in
the Gregorian calendar (i.e. calendar is 'proleptic_gregorian', or
calendar is 'standard'/'gregorian' and the date is after 1582-10-15).
Otherwise, it's a 'phony' datetime object which is actually an instance
of netcdftime.datetime.
Algorithm:
Meeus, Jean (1998) Astronomical Algorithms (2nd Edition).
Willmann-Bell, Virginia. p. 63.
Args:
* julian_day (float):
Julian day with a resolution of 1 second.
* calendar (string):
Name of the calendar, see cf_units.CALENDARS.
Returns:
datetime or netcdftime.datetime.
For example:
>>> import cf_units as unit
>>> import datetime
>>> unit.julian_day2date(
... unit.date2julian_day(datetime.datetime(1970, 1, 1, 0, 0, 0),
... unit.CALENDAR_STANDARD),
... unit.CALENDAR_STANDARD)
datetime.datetime(1970, 1, 1, 0, 0)
"""
return netcdftime.DateFromJulianDay(julian_day, calendar)
def date2julian_day(date, calendar):
"""
Return the Julian day (resolution of 1 second) from a netcdftime
datetime-like object.
If calendar is 'standard' or 'gregorian', Julian day follows Julian
calendar on and before 1582-10-5, Gregorian calendar after 1582-10-15.
If calendar is 'proleptic_gregorian', Julian day follows Gregorian
calendar.
If calendar is 'julian', Julian day follows Julian calendar.
Algorithm:
Meeus, Jean (1998) Astronomical Algorithms (2nd Edition).
Willmann-Bell, Virginia. p. 63.
Args:
* date (netcdftime.date):
Date and time representation.
* calendar (string):
Name of the calendar, see cf_units.CALENDARS.
Returns:
float.
For example:
>>> import cf_units as unit
>>> import datetime
>>> unit.date2julian_day(datetime.datetime(1970, 1, 1, 0, 0, 0),
... unit.CALENDAR_STANDARD)
2440587.5
"""
return netcdftime.JulianDayFromDate(date, calendar)
def date2num(date, unit, calendar):
"""
Return numeric time value (resolution of 1 second) encoding of
datetime object.
The units of the numeric time values are described by the unit and
calendar arguments. The datetime objects must be in UTC with no
time-zone offset. If there is a time-zone offset in unit, it will be
applied to the returned numeric values.
Like the :func:`matplotlib.dates.date2num` function, except that it allows
for different units and calendars. Behaves the same as if
unit = 'days since 0001-01-01 00:00:00' and
calendar = 'proleptic_gregorian'.
Args:
* date (datetime):
A datetime object or a sequence of datetime objects.
The datetime objects should not include a time-zone offset.
* unit (string):
A string of the form '<time-unit> since <time-origin>' describing
the time units. The <time-unit> can be days, hours, minutes or seconds.
The <time-origin> is a date/time reference point. A valid choice
would be unit='hours since 1800-01-01 00:00:00 -6:00'.
* calendar (string):
Name of the calendar, see cf_units.CALENDARS.
Returns:
float, or numpy.ndarray of float.
For example:
>>> import cf_units as unit
>>> import datetime
>>> dt1 = datetime.datetime(1970, 1, 1, 6, 0, 0)
>>> dt2 = datetime.datetime(1970, 1, 1, 7, 0, 0)
>>> unit.date2num(dt1, 'hours since 1970-01-01 00:00:00',
... unit.CALENDAR_STANDARD)
6.0
>>> unit.date2num([dt1, dt2], 'hours since 1970-01-01 00:00:00',
... unit.CALENDAR_STANDARD)
array([ 6., 7.])
"""
#
# ensure to strip out any 'UTC' postfix which is generated by
# UDUNITS-2 formatted output and causes the netcdftime parser
# to choke
#
unit_string = unit.rstrip(" UTC")
if unit_string.endswith(" since epoch"):
unit_string = unit_string.replace("epoch", IRIS_EPOCH)
cdftime = netcdftime.utime(unit_string, calendar=calendar)
return cdftime.date2num(date)
def num2date(time_value, unit, calendar):
"""
Return datetime encoding of numeric time value (resolution of 1 second).
The units of the numeric time value are described by the unit and
    calendar arguments. The returned datetime object represents UTC with
    no time-zone offset, even if the specified unit contains a time-zone
offset.
Like the :func:`matplotlib.dates.num2date` function, except that it allows
    for different units and calendars. Behaves the same as if
    unit = 'days since 0001-01-01 00:00:00' and
    calendar = 'proleptic_gregorian'.
The datetime instances returned are 'real' python datetime
objects if the date falls in the Gregorian calendar (i.e.
calendar='proleptic_gregorian', or calendar = 'standard' or 'gregorian'
and the date is after 1582-10-15). Otherwise, they are 'phony' datetime
objects which support some but not all the methods of 'real' python
datetime objects. This is because the python datetime module cannot
use the 'proleptic_gregorian' calendar, even before the switch
    occurred from the Julian calendar in 1582. The datetime instances
do not contain a time-zone offset, even if the specified unit
contains one.
Args:
* time_value (float):
Numeric time value/s. Maximum resolution is 1 second.
    * unit (string):
A string of the form '<time-unit> since <time-origin>'
describing the time units. The <time-unit> can be days, hours,
minutes or seconds. The <time-origin> is the date/time reference
point. A valid choice would be
unit='hours since 1800-01-01 00:00:00 -6:00'.
* calendar (string):
Name of the calendar, see cf_units.CALENDARS.
Returns:
datetime, or numpy.ndarray of datetime object.
For example:
>>> import cf_units as unit
>>> import datetime
>>> unit.num2date(6, 'hours since 1970-01-01 00:00:00',
... unit.CALENDAR_STANDARD)
datetime.datetime(1970, 1, 1, 6, 0)
>>> unit.num2date([6, 7], 'hours since 1970-01-01 00:00:00',
... unit.CALENDAR_STANDARD)
array([datetime.datetime(1970, 1, 1, 6, 0),
datetime.datetime(1970, 1, 1, 7, 0)], dtype=object)
"""
#
# ensure to strip out any 'UTC' postfix which is generated by
# UDUNITS-2 formatted output and causes the netcdftime parser
# to choke
#
unit_string = unit.rstrip(" UTC")
if unit_string.endswith(" since epoch"):
unit_string = unit_string.replace("epoch", IRIS_EPOCH)
cdftime = netcdftime.utime(unit_string, calendar=calendar)
return cdftime.num2date(time_value)
def _handler(func):
"""Set the error message handler."""
_ut_set_error_message_handler(func)
@contextmanager
def suppress_unit_warnings():
"""
Suppresses all warnings raised because of invalid units in loaded data.
"""
# Suppress any warning messages raised by UDUNITS2.
_func_type = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p,
use_errno=True)
_set_handler_type = ctypes.CFUNCTYPE(_func_type, _func_type)
_ut_set_error_message_handler = _set_handler_type((_UT_HANDLER, _lib_ud))
_ut_ignore = _func_type((_UT_IGNORE, _lib_ud))
_default_handler = _ut_set_error_message_handler(_ut_ignore)
with warnings.catch_warnings():
# Also suppress invalid units warnings from the Iris loader code.
warnings.filterwarnings("ignore", message=".*invalid units")
yield
_ut_set_error_message_handler(_default_handler)
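# A minimal usage sketch (the load call below is hypothetical; any code that
# may emit UDUNITS-2 or "invalid units" warnings can go inside the block):
#
#     with suppress_unit_warnings():
#         cube = iris.load('file_with_invalid_units.nc')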
########################################################################
#
# unit wrapper class for unidata/ucar UDUNITS-2
#
########################################################################
def _Unit(category, ut_unit, calendar=None, origin=None):
unit = iris.util._OrderedHashable.__new__(Unit)
unit._init(category, ut_unit, calendar, origin)
return unit
_CACHE = {}
def as_unit(unit):
"""
Returns a Unit corresponding to the given unit.
.. note::
If the given unit is already a Unit it will be returned unchanged.
"""
if isinstance(unit, Unit):
result = unit
else:
result = None
use_cache = isinstance(unit, six.string_types) or unit is None
if use_cache:
result = _CACHE.get(unit)
if result is None:
result = Unit(unit)
if use_cache:
_CACHE[unit] = result
return result
def is_time(unit):
"""
Determine whether the unit is a related SI Unit of time.
Args:
* unit (string/Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> import cf_units as unit
>>> unit.is_time('hours')
True
>>> unit.is_time('meters')
False
"""
return as_unit(unit).is_time()
def is_vertical(unit):
"""
Determine whether the unit is a related SI Unit of pressure or distance.
Args:
* unit (string/Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> import cf_units as unit
>>> unit.is_vertical('millibar')
True
>>> unit.is_vertical('km')
True
"""
return as_unit(unit).is_vertical()
class Unit(iris.util._OrderedHashable):
"""
A class to represent S.I. units and support common operations to
manipulate such units in a consistent manner as per UDUNITS-2.
These operations include scaling the unit, offsetting the unit by a
constant or time, inverting the unit, raising the unit by a power,
taking a root of the unit, taking a log of the unit, multiplying the
unit by a constant or another unit, dividing the unit by a constant
or another unit, comparing units, copying units and converting unit
data to single precision or double precision floating point numbers.
    This class also supports time and calendar definition and manipulation.
"""
# Declare the attribute names relevant to the _OrderedHashable behaviour.
_names = ('category', 'ut_unit', 'calendar', 'origin')
category = None
'Is this an unknown unit, a no-unit, or a UDUNITS-2 unit.'
ut_unit = None
'Reference to the ctypes quantity defining the UDUNITS-2 unit.'
calendar = None
'Represents the unit calendar name, see cf_units.CALENDARS'
origin = None
'The original string used to create this unit.'
__slots__ = ()
def __init__(self, unit, calendar=None):
"""
Create a wrapper instance for UDUNITS-2.
An optional calendar may be provided for a unit which defines a
time reference of the form '<time-unit> since <time-origin>'
i.e. unit='days since 1970-01-01 00:00:00'. For a unit that is a
time reference, the default calendar is 'standard'.
Accepted calendars are as follows,
* 'standard' or 'gregorian' - Mixed Gregorian/Julian calendar as
defined by udunits.
* 'proleptic_gregorian' - A Gregorian calendar extended to dates
before 1582-10-15. A year is a leap year if either,
1. It is divisible by 4 but not by 100, or
2. It is divisible by 400.
* 'noleap' or '365_day' - A Gregorian calendar without leap
years i.e. all years are 365 days long.
* 'all_leap' or '366_day' - A Gregorian calendar with every year
being a leap year i.e. all years are 366 days long.
* '360_day' - All years are 360 days divided into 30 day months.
* 'julian' - Proleptic Julian calendar, extended to dates after
1582-10-5. A year is a leap year if it is divisible by 4.
Args:
* unit:
Specify the unit as defined by UDUNITS-2.
* calendar (string):
Describes the calendar used in time calculations. The
default is 'standard' or 'gregorian' for a time reference
unit.
Returns:
Unit object.
Units should be set to "no_unit" for values which are strings.
Units can also be set to "unknown" (or None).
For example:
>>> from cf_units import Unit
>>> volts = Unit('volts')
>>> no_unit = Unit('no_unit')
>>> unknown = Unit('unknown')
>>> unknown = Unit(None)
"""
ut_unit = None
calendar_ = None
if unit is None:
unit = ''
else:
unit = str(unit).strip()
if unit.lower().endswith(' utc'):
unit = unit[:unit.lower().rfind(' utc')]
if unit.endswith(" since epoch"):
unit = unit.replace("epoch", IRIS_EPOCH)
if unit.lower() in _UNKNOWN_UNIT:
# TODO - removing the option of an unknown unit. Currently
# the auto generated MOSIG rules are missing units on a
# number of phenomena which would lead to errors.
# Will be addressed by work on metadata translation.
category = _CATEGORY_UNKNOWN
unit = _UNKNOWN_UNIT_STRING
elif unit.lower() in _NO_UNIT:
category = _CATEGORY_NO_UNIT
unit = _NO_UNIT_STRING
else:
category = _CATEGORY_UDUNIT
ut_unit = _ut_parse(_ud_system, unit.encode('ascii'), UT_ASCII)
# _ut_parse returns 0 on failure
if ut_unit is None:
self._raise_error('Failed to parse unit "%s"' % unit)
if _OP_SINCE in unit.lower():
if calendar is None:
calendar_ = CALENDAR_GREGORIAN
elif isinstance(calendar, six.string_types):
if calendar.lower() in CALENDARS:
calendar_ = calendar.lower()
else:
msg = '{!r} is an unsupported calendar.'
raise ValueError(msg.format(calendar))
else:
msg = 'Expected string-like calendar argument, got {!r}.'
raise TypeError(msg.format(type(calendar)))
self._init(category, ut_unit, calendar_, unit)
def _raise_error(self, msg):
"""
Retrieve the UDUNITS-2 ut_status, the implementation-defined string
corresponding to UDUNITS-2 errno and raise generic exception.
"""
status_msg = 'UNKNOWN'
error_msg = ''
if _lib_ud:
status = _ut_get_status()
try:
status_msg = _UT_STATUS[status]
except IndexError:
pass
errno = ctypes.get_errno()
if errno != 0:
error_msg = ': "%s"' % _strerror(errno)
ctypes.set_errno(0)
raise ValueError('[%s] %s %s' % (status_msg, msg, error_msg))
# NOTE:
# "__getstate__" and "__setstate__" functions are defined here to
# provide a custom interface for Pickle
# : Pickle "normal" behaviour is just to save/reinstate the object
# dictionary
# : that won't work here, because the "ut_unit" attribute is an
# object handle
# - the corresponding udunits object only exists in the original
# invocation
def __getstate__(self):
# state capture method for Pickle.dump()
# - return the instance data needed to reconstruct a Unit value
return {'unit_text': self.origin, 'calendar': self.calendar}
def __setstate__(self, state):
# object reconstruction method for Pickle.load()
# intercept the Pickle.load() operation and call own __init__ again
# - this is to ensure a valid ut_unit attribute (as these
# handles aren't persistent)
self.__init__(state['unit_text'], calendar=state['calendar'])
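    # A round-trip sketch of the behaviour described in the note above (the
    # pickle usage is illustrative; equality holds because the unit is
    # rebuilt from its origin text and calendar):
    #
    #     import pickle
    #     u = Unit('days since 1970-01-01 00:00:00')
    #     pickle.loads(pickle.dumps(u)) == u # True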
def __del__(self):
# NB. If Python is terminating then the module global "_ut_free"
# may have already been deleted ... so we check before using it.
if _ut_free:
_ut_free(self.ut_unit)
def __copy__(self):
return self
def __deepcopy__(self, memo):
return self
def is_time(self):
"""
Determine whether this unit is a related SI Unit of time.
Returns:
Boolean.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('hours')
>>> u.is_time()
True
>>> v = unit.Unit('meter')
>>> v.is_time()
False
"""
if self.is_unknown() or self.is_no_unit():
result = False
else:
day = _ut_get_unit_by_name(_ud_system, b'day')
result = _ut_are_convertible(self.ut_unit, day) != 0
return result
def is_vertical(self):
"""
Determine whether the unit is a related SI Unit of pressure or
distance.
Returns:
Boolean.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('millibar')
>>> u.is_vertical()
True
>>> v = unit.Unit('km')
>>> v.is_vertical()
True
"""
if self.is_unknown() or self.is_no_unit():
result = False
else:
bar = _ut_get_unit_by_name(_ud_system, b'bar')
result = _ut_are_convertible(self.ut_unit, bar) != 0
if not result:
meter = _ut_get_unit_by_name(_ud_system, b'meter')
result = _ut_are_convertible(self.ut_unit, meter) != 0
return result
def is_udunits(self):
"""Return whether the unit is a vaild unit of UDUNITS."""
return self.ut_unit is not None
def is_time_reference(self):
"""
Return whether the unit is a time reference unit of the form
'<time-unit> since <time-origin>'
i.e. unit='days since 1970-01-01 00:00:00'
Returns:
Boolean.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('days since epoch')
>>> u.is_time_reference()
True
"""
return self.calendar is not None
def title(self, value):
"""
Return the unit value as a title string.
Args:
* value (float): Unit value to be incorporated into title string.
Returns:
string.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('hours since epoch',
... calendar=unit.CALENDAR_STANDARD)
>>> u.title(10)
'1970-01-01 10:00:00'
"""
if self.is_time_reference():
dt = self.num2date(value)
result = dt.strftime('%Y-%m-%d %H:%M:%S')
else:
result = '%s %s' % (str(value), self)
return result
@property
def modulus(self):
"""
*(read-only)* Return the modulus value of the unit.
Convenience method that returns the unit modulus value as follows,
* 'radians' - pi*2
* 'degrees' - 360.0
* Otherwise None.
Returns:
float.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('degrees')
>>> u.modulus
360.0
"""
if self == 'radians':
result = np.pi * 2
elif self == 'degrees':
result = 360.0
else:
result = None
return result
def is_convertible(self, other):
"""
Return whether two units are convertible.
Args:
* other (Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters')
>>> v = unit.Unit('kilometers')
>>> u.is_convertible(v)
True
"""
other = as_unit(other)
if self.is_unknown() or self.is_no_unit() or other.is_unknown() or \
other.is_no_unit():
result = False
else:
result = (self.calendar == other.calendar and
_ut_are_convertible(self.ut_unit, other.ut_unit) != 0)
return result
def is_dimensionless(self):
"""
Return whether the unit is dimensionless.
Returns:
Boolean.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters')
>>> u.is_dimensionless()
False
>>> u = unit.Unit('1')
>>> u.is_dimensionless()
True
"""
return (self.category == _CATEGORY_UDUNIT and
bool(_ut_is_dimensionless(self.ut_unit)))
def is_unknown(self):
"""
Return whether the unit is defined to be an *unknown* unit.
Returns:
Boolean.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('unknown')
>>> u.is_unknown()
True
>>> u = unit.Unit('meters')
>>> u.is_unknown()
False
"""
return self.category == _CATEGORY_UNKNOWN
def is_no_unit(self):
"""
Return whether the unit is defined to be a *no_unit* unit.
        Typically, a quantity such as a string will have no associated
unit to describe it. Such a class of quantity may be defined
using the *no_unit* unit.
Returns:
Boolean.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('no unit')
>>> u.is_no_unit()
True
>>> u = unit.Unit('meters')
>>> u.is_no_unit()
False
"""
return self.category == _CATEGORY_NO_UNIT
def format(self, option=None):
"""
Return a formatted string representation of the binary unit.
Args:
* option (cf_units.UT_FORMATS):
Set the encoding option of the formatted string representation.
Valid encoding options may be one of the following enumerations:
* Unit.UT_ASCII
* Unit.UT_ISO_8859_1
* Unit.UT_LATIN1
* Unit.UT_UTF8
* Unit.UT_NAMES
* Unit.UT_DEFINITION
Multiple options may be combined within a list. The default
option is cf_units.UT_ASCII.
Returns:
string.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters')
>>> u.format()
'm'
>>> u.format(unit.UT_NAMES)
'meter'
>>> u.format(unit.UT_DEFINITION)
'm'
"""
if self.is_unknown():
return _UNKNOWN_UNIT_STRING
elif self.is_no_unit():
return _NO_UNIT_STRING
else:
bitmask = UT_ASCII
if option is not None:
if not isinstance(option, list):
option = [option]
for i in option:
bitmask |= i
string_buffer = ctypes.create_string_buffer(_STRING_BUFFER_DEPTH)
depth = _ut_format(self.ut_unit, string_buffer,
ctypes.sizeof(string_buffer), bitmask)
if depth < 0:
self._raise_error('Failed to format %r' % self)
return str(string_buffer.value.decode('ascii'))
@property
def name(self):
"""
*(read-only)* The full name of the unit.
Formats the binary unit into a string representation using
method :func:`cf_units.Unit.format` with keyword argument
option=cf_units.UT_NAMES.
Returns:
string.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('watts')
>>> u.name
'watt'
"""
return self.format(UT_NAMES)
@property
def symbol(self):
"""
*(read-only)* The symbolic representation of the unit.
Formats the binary unit into a string representation using
method :func:`cf_units.Unit.format`.
Returns:
string.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('watts')
>>> u.symbol
'W'
"""
if self.is_unknown():
result = _UNKNOWN_UNIT_SYMBOL
elif self.is_no_unit():
result = _NO_UNIT_SYMBOL
else:
result = self.format()
return result
@property
def definition(self):
"""
*(read-only)* The symbolic decomposition of the unit.
Formats the binary unit into a string representation using
method :func:`cf_units.Unit.format` with keyword argument
option=cf_units.UT_DEFINITION.
Returns:
string.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('watts')
>>> u.definition
'm2.kg.s-3'
"""
if self.is_unknown():
result = _UNKNOWN_UNIT_SYMBOL
elif self.is_no_unit():
result = _NO_UNIT_SYMBOL
else:
result = self.format(UT_DEFINITION)
return result
def offset_by_time(self, origin):
"""
Returns the time unit offset with respect to the time origin.
Args:
* origin (float): Time origin as returned by the
:func:`cf_units.encode_time` method.
Returns:
        Unit.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('hours')
>>> u.offset_by_time(unit.encode_time(1970, 1, 1, 0, 0, 0))
Unit('hour since 1970-01-01 00:00:00.0000000 UTC')
"""
if not isinstance(origin, (float, six.integer_types)):
raise TypeError('a numeric type for the origin argument is'
' required')
ut_unit = _ut_offset_by_time(self.ut_unit, ctypes.c_double(origin))
if not ut_unit:
self._raise_error('Failed to offset %r' % self)
calendar = None
return _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
def invert(self):
"""
Invert the unit i.e. find the reciprocal of the unit, and return
the Unit result.
Returns:
Unit.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters')
>>> u.invert()
Unit('meter^-1')
"""
if self.is_unknown():
result = self
elif self.is_no_unit():
raise ValueError("Cannot invert a 'no-unit'.")
else:
ut_unit = _ut_invert(self.ut_unit)
if not ut_unit:
self._raise_error('Failed to invert %r' % self)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def root(self, root):
"""
Returns the given root of the unit.
Args:
* root (int): Value by which the unit root is taken.
Returns:
        Unit.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters^2')
>>> u.root(2)
Unit('meter')
.. note::
Taking a fractional root of a unit is not supported.
"""
try:
root = ctypes.c_int(root)
except TypeError:
raise TypeError('An int type for the root argument'
' is required')
if self.is_unknown():
result = self
elif self.is_no_unit():
raise ValueError("Cannot take the logarithm of a 'no-unit'.")
else:
# only update the unit if it is not scalar
if self == Unit('1'):
result = self
else:
ut_unit = _ut_root(self.ut_unit, root)
if not ut_unit:
self._raise_error('Failed to take the root of %r' % self)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def log(self, base):
"""
        Returns the logarithmic unit corresponding to the given
        logarithmic base.
Args:
        * base (int/float): Value of the logarithmic base.
Returns:
        Unit.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters')
>>> u.log(2)
Unit('lb(re 1 meter)')
"""
try:
base = ctypes.c_double(base)
except TypeError:
raise TypeError('A numeric type for the base argument is required')
if self.is_unknown():
result = self
elif self.is_no_unit():
raise ValueError("Cannot take the logarithm of a 'no-unit'.")
else:
ut_unit = _ut_log(base, self.ut_unit)
if not ut_unit:
                msg = 'Failed to calculate logarithmic base of %r' % self
self._raise_error(msg)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def __str__(self):
"""
Returns a simple string representation of the unit.
Returns:
string.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters')
>>> str(u)
'meters'
"""
return self.origin or self.name
def __repr__(self):
"""
Returns a string representation of the unit object.
Returns:
string.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters')
>>> repr(u)
"Unit('meters')"
"""
if self.calendar is None:
result = "%s('%s')" % (self.__class__.__name__, self)
else:
result = "%s('%s', calendar='%s')" % (self.__class__.__name__,
self, self.calendar)
return result
def _offset_common(self, offset):
try:
offset = ctypes.c_double(offset)
except TypeError:
result = NotImplemented
else:
if self.is_unknown():
result = self
elif self.is_no_unit():
raise ValueError("Cannot offset a 'no-unit'.")
else:
ut_unit = _ut_offset(self.ut_unit, offset)
if not ut_unit:
self._raise_error('Failed to offset %r' % self)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def __add__(self, other):
return self._offset_common(other)
def __sub__(self, other):
try:
other = -other
except TypeError:
result = NotImplemented
else:
result = self._offset_common(-other)
return result
def _op_common(self, other, op_func):
        # Convenience method to create a new unit from an operation between
# the units 'self' and 'other'.
op_label = op_func.__name__.split('_')[1]
other = as_unit(other)
if self.is_no_unit() or other.is_no_unit():
raise ValueError("Cannot %s a 'no-unit'." % op_label)
if self.is_unknown() or other.is_unknown():
result = _Unit(_CATEGORY_UNKNOWN, None)
else:
ut_unit = op_func(self.ut_unit, other.ut_unit)
if not ut_unit:
msg = 'Failed to %s %r by %r' % (op_label, self, other)
self._raise_error(msg)
calendar = None
result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
return result
def __rmul__(self, other):
# NB. Because we've subclassed a tuple, we need to define this to
# prevent the default tuple-repetition behaviour.
# ie. 2 * ('a', 'b') -> ('a', 'b', 'a', 'b')
return self * other
def __mul__(self, other):
"""
Multiply the self unit by the other scale factor or unit and
return the Unit result.
Note that, multiplication involving an 'unknown' unit will always
result in an 'unknown' unit.
Args:
* other (int/float/string/Unit): Multiplication scale
factor or unit.
Returns:
Unit.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters')
>>> v = unit.Unit('hertz')
>>> u*v
Unit('meter-second^-1')
"""
return self._op_common(other, _ut_multiply)
def __div__(self, other):
"""
Divide the self unit by the other scale factor or unit and
return the Unit result.
Note that, division involving an 'unknown' unit will always
result in an 'unknown' unit.
Args:
* other (int/float/string/Unit): Division scale factor or unit.
Returns:
Unit.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('m.s-1')
>>> v = unit.Unit('hertz')
>>> u/v
Unit('meter')
"""
return self._op_common(other, _ut_divide)
def __truediv__(self, other):
"""
Divide the self unit by the other scale factor or unit and
return the Unit result.
Note that, division involving an 'unknown' unit will always
result in an 'unknown' unit.
Args:
* other (int/float/string/Unit): Division scale factor or unit.
Returns:
Unit.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('m.s-1')
>>> v = unit.Unit('hertz')
>>> u/v
Unit('meter')
"""
return self.__div__(other)
def __pow__(self, power):
"""
Raise the unit by the given power and return the Unit result.
Note that, UDUNITS-2 does not support raising a
non-dimensionless unit by a fractional power.
Approximate floating point power behaviour has been implemented
specifically for Iris.
Args:
* power (int/float): Value by which the unit power is raised.
Returns:
Unit.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('meters')
>>> u**2
Unit('meter^2')
"""
try:
power = float(power)
except ValueError:
raise TypeError('A numeric value is required for the power'
' argument.')
if self.is_unknown():
result = self
elif self.is_no_unit():
raise ValueError("Cannot raise the power of a 'no-unit'.")
elif self == Unit('1'):
# 1 ** N -> 1
result = self
else:
# UDUNITS-2 does not support floating point raise/root.
# But if the power is of the form 1/N, where N is an integer
# (within a certain acceptable accuracy) then we can find the Nth
# root.
if not iris.util.approx_equal(power, 0.0) and abs(power) < 1:
if not iris.util.approx_equal(1 / power, round(1 / power)):
raise ValueError('Cannot raise a unit by a decimal.')
root = int(round(1 / power))
result = self.root(root)
else:
# Failing that, check for powers which are (very nearly) simple
# integer values.
if not iris.util.approx_equal(power, round(power)):
msg = 'Cannot raise a unit by a decimal (got %s).' % power
raise ValueError(msg)
power = int(round(power))
ut_unit = _ut_raise(self.ut_unit, ctypes.c_int(power))
if not ut_unit:
self._raise_error('Failed to raise the power of %r' % self)
result = _Unit(_CATEGORY_UDUNIT, ut_unit)
return result
def _identity(self):
# Redefine the comparison/hash/ordering identity as used by
# iris.util._OrderedHashable.
return (self.name, self.calendar)
__hash__ = iris.util._OrderedHashable.__hash__
def __eq__(self, other):
"""
Compare the two units for equality and return the boolean result.
Args:
* other (string/Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> from cf_units import Unit
>>> Unit('meters') == Unit('millimeters')
False
>>> Unit('meters') == 'm'
True
"""
other = as_unit(other)
# Compare category (i.e. unknown, no_unit, etc.).
if self.category != other.category:
return False
# Compare calendar as UDUNITS cannot handle calendars.
if self.calendar != other.calendar:
return False
# Compare UDUNITS.
res = _ut_compare(self.ut_unit, other.ut_unit)
return res == 0
def __ne__(self, other):
"""
Compare the two units for inequality and return the boolean result.
Args:
* other (string/Unit): Unit to be compared.
Returns:
Boolean.
For example:
>>> from cf_units import Unit
>>> Unit('meters') != Unit('millimeters')
True
>>> Unit('meters') != 'm'
False
"""
return not self == other
def convert(self, value, other, ctype=FLOAT64):
"""
Converts a single value or numpy array of values from the current unit
to the other target unit.
If the units are not convertible, then no conversion will take place.
Args:
* value (int/float/numpy.ndarray):
Value/s to be converted.
* other (string/Unit):
Target unit to convert to.
* ctype (ctypes.c_float/ctypes.c_double):
Floating point 32-bit single-precision (cf_units.FLOAT32) or
64-bit double-precision (cf_units.FLOAT64) used for conversion
when `value` is not a NumPy array or is a NumPy array composed of
NumPy integers. The default is 64-bit double-precision conversion.
Returns:
float or numpy.ndarray of appropriate float type.
For example:
>>> import cf_units as unit
>>> import numpy as np
>>> c = unit.Unit('deg_c')
>>> f = unit.Unit('deg_f')
>>> c.convert(0, f)
31.999999999999886
>>> c.convert(0, f, unit.FLOAT32)
32.0
>>> a64 = np.arange(10, dtype=np.float64)
>>> c.convert(a64, f)
array([ 32. , 33.8, 35.6, 37.4, 39.2, 41. , 42.8, 44.6, \
46.4, 48.2])
>>> a32 = np.arange(10, dtype=np.float32)
>>> c.convert(a32, f)
array([ 32. , 33.79999924, 35.59999847, 37.40000153,
39.20000076, 41. , 42.79999924, 44.59999847,
46.40000153, 48.20000076], dtype=float32)
.. note::
Conversion is done *in-place* for numpy arrays. Also note that,
conversion between unit calendars is not permitted.
"""
result = None
other = as_unit(other)
value_copy = copy.deepcopy(value)
if self == other:
return value
if self.is_convertible(other):
# Use utime for converting reference times that are not using a
# gregorian calendar as it handles these and udunits does not.
if self.is_time_reference() \
and self.calendar != CALENDAR_GREGORIAN:
ut1 = self.utime()
ut2 = other.utime()
result = ut2.date2num(ut1.num2date(value_copy))
# Preserve the datatype of the input array if it was float32.
if (isinstance(value, np.ndarray)
and value.dtype == np.float32):
result = result.astype(np.float32)
else:
ut_converter = _ut_get_converter(self.ut_unit, other.ut_unit)
if ut_converter:
if isinstance(value_copy, np.ndarray):
# Can only handle array of np.float32 or np.float64 so
# cast array of ints to array of floats of requested
# precision.
if issubclass(value_copy.dtype.type, np.integer):
value_copy = value_copy.astype(
_ctypes2numpy[ctype])
# Convert arrays with explicit endianness to native
# endianness: udunits seems to be tripped up by arrays
# with endianness other than native.
if value_copy.dtype.byteorder != '=':
value_copy = value_copy.astype(
value_copy.dtype.type)
# strict type check of numpy array
if value_copy.dtype.type not in _numpy2ctypes:
raise TypeError(
"Expect a numpy array of '%s' or '%s'" %
tuple(sorted(_numpy2ctypes.keys())))
ctype = _numpy2ctypes[value_copy.dtype.type]
pointer = value_copy.ctypes.data_as(
ctypes.POINTER(ctype))
# Utilise global convenience dictionary
# _cv_convert_array
_cv_convert_array[ctype](ut_converter, pointer,
value_copy.size, pointer)
result = value_copy
else:
if ctype not in _cv_convert_scalar:
raise ValueError('Invalid target type. Can only '
'convert to float or double.')
# Utilise global convenience dictionary
# _cv_convert_scalar
result = _cv_convert_scalar[ctype](ut_converter,
ctype(value_copy))
_cv_free(ut_converter)
else:
self._raise_error('Failed to convert %r to %r' %
(self, other))
else:
raise ValueError("Unable to convert from '%r' to '%r'." %
(self, other))
return result
def utime(self):
"""
Returns a netcdftime.utime object which performs conversions of
numeric time values to/from datetime objects given the current
calendar and unit time reference.
The current unit time reference must be of the form:
'<time-unit> since <time-origin>'
i.e. 'hours since 1970-01-01 00:00:00'
Returns:
netcdftime.utime.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('hours since 1970-01-01 00:00:00',
... calendar=unit.CALENDAR_STANDARD)
>>> ut = u.utime()
>>> ut.num2date(2)
datetime.datetime(1970, 1, 1, 2, 0)
"""
#
# ensure to strip out non-parsable 'UTC' postfix which
# is generated by UDUNITS-2 formatted output
#
if self.calendar is None:
raise ValueError('Unit has undefined calendar')
return netcdftime.utime(str(self).rstrip(" UTC"), self.calendar)
def date2num(self, date):
"""
Returns the numeric time value calculated from the datetime
object using the current calendar and unit time reference.
The current unit time reference must be of the form:
'<time-unit> since <time-origin>'
i.e. 'hours since 1970-01-01 00:00:00'
Works for scalars, sequences and numpy arrays. Returns a scalar
if input is a scalar, else returns a numpy array.
Args:
* date (datetime):
A datetime object or a sequence of datetime objects.
The datetime objects should not include a time-zone offset.
Returns:
float or numpy.ndarray of float.
For example:
>>> import cf_units as unit
>>> import datetime
>>> u = unit.Unit('hours since 1970-01-01 00:00:00',
... calendar=unit.CALENDAR_STANDARD)
>>> u.date2num(datetime.datetime(1970, 1, 1, 5))
5.00000000372529
>>> u.date2num([datetime.datetime(1970, 1, 1, 5),
... datetime.datetime(1970, 1, 1, 6)])
array([ 5., 6.])
"""
cdf_utime = self.utime()
return cdf_utime.date2num(date)
def num2date(self, time_value):
"""
Returns a datetime-like object calculated from the numeric time
value using the current calendar and the unit time reference.
The current unit time reference must be of the form:
'<time-unit> since <time-origin>'
i.e. 'hours since 1970-01-01 00:00:00'
The datetime objects returned are 'real' Python datetime objects
if the date falls in the Gregorian calendar (i.e. the calendar
is 'standard', 'gregorian', or 'proleptic_gregorian' and the
        date is after 1582-10-15). Otherwise a 'phony' datetime-like
object (netcdftime.datetime) is returned which can handle dates
that don't exist in the Proleptic Gregorian calendar.
Works for scalars, sequences and numpy arrays. Returns a scalar
if input is a scalar, else returns a numpy array.
Args:
* time_value (float): Numeric time value/s. Maximum resolution
is 1 second.
Returns:
datetime, or numpy.ndarray of datetime object.
For example:
>>> import cf_units as unit
>>> u = unit.Unit('hours since 1970-01-01 00:00:00',
... calendar=unit.CALENDAR_STANDARD)
>>> u.num2date(6)
datetime.datetime(1970, 1, 1, 6, 0)
>>> u.num2date([6, 7])
array([datetime.datetime(1970, 1, 1, 6, 0),
datetime.datetime(1970, 1, 1, 7, 0)], dtype=object)
"""
cdf_utime = self.utime()
return cdf_utime.num2date(time_value)
| mo-g/iris | lib/iris/unit.py | Python | gpl-3.0 | 62,925 |
from unittest import TestCase
from settings import settings
from office365.outlookservices.outlook_client import OutlookClient
from office365.runtime.auth.authentication_context import AuthenticationContext
class OutlookClientTestCase(TestCase):
"""SharePoint specific test case base class"""
@classmethod
def setUpClass(cls):
# Due to Outlook REST API v1.0 BasicAuth Deprecation
# (refer https://developer.microsoft.com/en-us/office/blogs/outlook-rest-api-v1-0-basicauth-deprecation/)
# NetworkCredentialContext class should be no longer utilized
# ctx_auth = NetworkCredentialContext(username=settings['user_credentials']['username'],
# password=settings['user_credentials']['password'])
ctx_auth = AuthenticationContext(url=settings['tenant'])
ctx_auth.acquire_token_password_grant(client_credentials=settings['client_credentials'],
user_credentials=settings['user_credentials'])
cls.client = OutlookClient(ctx_auth)
| vgrem/SharePointOnline-REST-Python-Client | tests/outlook_case.py | Python | mit | 1,082 |
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse a response to the 'ismaster' command."""
import itertools
from bson.py3compat import imap
from pymongo import common
from pymongo.server_type import SERVER_TYPE
def _get_server_type(doc):
"""Determine the server type from an ismaster response."""
if not doc.get('ok'):
return SERVER_TYPE.Unknown
if doc.get('isreplicaset'):
return SERVER_TYPE.RSGhost
elif doc.get('setName'):
if doc.get('hidden'):
return SERVER_TYPE.RSOther
elif doc.get('ismaster'):
return SERVER_TYPE.RSPrimary
elif doc.get('secondary'):
return SERVER_TYPE.RSSecondary
elif doc.get('arbiterOnly'):
return SERVER_TYPE.RSArbiter
else:
return SERVER_TYPE.RSOther
elif doc.get('msg') == 'isdbgrid':
return SERVER_TYPE.Mongos
else:
return SERVER_TYPE.Standalone
class IsMaster(object):
__slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable')
def __init__(self, doc):
"""Parse an ismaster response from the server."""
self._server_type = _get_server_type(doc)
self._doc = doc
self._is_writable = self._server_type in (
SERVER_TYPE.RSPrimary,
SERVER_TYPE.Standalone,
SERVER_TYPE.Mongos)
self._is_readable = (
self.server_type == SERVER_TYPE.RSSecondary
or self._is_writable)
@property
def server_type(self):
return self._server_type
@property
def all_hosts(self):
"""List of hosts, passives, and arbiters known to this server."""
return set(imap(common.clean_node, itertools.chain(
self._doc.get('hosts', []),
self._doc.get('passives', []),
self._doc.get('arbiters', []))))
@property
def tags(self):
"""Replica set member tags or empty dict."""
return self._doc.get('tags', {})
@property
def primary(self):
"""This server's opinion about who the primary is, or None."""
if self._doc.get('primary'):
return common.partition_node(self._doc['primary'])
else:
return None
@property
def replica_set_name(self):
"""Replica set name or None."""
return self._doc.get('setName')
@property
def max_bson_size(self):
return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE)
@property
def max_message_size(self):
return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size)
@property
def max_write_batch_size(self):
return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE)
@property
def min_wire_version(self):
return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION)
@property
def max_wire_version(self):
return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION)
@property
def election_id(self):
return self._doc.get('electionId')
@property
def is_writable(self):
return self._is_writable
@property
def is_readable(self):
return self._is_readable
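# A minimal sketch of how a reply is classified (the document below is
# illustrative only, not a captured server response):
#
#     ism = IsMaster({'ok': 1, 'setName': 'rs0', 'ismaster': True,
#                     'hosts': ['a:27017', 'b:27017']})
#     ism.server_type == SERVER_TYPE.RSPrimary # True
#     sorted(ism.all_hosts) # [('a', 27017), ('b', 27017)]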
| nichung/wwwflaskBlogrevA | venv/lib/python2.7/site-packages/pymongo/ismaster.py | Python | mit | 3,733 |
from django.contrib import admin
#from ittc.capabilities.models import Server, Extent, Layer, Collection, CollectionMember, TileService, TileServiceType, ImageType
from ittc.capabilities.models import Server, Extent, Layer, Collection, CollectionMember, TileServiceType, ImageType
class ExtentAdmin(admin.ModelAdmin):
    model = Extent
list_display_links = ('id',)
list_display = ('id','bbox')
class LayerAdmin(admin.ModelAdmin):
model = Layer
list_display_links = ('id','name',)
list_display = ('id','name','slug',)
class CollectionAdmin(admin.ModelAdmin):
model = Collection
list_display_links = ('id','name',)
list_display = ('id','name','slug',)
class CollectionMemberAdmin(admin.ModelAdmin):
model = CollectionMember
list_display_links = ('id',)
list_display = ('id','collection','layer',)
#class TileServiceAdmin(admin.ModelAdmin):
# model = Layer
# list_display_links = ('id','name',)
# list_display = ('id','name','serviceType','imageType')
#class TileServiceAdmin(admin.ModelAdmin):
# model = Layer
# list_display_links = ('id','name',)
# list_display = ('id','name','slug','serviceType','srs')
class TileServiceTypeAdmin(admin.ModelAdmin):
    model = TileServiceType
list_display_links = ('identifier',)
list_display = ('identifier','name','description')
class ImageTypeAdmin(admin.ModelAdmin):
    model = ImageType
list_display_links = ('identifier',)
list_display = ('identifier','name','description')
class ServerAdmin(admin.ModelAdmin):
model = Server
list_display_links = ('id','name',)
list_display = ('id','name',)
admin.site.register(Layer, LayerAdmin)
admin.site.register(Collection,CollectionAdmin)
admin.site.register(CollectionMember,CollectionMemberAdmin)
#admin.site.register(TileService, TileServiceAdmin)
admin.site.register(TileServiceType, TileServiceTypeAdmin)
admin.site.register(ImageType, ImageTypeAdmin)
admin.site.register(Server, ServerAdmin)
admin.site.register(Extent, ExtentAdmin)
| state-hiu/ittc-server-django | ittc/capabilities/admin.py | Python | mit | 2,008 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
import chainermn
from chainermn.extensions import create_multi_node_checkpointer
class MLP(chainer.Chain):
def __init__(self, n_units, n_out):
super(MLP, self).__init__(
# the size of the inputs to each layer will be inferred
l1=L.Linear(784, n_units), # n_in -> n_units
l2=L.Linear(n_units, n_units), # n_units -> n_units
l3=L.Linear(n_units, n_out), # n_units -> n_out
)
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
def main():
parser = argparse.ArgumentParser(
description='''\
ChainerMN example: MNIST with automatic checkpoints enabled''')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='Number of images in each mini-batch')
parser.add_argument('--communicator', type=str,
default='hierarchical', help='Type of communicator')
parser.add_argument('--epoch', '-e', type=int, default=20,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', action='store_true',
help='Use GPU')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--unit', '-u', type=int, default=1000,
help='Number of units')
parser.add_argument('--run-id', type=str, default='train-mnist-example',
help='ID of the task name')
args = parser.parse_args()
# Prepare ChainerMN communicator.
if args.gpu:
if args.communicator == 'naive':
print('Error: \'naive\' communicator does not support GPU.\n')
exit(-1)
comm = chainermn.create_communicator(args.communicator)
device = comm.intra_rank
else:
if args.communicator != 'naive':
print('Warning: using naive communicator '
'because only naive supports CPU-only execution')
comm = chainermn.create_communicator('naive')
device = -1
if comm.rank == 0:
print('==========================================')
print('Num process (COMM_WORLD): {}'.format(comm.size))
if args.gpu:
print('Using GPUs')
print('Using {} communicator'.format(args.communicator))
print('Num unit: {}'.format(args.unit))
print('Num Minibatch-size: {}'.format(args.batchsize))
print('Num epoch: {}'.format(args.epoch))
print('==========================================')
model = L.Classifier(MLP(args.unit, 10))
if device >= 0:
chainer.cuda.get_device_from_id(device).use()
model.to_gpu()
# Create a multi node optimizer from a standard Chainer optimizer.
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.Adam(), comm)
optimizer.setup(model)
# Split and distribute the dataset. Only worker 0 loads the whole dataset.
# Datasets of worker 0 are evenly split and distributed to all workers.
if comm.rank == 0:
train, test = chainer.datasets.get_mnist()
else:
train, test = None, None
train = chainermn.scatter_dataset(train, comm, shuffle=True)
test = chainermn.scatter_dataset(test, comm, shuffle=True)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
updater = training.StandardUpdater(train_iter, optimizer, device=device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
# Enable checkpointer and recover from checkpoint if any checkpoint exists
checkpointer = create_multi_node_checkpointer(name=args.run_id, comm=comm)
checkpointer.maybe_load(trainer, optimizer)
print('Rank', comm.rank, ': (Re)Starting from (epoch, iter) =',
(trainer.updater.epoch, trainer.updater.iteration))
trainer.extend(checkpointer, trigger=(1000, 'iteration'))
# Create a multi node evaluator from a standard Chainer evaluator.
evaluator = extensions.Evaluator(test_iter, model, device=device)
evaluator = chainermn.create_multi_node_evaluator(evaluator, comm)
trainer.extend(evaluator)
# Some display and output extensions are necessary only for one worker.
# (Otherwise, there would just be repeated outputs.)
if comm.rank == 0:
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
trainer.run()
if __name__ == '__main__':
main()
| tkerola/chainer | examples/chainermn/mnist/train_mnist_checkpoint.py | Python | mit | 5,159 |
from daemon import Daemon
| samrocketman/globd | glib/__init__.py | Python | apache-2.0 | 26 |
from __future__ import print_function
from bokeh.client import push_session
from bokeh.document import Document
from bokeh.models.layouts import WidgetBox
from bokeh.models.widgets import (
Icon, Button, Toggle, Dropdown, CheckboxGroup, RadioGroup,
CheckboxButtonGroup, RadioButtonGroup,
)
def button_handler():
print("button_handler: click")
def toggle_handler(active):
print("toggle_handler: %s" % active)
def dropdown_handler(value):
print("dropdown_handler: %s" % value)
def split_handler(value):
print("split_handler: %s" % value)
def checkbox_group_handler(active):
print("checkbox_group_handler: %s" % active)
def radio_group_handler(active):
print("radio_group_handler: %s" % active)
def checkbox_button_group_handler(active):
print("checkbox_button_group_handler: %s" % active)
def radio_button_group_handler(active):
print("radio_button_group_handler: %s" % active)
button = Button(label="Button (disabled) - still has click event", icon=Icon(icon_name="check"), button_type="primary", disabled=True)
button.on_click(button_handler)
toggle = Toggle(label="Toggle button", button_type="success")
toggle.on_click(toggle_handler)
menu = [("Item 1", "item_1_value"), ("Item 2", "item_2_value"), ("Item 3", "item_3_value")]
dropdown = Dropdown(label="Dropdown button", button_type="warning", menu=menu, default_value="item_1_value")
dropdown.on_click(dropdown_handler)
split_menu = [("Item 1", "item_1_value"), ("Item 2", "item_2_value"), None, ("Item 3", "item_3_value")]
split = Dropdown(label="Split button", button_type="danger", menu=split_menu)
split.on_click(split_handler)
checkbox_group = CheckboxGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1])
checkbox_group.on_click(checkbox_group_handler)
radio_group = RadioGroup(labels=["Option 1", "Option 2", "Option 3"], active=0)
radio_group.on_click(radio_group_handler)
checkbox_button_group = CheckboxButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=[0, 1])
checkbox_button_group.on_click(checkbox_button_group_handler)
radio_button_group = RadioButtonGroup(labels=["Option 1", "Option 2", "Option 3"], active=0)
radio_button_group.on_click(radio_button_group_handler)
widgetBox = WidgetBox(children=[button, toggle, dropdown, split, checkbox_group, radio_group, checkbox_button_group, radio_button_group])
document = Document()
document.add_root(widgetBox)
session = push_session(document)
session.show()
if __name__ == "__main__":
session.loop_until_closed()
| phobson/bokeh | examples/models/buttons_server.py | Python | bsd-3-clause | 2,518 |
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
help = 'Updates a Site object.'
args = '<site id>'
option_list = BaseCommand.option_list + (
make_option('-s', '--site-id',
dest='site',
default='1',
help='The site id to update. Defaults to 1.'),
make_option('-d', '--domain',
dest='domain',
default='http://example.com',
help='The site\'s domain. http://example.com'),
make_option('-n', '--display-name',
dest='name',
default='example.com',
help='The display name of the site.'),
)
def handle(self, *args, **options):
site_id = options.get('site')
domain = args[0] if args else options.get('domain')
name = options.get('name')
site, created = Site.objects.get_or_create(pk=int(site_id), defaults=dict(domain=domain, name=name))
site.save()
Site.objects.clear_cache()
self.stdout.write('Successfully updated site {0} to: {1}.'.format(site_id, site.domain))
| state-hiu/rogue_geonode | geoshape/core/management/commands/siteupdate.py | Python | gpl-3.0 | 1,253 |
# -*- coding:utf-8 -*-
import math
# At first I had not thought about DP at all; I could not work this out by
# myself, so I looked at discussions online.
# Assume the staircase always has 100 steps.
# Split into 51 cases: 0 double-steps, 1 double-step, ..., 50 double-steps.
# Not implemented -- too cumbersome.
# def count_stairs(n):
# if n % 2 == 0:
# count_2 = n / 2 + 1
# start = 3
# else:
# count_2 = (n + 1) / 2 + 1
# start = 2
#
# counts = 1
# for i in xrange(count_2):
# for j in xrange(start, i):
# counts *= c(n - 2 * j, 1)
# counts + count_stairs(n - 1)
# counts += 2 if start == 3 else 1
# return counts
#
#
# def c(m, n):
# return math.factorial(m) / math.factorial(n) ** 2
def count_stairs_dp(n):
f1 = 1
f2 = 2
fn = 0
if n == 1:
return f1
if n == 2:
return f2
    for i in xrange(2, n):  # f(n) = f(n-1) + f(n-2); compute f(3) .. f(n)
fn = f1 + f2
f1 = f2
f2 = fn
return fn
print(count_stairs_dp(10))
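# A sketch of the combinatorial approach the commented-out attempt above was
# aiming for (an assumption about its intent): with k double-steps there are
# n - k moves in total, so the count is the sum over k of C(n - k, k). It
# should agree with count_stairs_dp for every n.
def count_stairs_comb(n):
    total = 0
    for k in xrange(n // 2 + 1):
        moves = n - k
        # C(moves, k) ways to place the k double-steps among the moves
        total += math.factorial(moves) // (math.factorial(k) * math.factorial(moves - k))
    return total
print(count_stairs_comb(10))  # 89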
| xudongyangwork/algo | day8/xudy.py | Python | mit | 986 |
from builtins import *
from domain.channel import Channel
from domain.client import Client
from domain.protocol import Protocol
from system import hooks
from system.ircfactory import IRCFactory
from system.messagehandler import MessageHandler
from table import Table
from tablerow import TableRow
from util import logger
from util.dictable import Dictable
class Autodoc:
"""
Okay, so writing documentation is tedious and boring. Tedious and boring things can be automated.
This is my attempt at doing so.
"""
def __init__(self):
pass
@staticmethod
def getvariables(cls):
"""
Gets all variables in a class, returns them in a list.
Static method.
:example: getvariables(Autodoc)
:type cls: object | Class
:rtype: [variable: object, ...]
"""
return [(name, getattr(cls, name)) for name in dir(cls)
if not name.startswith('_')
and not callable(getattr(cls, name))]
@staticmethod
def getmethods(cls):
"""
Gets all methods in a class, returns them in a list.
Static method.
:example: getmethods(Autodoc)
:type cls: object | Class
:rtype: [method: function, ...]
"""
return [(name, getattr(cls, name)) for name in dir(cls)
if not name.startswith('_')
and callable(getattr(cls, name))]
@staticmethod
def slicedocstring(obj):
"""
Splits an object's docstring into a list of stripped lines.
:type obj: object
:rtype: [str, ...]
"""
if obj.__doc__ is None:
return []
return [line.strip().replace('<', '<').replace('>', '>')
for line in obj.__doc__.splitlines()
if line.strip()]
@staticmethod
def itemtotablerow(name, item):
"""
Takes an item returned from getmethod or getvariables and returns a TableRow
:type name: str
:type item: object
:rtype: tablerow.TableRow
"""
tablerow = TableRow(name)
docstr = Autodoc.slicedocstring(item)
tablerow.description = '<br>\n'.join(s for s in docstr if not s.startswith(':'))
tablerow.example = Autodoc.getdocstrbytype(':example:', docstr)
tablerow.returns = Autodoc.getdocstrbytype(':rtype:', docstr)
        tablerow.params = ',<br>\n'.join(s.split(' ', 1)[1].replace('<', '&lt;').replace('>', '&gt;')
for s in docstr if s.startswith(':type '))
return tablerow
@staticmethod
def getdocstrbytype(startswith, docstr):
"""
        Returns a string where all the lines starting with startswith are joined by ",<br>\\n"
:type startswith: str
:rtype: str
"""
        return ',<br>\n'.join(s.split(': ', 1)[1].replace('<', '&lt;').replace('>', '&gt;')
for s in docstr if s.startswith(startswith))
@staticmethod
def itemrowstotable(nameitems):
"""
Takes the names and items returned by getmethod or getvariables and returns a table.
:type nameitems: [(str, object), ...]
:rtype: table.Table
"""
table = Table()
for name, item in nameitems:
tr = Autodoc.itemtotablerow(name, item)
table.tablerows.append(tr)
return table
def test():
import sys
classes = [Channel,
Client,
IRCFactory,
MessageHandler,
Autodoc,
Dictable,
Protocol,
Protocol.Nick,
Protocol.Whois,
logger,
hooks]
# os.mkdir("docs")
for cls in classes:
_stdout = sys.stdout
path = 'docs/{name}.htm'.format(name=cls.__name__.lower())
with open(path, 'w+') as sys.stdout:
print("<!doctype html>")
print("<html>")
print("<head>")
print("<title>Autodoc: {name}</title>".format(name=cls.__name__))
print("""<style>body {font-family: Arial;}</style>""")
print("</head>")
print("<body>")
print('<h1>{name}</h1>'.format(name=cls.__name__))
print('<p>{desc}</p>'.format(desc='<br>\n'.join(Autodoc.slicedocstring(cls))))
print()
print('<h2>Variables</h2>')
print(Autodoc.itemrowstotable(Autodoc.getvariables(cls)))
print()
print('<h2>Methods</h2>')
print(Autodoc.itemrowstotable(Autodoc.getmethods(cls)))
print("<br><hr><h5>Generated with Autodoc</h5>")
print("</body>")
print("</html>")
sys.stdout = _stdout
if __name__ == '__main__':
test() | yukaritan/kawaiirc | autodoc/autodoc.py | Python | gpl-2.0 | 4,824 |
#!/usr/bin/env python3
import numpy as np
import torch
import itertools
import argparse
import training
import data
import plotting
import model
def main(args):
np.random.seed(args.seed)
gens = data.instantiate_generators()
X, t_phn, t_spk = data.generate(gens, 100)
X_val, t_phn_val, t_spk_val = data.generate(gens, 100)
plotter = plotting.Plotter(args.no_plot)
plotter.plot(X, t_phn, t_spk, name="Raw data")
raw_bl, raw_ur = plotter.plot(
X_val, t_phn_val, t_spk_val, name="Raw validation data"
)
torch.manual_seed(args.seed)
bne, phn_dec, spk_dec = model.create_models(args.bne_width)
print("\nTraining PHN network")
training.train(bne, [phn_dec],
itertools.chain(bne.parameters(), phn_dec.parameters()),
(X, [t_phn]), (X_val, [t_phn_val]),
args.nb_epochs)
bl, ur = plotter.plot(
X, t_phn, t_spk,
name="BN features, PHN optimized", transform=bne
)
plotting.plot_preds(
plotter, "PHN decoding in raw space",
raw_bl, raw_ur, lambda x: phn_dec(bne(x))
)
plotting.plot_preds(
plotter, "PHN decoding in BN space",
bl, ur, phn_dec
)
print("\nTraining SPK decoder")
training.train(bne, [spk_dec],
spk_dec.parameters(),
(X, [t_spk]), (X_val, [t_spk_val]),
args.nb_epochs)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--nb-epochs", type=int, default=200,
help="number of training epochs")
parser.add_argument(
"--bne-width", type=int, default=100,
help="width of the bottleneck extractor (its hidden layers)"
)
parser.add_argument(
"--seed", type=int, default=1337,
help="seed for both NumPy data and PyTorch model weights sampling"
)
parser.add_argument("--no-plot", action="store_true",
help="do no plotting")
args = parser.parse_args()
main(args)
| ibenes/speaker-oblivious-bottlenecks | main.py | Python | apache-2.0 | 2,008 |
'''
https://leetcode.com/contest/weekly-contest-172/problems/maximum-69-number/
'''
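# Greedy observation: flipping the most significant 6 to a 9 gives the largest
# increase, so the digit stack below is replayed most-significant-first and only
# the first 6 encountered is changed.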
class Solution:
def maximum69Number (self, num: int) -> int:
stk = []
while num > 0:
stk.append(num % 10)
num //= 10
done = False
while len(stk) > 0:
if not done and stk[-1] == 6:
stk[-1] = 9; done = True
num = num*10 + stk[-1]
stk.pop()
return num
| jan25/code_sorted | leetcode/weekly172/1_max69.py | Python | unlicense | 457 |
class Variants(object):
def __init__(self, file_path):
self.load_variants(file_path)
def load_variants(self, file_path):
variants = []
with open(file_path) as f:
for line in f:
tokens = line.split()
chrom, name, pos, coord, A1, A2 = tokens
variant = Variant(chrom, name, pos, coord, A1, A2)
variants.append(variant)
self.variants = variants
def get_variants(self):
return self.variants
def get_n_variants(self):
return len(self.variants)
def get_index(self, variant_name):
for index, variant in enumerate(self.variants):
if variant.name == variant_name:
return index
def get_variant(self, variant_index):
return self.variants[variant_index]
# def get_counts(self, variant_index):
# A1 = get_A2(variant_index)
# A1 = get_A2(variant_index)
def get_name(self, variant_index):
return self.variants[variant_index].get_name()
def __str__(self):
return '\n'.join([str(variant) for variant in self.variants])
class Variant(object):
def __init__(self, chrom, name, pos, coord, A1, A2):
self.chrom = chrom
self.name = name
self.pos = pos
self.coord = coord
self.A1 = A1
self.A2 = A2
def get_A1(self):
return self.A1
def get_A2(self):
return self.A2
def get_name(self):
return self.name
def get_chrom(self):
return self.chrom
def __str__(self):
return '{}:{} {}'.format(self.chrom, self.pos, self.name)
| dlrice/evoker-lite | evokerlite/variants.py | Python | gpl-3.0 | 1,658 |
"""
Benchmark to help choose the best chunksize so as to optimize the
access time in random lookups.
"""
from time import time
import os
import subprocess
import numpy
import tables
# Constants
NOISE = 1e-15 # standard deviation of the noise compared with actual values
rdm_cod = ['lin', 'rnd']
def get_nrows(nrows_str):
if nrows_str.endswith("k"):
return int(float(nrows_str[:-1])*1000)
elif nrows_str.endswith("m"):
return int(float(nrows_str[:-1])*1000*1000)
elif nrows_str.endswith("g"):
return int(float(nrows_str[:-1])*1000*1000*1000)
else:
raise ValueError("value of nrows must end with either 'k', 'm' or 'g' suffixes.")
class DB(object):
def __init__(self, nrows, dtype, chunksize, userandom, datadir,
docompress=0, complib='zlib'):
self.dtype = dtype
self.docompress = docompress
self.complib = complib
self.filename = '-'.join([rdm_cod[userandom],
"n"+nrows, "s"+chunksize, dtype])
# Complete the filename
self.filename = "lookup-" + self.filename
if docompress:
self.filename += '-' + complib + str(docompress)
self.filename = datadir + '/' + self.filename + '.h5'
print "Processing database:", self.filename
self.userandom = userandom
self.nrows = get_nrows(nrows)
self.chunksize = get_nrows(chunksize)
self.step = self.chunksize
self.scale = NOISE
def get_db_size(self):
sout = subprocess.Popen("sync;du -s %s" % self.filename, shell=True,
stdout=subprocess.PIPE).stdout
line = [l for l in sout][0]
return int(line.split()[0])
def print_mtime(self, t1, explain):
mtime = time()-t1
print "%s:" % explain, round(mtime, 6)
print "Krows/s:", round((self.nrows/1000.)/mtime, 6)
def print_db_sizes(self, init, filled):
array_size = (filled-init)/1024.
print "Array size (MB):", round(array_size, 3)
def open_db(self, remove=0):
if remove and os.path.exists(self.filename):
os.remove(self.filename)
con = tables.openFile(self.filename, 'a')
return con
def create_db(self, verbose):
self.con = self.open_db(remove=1)
self.create_array()
init_size = self.get_db_size()
t1=time()
self.fill_array()
array_size = self.get_db_size()
self.print_mtime(t1, 'Insert time')
self.print_db_sizes(init_size, array_size)
self.close_db()
def create_array(self):
# The filters chosen
filters = tables.Filters(complevel=self.docompress,
complib=self.complib)
atom = tables.Atom.from_kind(self.dtype)
earray = self.con.createEArray(self.con.root, 'earray', atom, (0,),
filters=filters,
expectedrows=self.nrows,
chunkshape=(self.chunksize,))
def fill_array(self):
"Fills the array"
earray = self.con.root.earray
j = 0
arr = self.get_array(0, self.step)
for i in xrange(0, self.nrows, self.step):
stop = (j+1)*self.step
if stop > self.nrows:
stop = self.nrows
###arr = self.get_array(i, stop, dtype)
earray.append(arr)
j += 1
earray.flush()
def get_array(self, start, stop):
arr = numpy.arange(start, stop, dtype='float')
if self.userandom:
arr += numpy.random.normal(0, stop*self.scale, size=stop-start)
arr = arr.astype(self.dtype)
return arr
def print_qtime(self, ltimes):
ltimes = numpy.array(ltimes)
print "Raw query times:\n", ltimes
print "Histogram times:\n", numpy.histogram(ltimes[1:])
ntimes = len(ltimes)
qtime1 = ltimes[0] # First measured time
if ntimes > 5:
# Wait until the 5th iteration (in order to
# ensure that the index is effectively cached) to take times
qtime2 = sum(ltimes[5:])/(ntimes-5)
else:
qtime2 = ltimes[-1] # Last measured time
print "1st query time:", round(qtime1, 3)
print "Mean (skipping the first 5 meas.):", round(qtime2, 3)
def query_db(self, niter, avoidfscache, verbose):
self.con = self.open_db()
earray = self.con.root.earray
if avoidfscache:
rseed = int(numpy.random.randint(self.nrows))
else:
rseed = 19
numpy.random.seed(rseed)
base = numpy.random.randint(self.nrows)
ltimes = []
for i in range(niter):
t1=time()
results = self.do_query(earray, numpy.random.randint(self.nrows))
ltimes.append(time()-t1)
self.print_qtime(ltimes)
self.close_db()
def do_query(self, earray, idx):
return earray[idx]
def close_db(self):
self.con.close()
if __name__=="__main__":
import sys
import getopt
usage = """usage: %s [-v] [-m] [-c] [-q] [-x] [-z complevel] [-l complib] [-N niter] [-n nrows] [-d datadir] [-t] type [-s] chunksize
-v verbose
-m use random values to fill the array
-q do a (random) lookup
-x choose a different seed for random numbers (i.e. avoid FS cache)
-c create the file
-z compress with zlib (no compression by default)
-l use complib for compression (zlib used by default)
-N number of iterations for reading
-n sets the number of rows in the array
-d directory to save data (default: data.nobackup)
-t select the type for array ('int' or 'float'. def 'float')
-s select the chunksize for array
\n""" % sys.argv[0]
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'vmcqxz:l:N:n:d:t:s:')
except:
sys.stderr.write(usage)
sys.exit(0)
# default options
verbose = 0
userandom = 0
docreate = 0
optlevel = 0
docompress = 0
complib = "zlib"
doquery = False
avoidfscache = 0
krows = '1k'
chunksize = '32k'
niter = 50
datadir = "data.nobackup"
dtype = "float"
# Get the options
for option in opts:
if option[0] == '-v':
verbose = 1
elif option[0] == '-m':
userandom = 1
elif option[0] == '-c':
docreate = 1
createindex = 1
elif option[0] == '-q':
doquery = True
elif option[0] == '-x':
avoidfscache = 1
elif option[0] == '-z':
docompress = int(option[1])
elif option[0] == '-l':
complib = option[1]
elif option[0] == '-N':
niter = int(option[1])
elif option[0] == '-n':
krows = option[1]
elif option[0] == '-d':
datadir = option[1]
elif option[0] == '-t':
if option[1] in ('int', 'float'):
dtype = option[1]
else:
print "type should be either 'int' or 'float'"
sys.exit(0)
elif option[0] == '-s':
chunksize = option[1]
if not avoidfscache:
# in order to always generate the same random sequence
numpy.random.seed(20)
if verbose:
if userandom:
print "using random values"
db = DB(krows, dtype, chunksize, userandom, datadir, docompress, complib)
if docreate:
if verbose:
print "writing %s rows" % krows
db.create_db(verbose)
if doquery:
print "Calling query_db() %s times" % niter
db.query_db(niter, avoidfscache, verbose)
| cpcloud/PyTables | bench/lookup_bench.py | Python | bsd-3-clause | 7,870 |
def square(x):
return x * x
| yehnan/python_book_yehnan | ch10/package_example/formats/bar.py | Python | gpl-2.0 | 38 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Graphical representation of a node on the QGraphicsScene.
"""
from ..qt import QtCore, QtGui, QtSvg
from .note_item import NoteItem
class NodeItem(QtSvg.QGraphicsSvgItem):
"""
Node for the scene.
:param node: Node instance
:param default_symbol: Default symbol for the node representation on the scene
:param hover_symbol: Hover symbol when the node is hovered on the scene
"""
show_layer = False
def __init__(self, node, default_symbol=None, hover_symbol=None):
QtSvg.QGraphicsSvgItem.__init__(self)
# attached node
self._node = node
# node label
self._node_label = None
# link items connected to this node item.
self._links = []
# set graphical settings for this node
self.setFlag(QtSvg.QGraphicsSvgItem.ItemIsMovable)
self.setFlag(QtSvg.QGraphicsSvgItem.ItemIsSelectable)
self.setFlag(QtSvg.QGraphicsSvgItem.ItemIsFocusable)
self.setFlag(QtSvg.QGraphicsSvgItem.ItemSendsGeometryChanges)
self.setAcceptsHoverEvents(True)
self.setZValue(1)
# create renderers using symbols paths/resources
if default_symbol:
self._default_renderer = QtSvg.QSvgRenderer(default_symbol)
if default_symbol != node.defaultSymbol():
self._default_renderer.setObjectName(default_symbol)
else:
self._default_renderer = QtSvg.QSvgRenderer(node.defaultSymbol())
if hover_symbol:
self._hover_renderer = QtSvg.QSvgRenderer(hover_symbol)
if hover_symbol != node.hoverSymbol():
self._hover_renderer.setObjectName(hover_symbol)
else:
self._hover_renderer = QtSvg.QSvgRenderer(node.hoverSymbol())
self.setSharedRenderer(self._default_renderer)
# connect signals to know about some events
# e.g. when the node has been started, stopped or suspended etc.
node.created_signal.connect(self.createdSlot)
node.started_signal.connect(self.startedSlot)
node.stopped_signal.connect(self.stoppedSlot)
node.suspended_signal.connect(self.suspendedSlot)
node.updated_signal.connect(self.updatedSlot)
node.deleted_signal.connect(self.deletedSlot)
node.delete_links_signal.connect(self.deleteLinksSlot)
node.error_signal.connect(self.errorSlot)
node.server_error_signal.connect(self.serverErrorSlot)
# used when a port has been selected from the contextual menu
self._selected_port = None
# says if the attached node has been initialized
# by the server.
self._initialized = False
# contains the last error message received
# from the server.
self._last_error = None
from ..main_window import MainWindow
self._main_window = MainWindow.instance()
self._settings = self._main_window.uiGraphicsView.settings()
def defaultRenderer(self):
"""
Returns the default QSvgRenderer.
:return: QSvgRenderer instance
"""
return self._default_renderer
def setDefaultRenderer(self, default_renderer):
"""
Sets new default QSvgRenderer.
:param default_renderer: QSvgRenderer instance
"""
self._default_renderer = default_renderer
self.setSharedRenderer(self._default_renderer)
def hoverRenderer(self):
"""
Returns the hover QSvgRenderer.
:return: QSvgRenderer instance
"""
return self._hover_renderer
def setHoverRenderer(self, hover_renderer):
"""
Sets new hover QSvgRenderer.
:param hover_renderer: QSvgRenderer instance
"""
self._hover_renderer = hover_renderer
def setUnsavedState(self):
"""
        Indicates the project is in an unsaved state.
"""
from ..main_window import MainWindow
main_window = MainWindow.instance()
main_window.setUnsavedState()
def node(self):
"""
Returns the node attached to this node item.
:returns: Node instance
"""
return self._node
def addLink(self, link):
"""
        Adds a link item to this node item.
:param link: LinkItem instance
"""
self._links.append(link)
self._node.updated_signal.emit()
self.setUnsavedState()
def removeLink(self, link):
"""
        Removes a link item from this node item.
:param link: LinkItem instance
"""
if link in self._links:
self._links.remove(link)
self.setUnsavedState()
def links(self):
"""
Returns all the link items attached to this node item.
:returns: list of LinkItem instances
"""
return self._links
def createdSlot(self, node_id):
"""
Slot to receive events from the attached Node instance
        when the node has been created/initialized.
:param node_id: node identifier (integer)
"""
self._initialized = True
self.update()
self._showLabel()
def startedSlot(self):
"""
Slot to receive events from the attached Node instance
        when the node has started.
"""
for link in self._links:
link.update()
def stoppedSlot(self):
"""
Slot to receive events from the attached Node instance
        when the node has stopped.
"""
for link in self._links:
link.update()
def suspendedSlot(self):
"""
Slot to receive events from the attached Node instance
        when the node has been suspended.
"""
for link in self._links:
link.update()
def updatedSlot(self):
"""
Slot to receive events from the attached Node instance
        when the node has been updated.
"""
if self._node_label:
if self._node_label.toPlainText() != self._node.name():
self._node_label.setPlainText(self._node.name())
self._centerLabel()
self.setUnsavedState()
# update the link tooltips in case the
# node name has changed
for link in self._links:
link.setCustomToolTip()
def deleteLinksSlot(self):
"""
Slot to receive events from the attached Node instance
        when all the links must be deleted.
"""
for link in self._links.copy():
link.delete()
def deletedSlot(self):
"""
Slot to receive events from the attached Node instance
when the node has been deleted.
"""
self._node.removeAllocatedName()
if self in self.scene().items():
self.scene().removeItem(self)
self.setUnsavedState()
def serverErrorSlot(self, node_id, message):
"""
Slot to receive events from the attached Node instance
when the node has received an error from the server.
:param node_id: node identifier
:param message: error message
"""
self._last_error = "{message}".format(message=message)
def errorSlot(self, node_id, message):
"""
Slot to receive events from the attached Node instance
when the node wants to report an error.
:param node_id: node identifier
:param message: error message
"""
self._last_error = "{message}".format(message=message)
def setCustomToolTip(self):
"""
Sets a new ToolTip.
"""
if not self._initialized:
if not self._last_error:
error = "unknown error"
else:
error = self._last_error
self.setToolTip("This node isn't initialized\n{}".format(error))
else:
self.setToolTip(self._node.info())
def label(self):
"""
Returns the node label.
:return: NoteItem instance.
"""
return self._node_label
def setLabel(self, label):
"""
Sets the node label.
:param label: NoteItem instance.
"""
self._node_label = label
def _centerLabel(self):
"""
Centers the node label.
"""
text_rect = self._node_label.boundingRect()
text_middle = text_rect.topRight() / 2
node_rect = self.boundingRect()
node_middle = node_rect.topRight() / 2
label_x_pos = node_middle.x() - text_middle.x()
label_y_pos = -25
self._node_label.setPos(label_x_pos, label_y_pos)
def _showLabel(self):
"""
Shows the node label on the scene.
"""
if not self._node_label:
self._node_label = NoteItem(self)
self._node_label.setEditable(False)
self._node_label.setPlainText(self._node.name())
self._centerLabel()
def connectToPort(self, unavailable_ports=[]):
"""
        Shows a contextual menu for the user to choose a port or auto-select one.
:param unavailable_ports: list of port names that the user cannot choose
:returns: Port instance corresponding to the selected port
"""
self._selected_port = None
menu = QtGui.QMenu()
ports = self._node.ports()
if not ports:
QtGui.QMessageBox.critical(self.scene().parent(), "Link", "No port available, please configure this device")
return None
# sort the ports
ports_dict = {}
for port in ports:
if port.adapterNumber() is not None:
# make the port number unique (special case with WICs).
port_number = port.portNumber()
if port_number >= 16:
port_number *= 4
ports_dict[(port.adapterNumber() * 16) + port_number] = port
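                # Worked example (hypothetical values): adapter 1, WIC port 17
                # gives port_number 17 * 4 = 68 and sort key 1 * 16 + 68 = 84.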
            elif port.portNumber() is not None:
ports_dict[port.portNumber()] = port
else:
ports_dict[port.name()] = port
try:
ports = sorted(ports_dict.keys(), key=int)
except ValueError:
ports = sorted(ports_dict.keys())
# show a contextual menu for the user to choose a port
for port in ports:
port_object = ports_dict[port]
if port in unavailable_ports:
# this port cannot be chosen by the user (grayed out)
action = menu.addAction(QtGui.QIcon(':/icons/led_green.svg'), port_object.name())
action.setDisabled(True)
elif port_object.isFree():
menu.addAction(QtGui.QIcon(':/icons/led_red.svg'), port_object.name())
else:
menu.addAction(QtGui.QIcon(':/icons/led_green.svg'), port_object.name())
menu.triggered.connect(self.selectedPortSlot)
menu.exec_(QtGui.QCursor.pos())
return self._selected_port
def selectedPortSlot(self, action):
"""
Slot to receive events when a port has been selected in the
contextual menu.
:param action: QAction instance
"""
ports = self._node.ports()
# get the Port instance based on the selected port name.
for port in ports:
if port.name() == str(action.text()):
self._selected_port = port
break
def itemChange(self, change, value):
"""
Notifies this node item that some part of the item's state changes.
:param change: GraphicsItemChange type
:param value: value of the change
"""
# dynamically change the renderer when this node item is selected/unselected.
if change == QtSvg.QGraphicsSvgItem.ItemSelectedChange:
if value:
self.setSharedRenderer(self._hover_renderer)
else:
self.setSharedRenderer(self._default_renderer)
# adjust link item positions when this node is moving or has changed.
if change == QtSvg.QGraphicsSvgItem.ItemPositionChange or change == QtSvg.QGraphicsSvgItem.ItemPositionHasChanged:
self.setUnsavedState()
for link in self._links:
link.adjust()
return QtGui.QGraphicsItem.itemChange(self, change, value)
def paint(self, painter, option, widget=None):
"""
Paints the contents of an item in local coordinates.
:param painter: QPainter instance
:param option: QStyleOptionGraphicsItem instance
:param widget: QWidget instance
"""
# don't show the selection rectangle
if not self._settings["draw_rectangle_selected_item"]:
option.state = QtGui.QStyle.State_None
QtSvg.QGraphicsSvgItem.paint(self, painter, option, widget)
if not self._initialized or self.show_layer:
brect = self.boundingRect()
center = self.mapFromItem(self, brect.width() / 2.0, brect.height() / 2.0)
painter.setBrush(QtCore.Qt.red)
painter.setPen(QtCore.Qt.red)
painter.drawRect((brect.width() / 2.0) - 10, (brect.height() / 2.0) - 10, 20, 20)
painter.setPen(QtCore.Qt.black)
if self.show_layer:
text = str(int(self.zValue())) # Z value
elif self._last_error:
text = "E" # error
else:
text = "S" # initialization
painter.drawText(QtCore.QPointF(center.x() - 4, center.y() + 4), text)
def setZValue(self, value):
"""
Sets a new Z value.
:param value: Z value
"""
QtSvg.QGraphicsSvgItem.setZValue(self, value)
if self.zValue() < 0:
self.setFlag(self.ItemIsSelectable, False)
self.setFlag(self.ItemIsMovable, False)
if self._node_label:
self._node_label.setFlag(self.ItemIsSelectable, False)
self._node_label.setFlag(self.ItemIsMovable, False)
else:
self.setFlag(self.ItemIsSelectable, True)
self.setFlag(self.ItemIsMovable, True)
if self._node_label:
self._node_label.setFlag(self.ItemIsSelectable, True)
self._node_label.setFlag(self.ItemIsMovable, True)
for link in self._links:
link.adjust()
def hoverEnterEvent(self, event):
"""
Handles all hover enter events for this item.
:param event: QGraphicsSceneHoverEvent instance
"""
self.setCustomToolTip()
# dynamically change the renderer when this node item is hovered.
if not self.isSelected():
self.setSharedRenderer(self._hover_renderer)
# effect = QtGui.QGraphicsColorizeEffect()
# effect.setColor(QtGui.QColor("black"))
# effect.setStrength(0.8)
# self.setGraphicsEffect(effect)
def hoverLeaveEvent(self, event):
"""
Handles all hover leave events for this item.
:param event: QGraphicsSceneHoverEvent instance
"""
# dynamically change the renderer back to the default when this node item is not hovered anymore.
if not self.isSelected():
self.setSharedRenderer(self._default_renderer)
# self.graphicsEffect().setEnabled(False)
| noplay/gns3-gui | gns3/items/node_item.py | Python | gpl-3.0 | 16,117 |