repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
snakeleon/YouCompleteMe-x86 | third_party/ycmd/third_party/JediHTTP/vendor/jedi/test/completion/stdlib.py | 2 | 3045 | """
std library stuff
"""
# -----------------
# builtins
# -----------------
arr = ['']
#? str()
sorted(arr)[0]
#? str()
next(reversed(arr))
next(reversed(arr))
# should not fail if there's no return value.
def yielder():
    yield None
#? None
next(reversed(yielder()))
# empty reversed should not raise an error
#?
next(reversed())
#? str()
next(open(''))
#? int()
{'a':2}.setdefault('a', 3)
# Compiled classes should have the meta class attributes.
#? ['__itemsize__']
tuple.__itemsize__
# -----------------
# type() calls with one parameter
# -----------------
#? int
type(1)
#? int
type(int())
#? type
type(int)
#? type
type(type)
#? list
type([])
def x():
    yield 1
generator = type(x())
#? generator
type(x for x in [])
#? type(x)
type(lambda: x)
import math
import os
#? type(os)
type(math)
class X(): pass
#? type
type(X)
# -----------------
# enumerate
# -----------------
for i, j in enumerate(["as", "ad"]):
    #? int()
    i
    #? str()
    j
# -----------------
# re
# -----------------
import re
c = re.compile(r'a')
# re.compile should not return str -> issue #68
#? []
c.startswith
#? int()
c.match().start()
#? int()
re.match(r'a', 'a').start()
for a in re.finditer('a', 'a'):
    #? int()
    a.start()
#? str()
re.sub('a', 'a')
# -----------------
# ref
# -----------------
import weakref
#? int()
weakref.proxy(1)
#? weakref.ref()
weakref.ref(1)
#? int()
weakref.ref(1)()
# -----------------
# functools
# -----------------
import functools
basetwo = functools.partial(int, base=2)
#? int()
basetwo()
def function(a, b):
    return a, b
a = functools.partial(function, 0)
#? int()
a('')[0]
#? str()
a('')[1]
kw = functools.partial(function, b=1.0)
tup = kw(1)
#? int()
tup[0]
#? float()
tup[1]
def my_decorator(f):
    @functools.wraps(f)
    def wrapper(*args, **kwds):
        return f(*args, **kwds)
    return wrapper
@my_decorator
def example(a):
    return a
#? str()
example('')
# -----------------
# sqlite3 (#84)
# -----------------
import sqlite3
#? sqlite3.Connection()
con = sqlite3.connect()
#? sqlite3.Cursor()
c = con.cursor()
#? sqlite3.Row()
row = c.fetchall()[0]
#? str()
row.keys()[0]
def huhu(db):
"""
:type db: sqlite3.Connection
:param db: the db connection
"""
#? sqlite3.Connection()
db
# -----------------
# hashlib
# -----------------
import hashlib
#? ['md5']
hashlib.md5
# -----------------
# copy
# -----------------
import copy
#? int()
copy.deepcopy(1)
#?
copy.copy()
# -----------------
# json
# -----------------
# We don't want any results for json, because it depends on IO.
import json
#?
json.load('asdf')
#?
json.loads('[1]')
# -----------------
# random
# -----------------
import random
class A(object):
    def say(self): pass
class B(object):
    def shout(self): pass
cls = random.choice([A, B])
#? ['say', 'shout']
cls().s
# -----------------
# zipfile
# -----------------
import zipfile
z = zipfile.ZipFile("foo")
# It's too slow. So we don't run it at the moment.
##? ['upper']
z.read('name').upper
| gpl-3.0 | -7,344,834,146,006,445,000 | 12.59375 | 63 | 0.503777 | false | 2.848457 | false | false | false |
dirkhusemann/rezzme | RezzMe/uri.py | 1 | 36319 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
# Copyright (c) Contributors, http://opensimulator.org/
# See CONTRIBUTORS.TXT for a full list of copyright holders.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the OpenSim Project nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# import logging
import types
import urllib
import RezzMe.parse
import RezzMe.exceptions
class Uri(object):
'''The RezzMe.uri.Uri object encapsulates virtual world resource identifiers.
A virtual world is identified by the hosting server, an
optional user/avatar name and password, an optional region name
with optional X/Y/Z coordinates. In addition RezzMe.uri.Uri
objects can also contain a tag (short label of the target
grid), a display tag (for use in menus), and the identifier of
the virtual world client to use.
'''
def __init__(self, uri = None, display = None, client = None, userId = None):
'''The Uri class encapsulates a RezzMe virtual world resource identifier.
To use RezzMe.Uri you need to import it
>>> import RezzMe.uri
A Uri object can be instantiated in several ways:
- from a string:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/')
>>> uri.PlainUri
'rezzme://opensim.foobar.com/'
note, that if the URI contains spaces, that they will be converted to '%20':
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/an island')
>>> uri.PlainUri
'rezzme://opensim.foobar.com/an%20island'
- from a dictionary as generated by RezzMe.parse.ParseUriAndPath():
>>> import RezzMe.parse
>>> import RezzMe.uri
>>> uriDict = RezzMe.parse.ParseUriAndPath('rezzme://opensim.foobar.com:9000/island')
>>> uri = RezzMe.uri.Uri(uriDict)
>>> uri.PlainUri
'rezzme://opensim.foobar.com:9000/island'
- from another Uri object (cloning it):
>>> aUri = RezzMe.uri.Uri('rezzme://dr%20who@opensim.foobar.com/region/1/2/3')
>>> anotherUri = RezzMe.uri.Uri(aUri)
>>> anotherUri.Avatar
'dr who'
>>> anotherUri.Avatar = 'mr torchwood'
>>> anotherUri.FullUri
'rezzme://mr%20torchwood@opensim.foobar.com/region/1/2/3'
>>> aUri.FullUri
'rezzme://dr%20who@opensim.foobar.com/region/1/2/3'
here we cloned anotherUri from aUri. aUri had as avatar 'dr who',
which anotherUri inherits (we are cloning aUri). we then change
the avatar value of anotherUri to 'mr torchwood' and retrieve the
FullUri property from both objects: aUri still has 'dr who' as avatar,
anotherUri has 'mr torchwood'.
Additional instantiation parameters are the tag, display,
client, and userId meta data parameters. The tag parameter
is short label of the target grid. The display parameter is
is used to display a Uri in GUI menus. The client parameter
is used to associate a particular virtual world client with
the URI object. The userId parameter finally can associate
a default userId with the Uri object.
Uri objects can also come as auto-login Uri: if avatar name
and password are contained in the Uri *and* the query part
of Uri contains 'auto' as parameter:
>>> autoUri = RezzMe.uri.Uri('rezzme://dr%20who:SECRET@opensim.foobar.com/region/1/2/3?auto')
>>> autoUri.AutoLogin
True
'''
if uri is None:
raise RezzMe.exceptions.RezzMeException('empty uri parameter')
self.__plain = None
self.__http = None
self.__safe = None
self.__auto = False
self.Extensions = {}
if isinstance(uri, str) or isinstance(uri, unicode):
uri = uri.replace(' ', '%20')
self.__dict = {}
self.__orig = uri
self._parse(uri)
elif type(uri) is types.DictType:
self.__dict = uri
self._sync()
self.__orig = self.FullUri
elif isinstance(uri, RezzMe.uri.Uri):
self.__dict = uri.__dict
self._sync()
self.__orig = uri.FullUri
self.Extensions = uri.Extensions
return
else:
raise RezzMe.exceptions.RezzMeException('unexpected uri type %s' % type(uri))
self.Display = display
self.Client = client
self.UserId = userId
# for k in self.__dict:
# logging.debug('uri.Uri: %s -> %s', k, self.__dict[k])
def _sync(self):
self.__plain = '%s://' % self.Scheme
self.__full = self.__plain
self.__safe = self.__plain
if self.Scheme == 'rezzme':
self.__http = 'http://'
else:
self.__http = 'https://'
if 'avatar' in self.__dict:
avatar = urllib.quote(self.__dict['avatar'])
self.__full += avatar
self.__safe += avatar
if 'password' in self.__dict:
self.__full += ':%s' % self.__dict['password']
self.__full += '@'
self.__safe += '@'
self.__plain += self.__dict['host']
self.__http += self.__dict['host']
self.__safe += self.__dict['host']
self.__full += self.__dict['host']
if 'port' in self.__dict:
port = ':%s' % self.__dict['port']
self.__plain += port
self.__http += port
self.__safe += port
self.__full += port
self.__plain += '/'
self.__safe += '/'
self.__full += '/'
self.__base = self.__plain
if 'region' in self.__dict:
self.__plain += self.__dict['region']
self.__safe += self.__dict['region']
self.__full += self.__dict['region']
if 'x' in self.__dict and 'y' in self.__dict and 'z' in self.__dict:
xyz = '/%s' % '/'.join(map(lambda x: str(x), self.XYZ))
self.__safe += xyz
self.__full += xyz
if 'query' in self.__dict:
q = self.__dict['query'].split('&')
self.__auto = 'auto' in q
def _parse(self, uri):
self.__dict = RezzMe.parse.ParseUriAndPath(uri)
if not self.__dict:
raise RezzMe.exceptions.RezzMeException('wonky URI >%s<' % uri)
if not self.Scheme: return None
self._sync()
def _plain(self):
return self.__plain
PlainUri = property(fget = _plain,
doc = '''plain URI without avatar name, avatar password and region X/Y/Z (read-only)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield@opensim.zurich.ibm.com:9000/island/127/127/24')
>>> uri.PlainUri
'rezzme://opensim.zurich.ibm.com:9000/island'
This is a read-only property, writing to it will result in an exception:
>>> uri.PlainUri = 'rezzme://opensim.zurich.ibm.com:9000/island'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _base(self):
return self.__base
BaseUri = property(fget = _base,
doc = '''base URI without avatar name, avatar password and region (read-only)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield@opensim.zurich.ibm.com:9000/island/127/127/24?query')
>>> uri.BaseUri
'rezzme://opensim.zurich.ibm.com:9000/'
This is a read-only property, writing to it will result in an exception:
>>> uri.BaseUri = 'rezzme://opensim.zurich.ibm.com:9000/'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _safe(self):
return self.__safe
SafeUri = property(fget = _safe,
doc = '''URI without password (read-only)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield:secret@opensim.zurich.ibm.com:9000/island/127/127/24?query')
>>> uri.FullUri
'rezzme://dr%20scofield:secret@opensim.zurich.ibm.com:9000/island/127/127/24'
>>> uri.SafeUri
'rezzme://dr%20scofield@opensim.zurich.ibm.com:9000/island/127/127/24'
This is a read-only property, writing to it will result in an exception:
>>> uri.SafeUri = 'rezzme://dr%20scofield@opensim.zurich.ibm.com:9000/island/127/127/24'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _full(self):
return self.__full
FullUri = property(fget = _full,
doc = '''full URI including avatar name and password if available (read-only)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield:secret@opensim.zurich.ibm.com:9000/island/127/127/24?query')
>>> uri.FullUri
'rezzme://dr%20scofield:secret@opensim.zurich.ibm.com:9000/island/127/127/24'
This is a read-only property, writing to it will result in an exception:
>>> uri.FullUri = 'rezzme://dr%20scofield:secret@opensim.zurich.ibm.com:9000/island/127/127/24'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _http(self):
return self.__http
BaseHttpUri = property(fget = _http,
doc = '''base HTTP URI of the server (without trailing "/") (read-only)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield:secret@opensim.zurich.ibm.com:9000/island/127/127/24?query')
>>> uri.BaseHttpUri
'http://opensim.zurich.ibm.com:9000'
This is a read-only property, writing to it will result in an exception:
>>> uri.BaseHttpUri = 'http://opensim.zurich.ibm.com:9000'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _keyValue(self, key):
if type(key) is types.StringType:
if key in self.__dict: return self.__dict[key]
elif type(key) is types.ListType:
return [self.__dict[x] if x in self.__dict else None for x in key]
return None
def _credentials(self):
return self._keyValue(['avatar', 'password'])
def _scredentials(self, value):
if value[0] is None and 'avatar' in self.__dict:
del self.__dict['avatar']
else:
if len(value[0].split()) != 2:
raise RezzMe.exceptions.RezzMeException('avatar name format violation, must be "First Last", found "%s"' % value[0])
self.__dict['avatar'] = value[0]
if value[1] is None and 'password' in self.__dict:
del self.__dict['password']
else:
self.__dict['password'] = value[1]
self._sync()
Credentials = property(fget = _credentials, fset = _scredentials,
doc = '''tuple containing (avatar name, password) (read-write)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield:secret@opensim.zurich.ibm.com:9000/island/127/127/24?query')
>>> uri.Credentials
['dr scofield', 'secret']
This is a read-write property:
>>> uri.Credentials = ['dr who', 'anothersecret']
>>> uri.Credentials
['dr who', 'anothersecret']
Note, that, as with RezzMe.parse, the avatar name has to follow
the format "First Last"; this will fail:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/island')
>>> uri.Credentials = ['drwho', 'secret']
Traceback (most recent call last):
...
RezzMeException: avatar name format violation, must be "First Last", found "drwho"
''')
def _avatar(self):
return self._keyValue('avatar')
def _savatar(self, value):
if value is None and 'avatar' in self.__dict:
del self.__dict['avatar']
else:
if len(value.split()) != 2:
raise RezzMe.exceptions.RezzMeException('avatar name format violation, must be "First Last", found "%s"' % value)
self.__dict['avatar'] = value
self._sync()
Avatar = property(fget = _avatar, fset = _savatar,
doc = '''avatar name (read-write)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield@opensim.foobar.com/island')
>>> uri.Avatar
'dr scofield'
As this is a read-write property you can set the avatar name as well:
>>> uri.Avatar = 'dr who'
>>> uri.Avatar
'dr who'
Again, the avatar name has to follow the "First Last" pattern, this will fail:
>>> uri.Avatar = 'drwho'
Traceback (most recent call last):
...
RezzMeException: avatar name format violation, must be "First Last", found "drwho"
''')
def _client(self):
return self._keyValue('client')
def _sclient(self, value):
if value is None and 'client' in self.__dict:
del self.__dict['client']
else:
self.__dict['client'] = value
Client = property(fget = _client, fset = _sclient,
doc = '''client to use (read-write)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield@opensim.foobar.com/island')
>>> uri.Client
>>> uri.Client = '/opt/SL/secondlife/secondlife-1.22.11.113941/secondlife'
>>> uri.Client
'/opt/SL/secondlife/secondlife-1.22.11.113941/secondlife'
''')
def _password(self):
return self._keyValue('password')
def _spassword(self, value):
if value is None and 'password' in self.__dict:
del self.__dict['password']
else:
self.__dict['password'] = value
self._sync()
Password = property(fget = _password, fset = _spassword,
doc = '''avatar password (read-write)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield@opensim.foobar.com/island')
>>> uri.Credentials
['dr scofield', None]
>>> uri.Password
>>> uri.Password = 'secret'
>>> uri.Password
'secret'
Setting the password has an effect on Credentials and on FullUri:
>>> uri.Credentials
['dr scofield', 'secret']
>>> uri.FullUri
'rezzme://dr%20scofield:secret@opensim.foobar.com/island'
''')
def _fullyQualified(self):
return all((self.Avatar, self.Password))
FullyQualified = property(fget = _fullyQualified,
doc = '''True if this uri object contains both avatar name and password
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield:SECRET@opensim.foobar.com/island')
>>> uri.FullyQualified
True
but:
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield@opensim.foobar.com/island')
>>> uri.FullyQualified
False
''')
def _autoLogin(self):
return self.__auto and self.FullyQualified
AutoLogin = property(fget = _autoLogin,
doc = '''True if this uri object is an auto-login Uri.
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield:SECRET@opensim.foobar.com/island?auto')
>>> uri.AutoLogin
True
but:
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield@opensim.foobar.com/island?auto')
>>> uri.AutoLogin
False
''')
def _userId(self):
return self._keyValue('userID')
def _suserId(self, value):
if value is None and 'userID' in self.__dict:
del self.__dict['userID']
else:
self.__dict['userID'] = value
UserId = property(fget = _userId, fset = _suserId,
doc = '''user ID in case of authenticated grid (read-write)
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield@opensim.foobar.com/island')
>>> uri.UserId
>>> uri.UserId = 'drscofield@xyzzyxyzzy.net'
>>> uri.UserId
'drscofield@xyzzyxyzzy.net'
UserId is a pure meta property in that it has no effect on other properties such as FullUri:
>>> uri.FullUri
'rezzme://dr%20scofield@opensim.foobar.com/island'
''')
def _display(self):
display = self._keyValue('display')
if not display:
display = ''
if self.Avatar:
display += '%s@' % self.Avatar
if self.Port:
display += 'rezzme://%s:%s' % (self.Host, self.Port)
else:
display += 'rezzme://%s' % self.Host
if self.Path:
display += '/%s' % self.Path
self.Display = display
return display
def _sdisplay(self, value):
if value is None and 'display' in self.__dict:
del self.__dict['display']
else:
self.__dict['display'] = value
self._sync()
Display = property(fget = _display, fset = _sdisplay,
doc = '''string that can be used in menus and so forth (read-write)
Unless explicitly set, the Display property will return a default value
constructed from other properties of the Uri object:
>>> uri = RezzMe.uri.Uri('rezzme://dr%20scofield@opensim.foobar.com/island')
>>> uri.Display
'dr scofield@rezzme://opensim.foobar.com/island'
Once set, Display will return that value instead:
>>> uri.Display = 'foobar island'
>>> uri.Display
'foobar island'
Even if we change another property that the default value of Display would use,
we still get the explicitly-set value:
>>> uri.Avatar = 'dr who'
>>> uri.Display
'foobar island'
''')
def _scheme(self):
return self._keyValue('scheme')
Scheme = property(fget = _scheme,
doc = '''URI scheme (read-only)
Usually returns "rezzme":
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com:9000/island/1/2/3')
>>> uri.Scheme
'rezzme'
or "rezzmes":
>>> uri = RezzMe.uri.Uri('rezzmes://opensim.foobar.com:9000/island/1/2/3')
>>> uri.Scheme
'rezzmes'
Note, that this is a read-only property, setting it will raise an exception:
>>> uri.Scheme = 'http'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _port(self):
return self._keyValue('port')
Port = property(fget = _port,
doc = '''URI port (if specified) (read-only)
Example with port provided:
>>> uri = RezzMe.uri.Uri('rezzmes://opensim.foobar.com:9000/island/1/2/3?query')
>>> uri.Port
'9000'
Example without port:
>>> uri = RezzMe.uri.Uri('rezzmes://opensim.foobar.com/island/1/2/3?query')
>>> uri.Port
Note, that Port is a read-only property; setting it will raise an exception:
>>> uri.Port = 4000
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _host(self):
return self._keyValue('host')
Host = property(fget = _host,
doc = '''URI host (read-only)
Sample code:
>>> uri = RezzMe.uri.Uri('rezzmes://opensim.foobar.com/island/1/2/3?query')
>>> uri.Host
'opensim.foobar.com'
Note, that Host is a read-only property; setting it will raise an exception:
>>> uri.Host = 'opensim.foo.bar.com'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _region(self):
return self._keyValue('region')
Region = property(fget = _region,
doc = '''URI region (if specified) (read-only)
Sample code:
>>> uri = RezzMe.uri.Uri('rezzmes://opensim.foobar.com/island/1/2/3?query')
>>> uri.Region
'island'
Note, that Region is a read-only property; setting it will raise an exception:
>>> uri.Host = 'wonderland'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _decodedRegion(self):
return urllib.unquote(self._keyValue('region'))
DecodedRegion = property(fget = _decodedRegion,
doc = '''Decoded URI region (if specified) (read-only)
Sample code:
>>> uri = RezzMe.uri.Uri('rezzmes://opensim.foobar.com/island/1/2/3?query')
>>> uri.DecodedRegion
'island'
but:
>>> uri = RezzMe.uri.Uri('rezzmes://opensim.foobar.com/treasure%20island/1/2/3?query')
>>> uri.DecodedRegion
'treasure island'
Note, that Region is a read-only property; setting it will raise an exception:
>>> uri.Host = 'wonderland'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _xyz(self):
return self._keyValue(['x', 'y', 'z'])
XYZ = property(fget = _xyz,
doc = '''tuple containing (X, Y, Z) coordinates (if specified) (read-only)
Sample code:
>>> uri = RezzMe.uri.Uri('rezzmes://opensim.foobar.com/island/1/2/3?query')
>>> uri.XYZ
[1, 2, 3]
Note, that XYZ is a read-only property; setting it will raise an exception:
>>> uri.XYZ = [4, 5, 6]
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _location(self):
if self.Region and all(self.XYZ):
return 'secondlife://%s/%s' % (self.Region, '/'.join([str(x) for x in self.XYZ])) # map(lambda x: str(x), self.XYZ)))
elif self.Region:
return 'secondlife://%s' % self.Region
else:
return None
Location = property(fget = _location,
doc = '''location with in the target grid as a secondlife:// URI (read-only)
Sample code:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/island/1/2/3')
>>> uri.Location
'secondlife://island/1/2/3'
or:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/wonder%20land/1/2/3')
>>> uri.Location
'secondlife://wonder%20land/1/2/3'
Note, that without a region we will get a None as return value:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/')
>>> uri.Location
Finally, Location is a read-only property; setting it will raise an exception:
>>> uri.Location = 'secondlife://myland/1/2/3'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _path(self):
return self._keyValue('path')
Path = property(fget = _path,
doc = '''URI region(X/Y/Z)? component (if available) (read-only)
Sample code:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/33')
>>> uri.Path
'myland/127/128/33'
but also:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/')
>>> uri.Path
Note, Path is a read-only property; setting it will raise an exception:
>>> uri.Path = 'yourland/127/126/32'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _query(self):
return self._keyValue('query')
Query = property(fget = _query,
doc = '''URI query component (if available)
Sample code:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/33?query&p0=a&p1=b')
>>> uri.Query
'query&p0=a&p1=b'
but also:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/33')
>>> uri.Query
Note, Query is a read-only property; setting it will raise an exception:
>>> uri.Query = 'query&p0=x&p1=y'
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def _dict(self):
return self.__dict
Dict = property(fget = _dict,
doc = '''returns dictionary with all recognized components (read-only)
Sample code:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/33')
>>> uri.Dict
{'z': 33, 'plain': 'rezzme://opensim.foobar.com', 'region': 'myland', 'userID': None, 'slurl': 'secondlife:///myland/127/128/33', 'host': 'opensim.foobar.com', 'client': None, 'x': 127, 'y': 128, 'path': 'myland/127/128/33', 'scheme': 'rezzme', 'display': None}
Note, Dict is a read-only property; setting it will raise an exception:
>>> uri.Dict = {'z': 33, 'plain': 'rezzme://opensim.foobar.com', 'region': 'myland', 'userID': None, 'slurl': 'secondlife:///myland/127/128/33', 'host': 'opensim.foobar.com', 'client': None, 'x': 127, 'y': 128, 'path': 'yourland', 'scheme': 'rezzme', 'display': None}
Traceback (most recent call last):
...
AttributeError: can't set attribute
''') #'
def __cmp__(self, other):
'''Override the comparison method and compare on FullUri:
Sample code:
>>> uri0 = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/33')
>>> uri1 = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/33')
>>> uri0 == uri1
True
and:
>>> uri0 = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/33')
>>> uri1 = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/3')
>>> uri0 == uri1
False
As this will only compare on FullUri, meta properties will not be taken into
account:
>>> uri0 = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/33')
>>> uri1 = RezzMe.uri.Uri('rezzme://opensim.foobar.com/myland/127/128/33')
>>> uri0 == uri1
True
'''
return cmp(self.FullUri, other.FullUri)
def __hash__(self):
'''Override the hash method to use __hash__ of FullUri instead.
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/')
>>> uri.__hash__()
-968694205
'''
return self.FullUri.__hash__()
def __str__(self):
'''Override the str representation method and return all relevant properties.
Sample code:
>>> uri = RezzMe.uri.Uri('rezzme://opensim.foobar.com/')
>>> str(uri)
'rezzme://opensim.foobar.com/ client: None/userId: None/display: rezzme://opensim.foobar.com'
Adding meta property values:
>>> uri.Client = '/path/to/client'
>>> str(uri)
'rezzme://opensim.foobar.com/ client: /path/to/client/userId: None/display: rezzme://opensim.foobar.com'
>>> uri.UserId = 'drscofield@foobar.com'
>>> str(uri)
'rezzme://opensim.foobar.com/ client: /path/to/client/userId: drscofield@foobar.com/display: rezzme://opensim.foobar.com'
'''
return '%s client: %s/userId: %s/display: %s' % (self.FullUri, self.Client, self.UserId, self.Display)
| bsd-3-clause | -4,486,795,777,950,281,700 | 42.547962 | 300 | 0.451637 | false | 4.648534 | false | false | false |
berndhahnebach/BOLTS | freecad/extrusions/wheels.py | 2 | 5455 | # **************************************************************************************
# * *
# * BOLTS - Open Library of Technical Specifications *
# * *
# * Copyright (C) 2014 Johannes Reinhardt <jreinhardt@ist-dein-freund.de> *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Lesser General Public *
# * License as published by the Free Software Foundation; either *
# * version 2.1 of the License, or any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with this library; if not, write to the Free Software *
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA *
# * *
# **************************************************************************************
import Part
from FreeCAD import Vector
from Part import makeLine
def dualvwheel(params, document):
    # no params
    name = params["name"]

    # still name some quantities
    r_1 = 0.5 * 13.89
    r_2 = 0.5 * 15.974
    r_3 = 9.77
    r_4 = 0.5 * 18.75
    r_5 = 0.5 * 24.39

    # profile for revolution is symmetric, therefore only points from right half
    vertices = [
        (0, r_1, 0),
        (0.5, r_1, 0),
        (0.5, r_2, 0),
        (0.5 * 10.23 - 0.3, r_2, 0),
        (0.5 * 10.23, r_2 + 0.3, 0),
        (0.5 * 10.23, r_3, 0),
        (0.5 * (10.23 - 4.84), r_5, 0),
        (0.5 * (10.23) - 4.84, r_3, 0),
        (0.5 * (10.23) - 4.84, r_4, 0),
        (0, r_4, 0)
    ]

    lines = []
    vlast = None
    vcur = Vector(vertices[0])

    # right half
    for i in range(1, len(vertices)):
        vlast = vcur
        vcur = Vector(vertices[i])
        lines.append(makeLine(vcur, vlast))

    # left half
    for i in range(len(vertices) - 2, -1, -1):
        vlast = vcur
        vcur = Vector(vertices[i])
        vcur[0] *= -1
        lines.append(makeLine(vcur, vlast))

    part = document.addObject("Part::Feature", "BOLTS_part")
    part.Label = name
    part.Shape = Part.Face(
        Part.Wire(lines)
    ).revolve(Vector(0, 0, 0), Vector(1, 0, 0), 360).removeSplitter()
def solidvwheel(params, document):
    # no params
    name = params["name"]

    # still name some quantities
    r_1 = 0.5 * 13.89
    r_2 = 0.5 * 15.974
    r_3 = 9.77
    r_4 = 0.5 * 23.89

    # profile for revolution is symmetric, therefore only points from right half
    vertices = [
        (0, r_1, 0),
        (0.5, r_1, 0),
        (0.5, r_2, 0),
        (0.5 * 10.23 - 0.3, r_2, 0),
        (0.5 * 10.23, r_2 + 0.3, 0),
        (0.5 * 10.23, r_3, 0),
        (0.5 * 5.89, r_4, 0),
        (0, r_4, 0),
    ]

    lines = []
    vlast = None
    vcur = Vector(vertices[0])

    # right half
    for i in range(1, len(vertices)):
        vlast = vcur
        vcur = Vector(vertices[i])
        lines.append(makeLine(vcur, vlast))

    # left half
    for i in range(len(vertices) - 2, -1, -1):
        vlast = vcur
        vcur = Vector(vertices[i])
        vcur[0] *= -1
        lines.append(makeLine(vcur, vlast))

    part = document.addObject("Part::Feature", "BOLTS_part")
    part.Label = name
    part.Shape = Part.Face(
        Part.Wire(lines)
    ).revolve(Vector(0, 0, 0), Vector(1, 0, 0), 360).removeSplitter()
def minivwheel(params, document):
    # no params
    name = params["name"]

    # still name some quantities
    r_1 = 0.5 * 8.64
    r_2 = 0.5 * 9.974
    r_3 = 0.5 * 12.21
    r_4 = 0.5 * 15.23

    # profile for revolution is symmetric, therefore only points from right half
    vertices = [
        (0, r_1, 0),
        (0.5, r_1, 0),
        (0.5, r_2, 0),
        (0.5 * 8.8 - 0.3, r_2, 0),
        (0.5 * 8.8, r_2 + 0.3, 0),
        (0.5 * 8.8, r_3, 0),
        (0.5 * 5.78, r_4, 0),
        (0, r_4, 0),
    ]

    lines = []
    vlast = None
    vcur = Vector(vertices[0])

    # right half
    for i in range(1, len(vertices)):
        vlast = vcur
        vcur = Vector(vertices[i])
        lines.append(makeLine(vcur, vlast))

    # left half
    for i in range(len(vertices) - 2, -1, -1):
        vlast = vcur
        vcur = Vector(vertices[i])
        vcur[0] *= -1
        lines.append(makeLine(vcur, vlast))

    part = document.addObject("Part::Feature", "BOLTS_part")
    part.Label = name
    part.Shape = Part.Face(
        Part.Wire(lines)
    ).revolve(Vector(0, 0, 0), Vector(1, 0, 0), 360).removeSplitter()
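# Illustrative usage sketch (an assumption, not part of the original BOLTS
# module): all three builders share the same calling convention, taking a
# params dict with a "name" key and an open FreeCAD document.
#
#   import FreeCAD
#   doc = FreeCAD.newDocument("wheel_demo")   # hypothetical document name
#   dualvwheel({"name": "Dual V wheel"}, doc)
#   doc.recompute()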
| gpl-3.0 | 1,902,312,656,742,137,900 | 31.088235 | 88 | 0.449129 | false | 3.422208 | false | false | false |
skosukhin/spack | var/spack/repos/builtin/packages/eccodes/package.py | 1 | 4224 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class Eccodes(CMakePackage):
"""ecCodes is a package developed by ECMWF for processing meteorological
data in GRIB (1/2), BUFR (3/4) and GTS header formats."""
homepage = "https://software.ecmwf.int/wiki/display/ECC/ecCodes+Home"
url = "https://software.ecmwf.int/wiki/download/attachments/45757960/eccodes-2.2.0-Source.tar.gz?api=v2"
list_url = "https://software.ecmwf.int/wiki/display/ECC/Releases"
version('2.5.0', '5a7e92c58418d855082fa573efd352aa')
version('2.2.0', 'b27e6f0a3eea5b92dac37372e4c45a62')
variant('netcdf', default=False,
description='Enable GRIB to NetCDF conversion tool')
variant('jp2k', default='openjpeg', values=('openjpeg', 'jasper', 'none'),
description='Specify JPEG2000 decoding/encoding backend')
variant('png', default=False,
description='Enable PNG support for decoding/encoding')
variant('aec', default=False,
description='Enable Adaptive Entropy Coding for decoding/encoding')
variant('pthreads', default=False,
description='Enable POSIX threads')
variant('openmp', default=False,
description='Enable OpenMP threads')
variant('memfs', default=False,
description='Enable memory based access to definitions/samples')
variant('python', default=False,
description='Enable the Python interface')
variant('fortran', default=True, description='Enable the Fortran support')
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo', 'Production'))
depends_on('netcdf', when='+netcdf')
depends_on('openjpeg', when='jp2k=openjpeg')
depends_on('jasper', when='jp2k=jasper')
depends_on('libpng', when='+png')
depends_on('libaec', when='+aec')
depends_on('python@:2', when='+python')
depends_on('py-numpy', when='+python', type=('build', 'run'))
extends('python', when='+python')
conflicts('+openmp', when='+pthreads',
msg='Cannot enable both POSIX threads and OMP')
# The following enforces linking against the specified JPEG2000 backend.
patch('enable_only_openjpeg.patch', when='jp2k=openjpeg')
patch('enable_only_jasper.patch', when='jp2k=jasper')
def cmake_args(self):
variants = ['+netcdf', '+png', '+aec', '+pthreads',
'+openmp', '+memfs', '+python', '+fortran']
options = ['NETCDF', 'PNG', 'AEC', 'ECCODES_THREADS',
'ECCODES_OMP_THREADS', 'MEMFS', 'PYTHON', 'FORTRAN']
args = map(lambda var, opt:
"-DENABLE_%s=%s" %
(opt, 'ON' if var in self.spec else 'OFF'),
variants,
options)
if self.spec.variants['jp2k'].value == 'none':
args.append('-DENABLE_JPG=OFF')
else:
args.append('-DENABLE_JPG=ON')
return args
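    # Illustrative note (an assumption, not part of the original package): for
    # a spec such as `eccodes +netcdf +png jp2k=openjpeg ~fortran`, the list
    # returned above would contain, e.g., '-DENABLE_NETCDF=ON',
    # '-DENABLE_PNG=ON', '-DENABLE_FORTRAN=OFF' and '-DENABLE_JPG=ON'.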
| lgpl-2.1 | -5,401,977,492,162,261,000 | 43.93617 | 113 | 0.6366 | false | 3.795148 | false | false | false |
chemreac/chemreac | chemreac/util/banded.py | 2 | 2152 | # -*- coding: utf-8 -*-
"""
chemreac.util.banded
--------------------
this module contains functions to deal with banded matrices.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
def get_banded(A, n, N, n_jac_diags=1, order='C', padded=False):
""" Turns a dense matrix into a banded one
Turns a dense matrix (n·N) × (n·N) into a banded matrix
including the diagonal and n super-diagonals and n sub-diagonals.
Parameters
----------
A: 2-dimensional square matrix
n: int
sub-block dimension
N: int
number of super-blocks
n_jac_diags: int
number of diagonals (default: 1)
order: {'C', 'F'}, optional
C- or Fortran-contiguous
padded: bool, optional
default: False, if True: A is padded with n rows along the top
Raises
------
ValueError on mismatch of A.shape and n*N
"""
if A.shape != (n*N, n*N):
raise ValueError("Shape of A != (n*N, n*N)")
nouter = n * n_jac_diags
B = np.zeros(((3 if padded else 2)*nouter + 1, n*N), order=order)
for ri in range(n*N):
for ci in range(max(0, ri-nouter), min(n*N, ri+nouter+1)):
B[(2 if padded else 1)*nouter+ri-ci, ci] = A[ri, ci]
return B
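# Illustrative sketch (not part of the original module; the numbers are made
# up but follow from the function above): with n=2, N=2 and the default
# n_jac_diags=1 the banded array has 2*n + 1 = 5 rows and n*N = 4 columns.
#
#   >>> import numpy as np
#   >>> A = np.arange(16.0).reshape(4, 4)
#   >>> get_banded(A, n=2, N=2).shape
#   (5, 4)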
def get_jac_row_from_banded(J, rows, n, n_jac_diags=1):
"""
Extracts rows from a banded matrix J
Parameters
----------
J: 2-dimensional array
Source matrix with banded storage.
rows: sequence
indices of rows to extract
n: integer
row length
n_jac_diags: integer
number of diagonals (default: 1)
"""
out = np.empty((len(rows), n))
nouter = n * n_jac_diags
for ri in rows:
for ci in range(n):
out[rows.index(ri), ci] = J[nouter+ri-ci, ci]
return out
def get_dense(A, n, N, padded=False, n_jac_diags=1):
    out = np.zeros((n*N, n*N))
    nouter = n * n_jac_diags
    diag_offset = 2*nouter if padded else nouter
    for ri in range(n*N):
        for ci in range(max(0, ri-nouter), min(n*N, ri+nouter+1)):
            out[ri, ci] = A[diag_offset+ri-ci, ci]
    return out
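# Illustrative sketch (an assumption, not from the original source): within the
# band, get_dense inverts get_banded, so a round-trip reproduces the banded
# part of the original matrix:
#
#   >>> B = get_banded(A, n=2, N=2)
#   >>> D = get_dense(B, n=2, N=2)
#   >>> # D equals A on the diagonal and the n=2 sub-/super-diagonals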
| bsd-2-clause | 6,821,952,217,844,712,000 | 26.551282 | 70 | 0.581666 | false | 3.137226 | false | false | false |
fna/owning-a-home-api | ratechecker/migrations/0003_auto__add_fee__add_unique_fee_product_id_state_id_lender_single_family.py | 2 | 8879 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Fee'
db.create_table(u'ratechecker_fee', (
('fee_id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('plan', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ratechecker.Product'])),
('product_id', self.gf('django.db.models.fields.IntegerField')()),
('state_id', self.gf('localflavor.us.models.USStateField')(max_length=2)),
('lender', self.gf('django.db.models.fields.CharField')(max_length=16)),
('single_family', self.gf('django.db.models.fields.BooleanField')(default=True)),
('condo', self.gf('django.db.models.fields.BooleanField')(default=False)),
('coop', self.gf('django.db.models.fields.BooleanField')(default=False)),
('origination_dollar', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)),
('origination_percent', self.gf('django.db.models.fields.DecimalField')(max_digits=6, decimal_places=3)),
('third_party', self.gf('django.db.models.fields.DecimalField')(max_digits=8, decimal_places=2)),
('data_timestamp', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'ratechecker', ['Fee'])
# Adding unique constraint on 'Fee', fields ['product_id', 'state_id', 'lender', 'single_family', 'condo', 'coop']
db.create_unique(u'ratechecker_fee', ['product_id', 'state_id', 'lender', 'single_family', 'condo', 'coop'])
def backwards(self, orm):
# Removing unique constraint on 'Fee', fields ['product_id', 'state_id', 'lender', 'single_family', 'condo', 'coop']
db.delete_unique(u'ratechecker_fee', ['product_id', 'state_id', 'lender', 'single_family', 'condo', 'coop'])
# Deleting model 'Fee'
db.delete_table(u'ratechecker_fee')
models = {
u'ratechecker.adjustment': {
'Meta': {'object_name': 'Adjustment'},
'adj_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '3'}),
'affect_rate_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'data_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_fico': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'max_loan_amt': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'}),
'max_ltv': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '3'}),
'min_fico': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'min_loan_amt': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2'}),
'min_ltv': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '3'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ratechecker.Product']"}),
'prop_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'rule_id': ('django.db.models.fields.IntegerField', [], {}),
'state': ('localflavor.us.models.USStateField', [], {'max_length': '2', 'null': 'True'})
},
u'ratechecker.fee': {
'Meta': {'unique_together': "(('product_id', 'state_id', 'lender', 'single_family', 'condo', 'coop'),)", 'object_name': 'Fee'},
'condo': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'coop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'fee_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lender': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'origination_dollar': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'origination_percent': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '3'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ratechecker.Product']"}),
'product_id': ('django.db.models.fields.IntegerField', [], {}),
'single_family': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'state_id': ('localflavor.us.models.USStateField', [], {'max_length': '2'}),
'third_party': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'})
},
u'ratechecker.product': {
'Meta': {'object_name': 'Product'},
'adj_period': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'ai_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '4'}),
'annual_cap': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'arm_index': ('django.db.models.fields.CharField', [], {'max_length': '96', 'null': 'True'}),
'arm_margin': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '4'}),
'condo': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'coop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'institution': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'int_adj_cap': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'int_adj_term': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'io': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'loan_cap': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'loan_purpose': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'loan_term': ('django.db.models.fields.IntegerField', [], {}),
'loan_type': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'max_fico': ('django.db.models.fields.IntegerField', [], {}),
'max_loan_amt': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'max_ltv': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '3'}),
'min_fico': ('django.db.models.fields.IntegerField', [], {}),
'min_loan_amt': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'min_ltv': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '3'}),
'plan_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'pmt_type': ('django.db.models.fields.CharField', [], {'default': "'FIXED'", 'max_length': '12'}),
'single_family': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'ratechecker.rate': {
'Meta': {'object_name': 'Rate'},
'base_rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '3'}),
'data_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'lock': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ratechecker.Product']"}),
'rate_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'region_id': ('django.db.models.fields.IntegerField', [], {}),
'total_points': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '3'})
},
u'ratechecker.region': {
'Meta': {'object_name': 'Region'},
'data_timestamp': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'state_id': ('localflavor.us.models.USStateField', [], {'max_length': '2'})
}
}
complete_apps = ['ratechecker']
| cc0-1.0 | -4,739,723,807,337,827,000 | 72.991667 | 139 | 0.565829 | false | 3.540271 | false | false | false |
cmos3511/cmos_linux | python/op/nop/core/op_top.py | 1 | 7885 | """
Description: op platform top entrance
"""
import argparse
from utils import pcom
from conf import settings
from core import runner_admin
from core import runner_init
from core import runner_flow
from core import runner_backup
from lic.op_lic import OPClient
LOG = pcom.gen_logger(__name__)
def gen_admin_parser(subparsers):
"""to generate admin parser"""
admin_parser = subparsers.add_parser(
"admin",
help="sub cmd about kicking off project related actions")
me_group = admin_parser.add_mutually_exclusive_group()
me_group.add_argument(
"-list", dest="admin_list_proj", action="store_true",
help="toggle to list all currently available proj names")
me_group.add_argument(
"-list_lab", dest="admin_list_lab", action="store_true",
help="toggle to list all currently available lab names")
me_group.add_argument(
"-p", dest="admin_proj_name",
help="input the proj name which will be kicked off")
me_group.add_argument(
"-b", dest="admin_block_lst", nargs="+",
help="input the block names to be initialized in the specified project")
me_group.add_argument(
"-update_blk", dest="admin_update_blk", nargs="*",
help="toggle or input blocks to update blocks directory according to RELEASE directory")
me_group.add_argument(
"-list_lib", dest="admin_list_lib", action="store_true",
help="toggle to list all available lib process names")
me_group.add_argument(
"-lib", dest="admin_lib", nargs="*",
help="toggle or input lib processes to generate library mapping links and related files")
me_group.add_argument(
"-lib_type", dest="admin_lib_type", nargs="+",
help="input the lib_types, e.g. std, tech, mem, ip, io")
me_group.add_argument(
"-release_check", dest="admin_release_check", action="store_true",
help="toggle to check the block released json files")
me_group.add_argument(
"-release", dest="admin_release", action="store_true",
help="toggle to release block files")
admin_parser.set_defaults(func=main_admin)
def main_admin(args):
"""init sub cmd top function"""
runner_admin.run_admin(args)
def gen_init_parser(subparsers):
"""to generate init parser"""
init_parser = subparsers.add_parser(
"init",
help="sub cmd about generating initial project directories")
me_group = init_parser.add_mutually_exclusive_group()
me_group.add_argument(
"-list", dest="init_list_proj", action="store_true",
help="toggle to list all currently available proj names")
me_group.add_argument(
"-list_lab", dest="init_list_lab", action="store_true",
help="toggle to list all currently available lab names")
me_group.add_argument(
"-p", dest="init_proj_name",
help="input the proj name which will be check out from repository")
init_parser.add_argument(
"-b", dest="init_block_name_lst", nargs="+",
help="input the block name which will be check out from repository")
init_parser.set_defaults(func=main_init)
def main_init(args):
"""init sub cmd top function"""
runner_init.run_init(args)
def gen_flow_parser(subparsers):
"""to generate flow parser"""
flow_parser = subparsers.add_parser(
"flow",
help="sub cmd about running and controlling backend flows")
me_group = flow_parser.add_mutually_exclusive_group()
me_group.add_argument(
"-list_env", dest="flow_list_env", action="store_true",
help="toggle to list all internal environment variables")
me_group.add_argument(
"-list_blk", dest="flow_list_blk", action="store_true",
help="toggle to list all available blocks")
me_group.add_argument(
"-list_flow", dest="flow_list_flow", action="store_true",
help="toggle to list all available flows")
me_group.add_argument(
"-list_diff", dest="flow_list_diff", nargs='?', const='DEFAULT',
metavar='FLOW_NAME',
help="""toggle to demonstrate the diff between block level config/plugins
and proj level one (default: FLOW_NAME=DEFAULT)""")
me_group.add_argument(
"-init", dest="flow_init_lst", nargs="+",
help="input flow initial name list to generate flow config files")
me_group.add_argument(
"-gen", dest="flow_gen_lst", nargs="*",
help="toggle and input flows to generate flow run files")
me_group.add_argument(
"-run", dest="flow_run_lst", nargs="*",
help="toggle and input flows to run flow")
flow_parser.add_argument(
"-force", dest="flow_force", default=False, nargs="?",
help="toggle and input begin sub-stage to run force to ignore last status")
flow_parser.add_argument(
"-auto_release", dest="flow_auto_release", action="store_true",
help="for ext sub_stage auot release")
flow_parser.add_argument(
"-auto_run", dest="flow_auto_run", action="store_true",
help="for sta flow auto run")
flow_parser.add_argument(
"-begin", dest="flow_begin", default="",
help="input begin sub-stage to run")
flow_parser.add_argument(
"-end", dest="flow_end", default="",
help="input end sub_stage to run")
flow_parser.add_argument(
"-c", dest="flow_comment", default="",
help="input flow comments to be shown and distinguished with others")
me_group.add_argument(
"-show_var", dest="flow_show_var_lst", nargs="*",
help="toggle and input flows to list all variables passed to templates")
me_group.add_argument(
"-restore", dest="flow_restore", default="",
help="input flow::stage:sub-stage to restore")
me_group.add_argument(
"-release", dest="flow_release_lst", nargs="+",
help="input multiple flow::stage:sub-stage to release")
flow_parser.add_argument(
"-yes", dest="flow_cfm_yes", action="store_true",
help="toggle flows to give all yes response to flow inline prompt hint")
flow_parser.set_defaults(func=main_flow)
def main_flow(args):
"""flow sub cmd top function"""
runner_flow.run_flow(args)
def gen_backup_parser(subparsers):
"""to generate backup parser"""
backup_parser = subparsers.add_parser(
"backup",
help="sub cmd about backup project directories")
me_group = backup_parser.add_mutually_exclusive_group()
me_group.add_argument(
"-p", dest="backup_proj_name",
help="input the proj name which to be backup by super user")
backup_parser.set_defaults(func=main_backup)
def main_backup(args):
"""backup sub cmd top function"""
runner_backup.run_backup(args)
def gen_args_top():
"""to generate top args help for op"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-v", dest="version", action="store_true",
help="show op version info and exit")
subparsers = parser.add_subparsers()
gen_admin_parser(subparsers)
gen_init_parser(subparsers)
gen_flow_parser(subparsers)
gen_backup_parser(subparsers)
return parser.parse_args()
def main():
"""op top function"""
args = gen_args_top()
if args.version:
print("OnePiece Platform Version: op 4.0.0")
return
if not settings.DEBUG:
opclient = OPClient()
opclient.set_license_server()
opclient.checkout_license()
if hasattr(args, "func"):
try:
args.func(args)
except KeyboardInterrupt:
LOG.critical("op terminated")
except SystemExit:
LOG.critical("op failed")
else:
LOG.info("op completed")
else:
LOG.critical("sub cmd is NA, please use -h to check all sub cmds")
if not settings.DEBUG:
opclient.checkin_license()
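# Illustrative CLI sketch (an assumption, inferred from the argparse setup
# above; "op" as the entry-point name and the project/block names are
# placeholders, not confirmed by this module):
#
#   op -v                          # print version info and exit
#   op admin -list                 # list available project names
#   op init -p my_proj -b my_blk   # check out a project and a block
#   op flow -list_flow             # list all available flows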
| gpl-3.0 | 7,957,769,981,726,652,000 | 39.025381 | 97 | 0.639949 | false | 3.829529 | false | false | false |
idjaw/keystone | keystone/contrib/endpoint_filter/migrate_repo/versions/002_add_endpoint_groups.py | 14 | 1658 | # Copyright 2014 Hewlett-Packard Company
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    meta = sql.MetaData()
    meta.bind = migrate_engine

    endpoint_group_table = sql.Table(
        'endpoint_group',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('name', sql.String(255), nullable=False),
        sql.Column('description', sql.Text, nullable=True),
        sql.Column('filters', sql.Text(), nullable=False))
    endpoint_group_table.create(migrate_engine, checkfirst=True)

    project_endpoint_group_table = sql.Table(
        'project_endpoint_group',
        meta,
        sql.Column('endpoint_group_id', sql.String(64),
                   sql.ForeignKey('endpoint_group.id'), nullable=False),
        sql.Column('project_id', sql.String(64), nullable=False),
        sql.PrimaryKeyConstraint('endpoint_group_id',
                                 'project_id'))
    project_endpoint_group_table.create(migrate_engine, checkfirst=True)
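# Rough sketch of the DDL this migration produces (backend-dependent and shown
# only for illustration; the exact SQL is generated by SQLAlchemy):
#
#   CREATE TABLE endpoint_group (
#       id VARCHAR(64) NOT NULL PRIMARY KEY,
#       name VARCHAR(255) NOT NULL,
#       description TEXT,
#       filters TEXT NOT NULL
#   );
#   CREATE TABLE project_endpoint_group (
#       endpoint_group_id VARCHAR(64) NOT NULL REFERENCES endpoint_group (id),
#       project_id VARCHAR(64) NOT NULL,
#       PRIMARY KEY (endpoint_group_id, project_id)
#   );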
| apache-2.0 | 953,416,890,962,155,500 | 39.439024 | 75 | 0.681544 | false | 4.043902 | false | false | false |
Dingmatt/AMSA | Plug-ins/Amsa.bundle/Contents/Code/tvdb.py | 1 | 12335 | import constants, functions, lxml, logging
from functions import XMLFromURL, GetElementText
from lxml import etree
from lxml.builder import E
from lxml.etree import Element, SubElement, Comment
def ParseNoFromSeason(season, episode, default):
    #if season == 0 and episode == 0:
    #    return "S" + str(default).zfill(2) + "E00"
    #else:
    return "S" + str(season).zfill(2) + "E" + str(episode).zfill(2)
class TvDB(constants.Series):
def __init__(self, id):
logging.Log_Milestone("TvDB" + "_" + id)
self.ID = id
self.MetaType = "Tvdb"
data = XMLFromURL(constants.TVDB_HTTP_API_URL % id, id + ".xml", os.path.join("TvDB", id), CACHE_1HOUR * 24).xpath("""/Data""")
if data != None:
data = data[0]
##--------------------------------Title--------------------------------##
if GetElementText(data, "Series/SeriesName"):
self.Title = str(GetElementText(data, "Series/SeriesName")).encode('utf-8').strip().translate(constants.ReplaceChars)
##--------------------------------Summary------------------------------##
if GetElementText(data, "Series/Overview"):
self.Summary = GetElementText(data, "Series/Overview")
##--------------------------------Originally_Available_At--------------##
if GetElementText(data, "Series/FirstAired"):
self.Originally_Available_At = GetElementText(data, "Series/FirstAired")
##--------------------------------Rating-------------------------------##
if GetElementText(data, "Series/Rating"):
self.Rating = GetElementText(data, "Series/Rating")
##--------------------------------Studio-------------------------------##
if GetElementText(data, "Series/Network"):
self.Studio = GetElementText(data, "Series/Network")
##--------------------------------Countries----------------------------##
##--------------------------------Genres-------------------------------##
if GetElementText(data, "Series/Genre"):
self.Genres = filter(None, GetElementText(data, "Series/Genre").split("|"))
##--------------------------------Tags---------------------------------##
##--------------------------------Collections--------------------------##
##--------------------------------Content_Rating-----------------------##
if GetElementText(data, "Series/ContentRating"):
self.Content_Rating = GetElementText(data, "Series/ContentRating")
##--------------------------------Writers------------------------------##
##--------------------------------Directors----------------------------##
##--------------------------------Producers----------------------------##
##--------------------------------Roles--------------------------------##
self.Roles = []
##--------------------------------Images-------------------------------##
banners = []
bannersXml = XMLFromURL(constants.TVDB_BANNERS_URL % id, id + "_banners.xml", os.path.join("TvDB", id), CACHE_1HOUR * 24)
if bannersXml:
art = etree.tostring(E.Images(), pretty_print=True, xml_declaration=True, encoding="UTF-8")
art = XML.ElementFromString(art)
artCount = 2
posters = etree.tostring(E.Images(), pretty_print=True, xml_declaration=True, encoding="UTF-8")
posters = XML.ElementFromString(posters)
postersCount = 2
banners = etree.tostring(E.Images(), pretty_print=True, xml_declaration=True, encoding="UTF-8")
banners = XML.ElementFromString(banners)
bannersCount = 2
season = etree.tostring(E.Images(), pretty_print=True, xml_declaration=True, encoding="UTF-8")
season = XML.ElementFromString(season)
seasonCount = []
for banner in sorted(bannersXml.xpath("./Banner"), key=lambda x: float(GetElementText(x, "Rating", 0)) , reverse=True):
bannerType = GetElementText(banner, "BannerType")
bannerType2 = GetElementText(banner, "BannerType2")
bannerPath = GetElementText(banner, "BannerPath")
bannerThumb = GetElementText(banner, "ThumbnailPath")
if bannerThumb == None or bannerThumb == "":
bannerThumb = os.path.splitext(bannerPath)[0] + '_t' + os.path.splitext(bannerPath)[1]
metatype = ("art" if bannerType == "fanart" else \
"posters" if bannerType == "poster" else \
"banners" if bannerType == "series" or bannerType2=="seasonwide" else \
"season" if bannerType == "season" and bannerType2=="680x1000" else \
"season" if bannerType == "season" and bannerType2=="season" else None)
#Log("Images: %s, %s, %s, %s, %s" % (bannerPath, constants.TVDB_IMAGES_URL, id, metatype, bannerThumb))
mainUrl, thumbUrl, mainLocalPath, thumbLocalPath = functions.ParseImage(bannerPath, constants.TVDB_IMAGES_URL, os.path.join("TvDB", id, metatype), bannerThumb)
if metatype == "art":
SubElement(art, "Image", id = str(1 if bannerPath == GetElementText(data, "Series/fanart") else artCount), mainUrl = mainUrl, thumbUrl = thumbUrl, mainLocalPath = mainLocalPath, thumbLocalPath = thumbLocalPath)
artCount = artCount + 1
if metatype == "posters":
SubElement(posters, "Image", id = str(1 if bannerPath == GetElementText(data, "Series/poster") else postersCount), mainUrl = mainUrl, thumbUrl = thumbUrl, mainLocalPath = mainLocalPath, thumbLocalPath = thumbLocalPath)
postersCount = postersCount + 1
if metatype == "banners":
SubElement(banners, "Image", id = str(1 if bannerPath == GetElementText(data, "Series/banner") else bannersCount), mainUrl = mainUrl, thumbUrl = thumbUrl, mainLocalPath = mainLocalPath, thumbLocalPath = thumbLocalPath)
bannersCount = bannersCount + 1
if metatype == "season":
seasonCount.append(GetElementText(banner, "Season"))
SubElement(season, "Image", id = str(seasonCount.count(GetElementText(banner, "Season"))), mainUrl = mainUrl, thumbUrl = thumbUrl, mainLocalPath = mainLocalPath, thumbLocalPath = thumbLocalPath, season = str(GetElementText(banner, "Season")))
self.Art = art
self.Posters = posters
self.Banners = banners
self.Season = season
##--------------------------------Themes-------------------------------##
self.Themes = []
##--------------------------------EpisodeCount-------------------------##
self.EpisodeCount = len(data.xpath("""./Episode/SeasonNumber[text()>0]"""))
##--------------------------------SpecialCount-------------------------##
self.SpecialCount = len(data.xpath("""./Episode/SeasonNumber[text()=0]"""))
##--------------------------------Duration-----------------------------##
if GetElementText(data, "Series/Runtime"):
self.Duration = int(int(self.EpisodeCount) * int(GetElementText(data, "Series/Runtime")))
##--------------------------------OP/ED_List---------------------------##
self.OpList = []
self.EdList = []
##--------------------------------Episodes-----------------------------##
if len(data.xpath("""./Episode""")) > 0:
self.Episodes = []
for item in data.xpath("""./Episode"""):
self.Episodes.append(self.Episode(item, id))
#Log("AniDB - __init__() - Populate Title: '%s', Network: '%s', Overview: '%s', FirstAired: '%s', Genre: '%s', ContentRating: '%s', Rating: '%s', Episodes: '%s', EpisodeCount: '%s', SpecialCount: '%s', OpedCount: '%s', Posters: '%s'"
#% (self.Title, self.Network, self.Overview, self.FirstAired, self.Genre, self.ContentRating, self.Rating, self.Episodes, self.EpisodeCount, self.SpecialCount, self.OpedCount, self.Posters) )
logging.Log_Milestone("TvDB" + "_" + id)
class Episode(constants.Episode):
def __init__(self, data, id):
##--------------------------------Title--------------------------------##
if GetElementText(data, "EpisodeName"):
self.Title = str(GetElementText(data, "EpisodeName")).encode('utf-8').strip().translate(constants.ReplaceChars)
##--------------------------------Summary------------------------------##
if GetElementText(data, "Overview"):
self.Summary = GetElementText(data, "Overview")
##--------------------------------Originally_Available_At--------------##
if GetElementText(data, "FirstAired" ):
self.Originally_Available_At = GetElementText(data, "FirstAired")
##--------------------------------Rating-------------------------------##
if GetElementText(data, "Rating"):
self.Rating = GetElementText(data, "Rating")
##--------------------------------Absolute_Index-----------------------##
if GetElementText(data, "absolute_number"):
self.Absolute_Index = int(GetElementText(data, "absolute_number"))
##--------------------------------Writers------------------------------##
if GetElementText(data, "Writer"):
if self.Writers is None: self.Writers = []
self.Writers.append(GetElementText(data, "Writer"))
##--------------------------------Directors----------------------------##
if GetElementText(data, "Director"):
if self.Directors is None: self.Directors = []
self.Directors.append(GetElementText(data, "Director"))
##--------------------------------Producers----------------------------##
##--------------------------------Thumbs-------------------------------##
if GetElementText(data, "filename"):
root = etree.tostring(E.Images(), pretty_print=True, xml_declaration=True, encoding="UTF-8")
root = XML.ElementFromString(root)
bannerPath = GetElementText(data, "filename")
bannerThumb = os.path.splitext(bannerPath)[0] + '_t' + os.path.splitext(bannerPath)[1]
mainUrl, thumbUrl, mainLocalPath, thumbLocalPath = functions.ParseImage(bannerPath, constants.TVDB_IMAGES_URL, os.path.join("TvDB", id, "thumbs"), bannerThumb)
SubElement(root, "Image", id = "1", mainUrl = mainUrl, thumbUrl = thumbUrl, mainLocalPath = mainLocalPath, thumbLocalPath = thumbLocalPath)
self.Thumbs = root
##--------------------------------Number-------------------------------##
if GetElementText(data, "EpisodeNumber"):
self.Number = str(GetElementText(data, "EpisodeNumber")).zfill(2)
##--------------------------------Season-------------------------------##
if GetElementText(data, "SeasonNumber"):
self.Season = str(GetElementText(data, "SeasonNumber")).zfill(2)
| gpl-3.0 | 1,307,995,058,636,179,700 | 59.170732 | 266 | 0.45002 | false | 4.827789 | false | false | false |
ckot/django-oneall | django_oneall/templatetags/oneall.py | 2 | 2309 | # -*- coding: utf-8 -*-
from json import dumps
from django.template import Library
from django.utils.html import escape
from django.utils.safestring import mark_safe
from ..app import settings
from ..models import SocialUserCache, get_user_model
register = Library()
@register.inclusion_tag('oneall/header.html')
def oneall_header():
"""
OneAll required script.
This must go in the ``<head>...</head>`` section of your templates,
otherwise widgets won't load.
"""
return {'oneall_site_name': settings.credentials['site_name']}
@register.inclusion_tag('oneall/social_login.html')
def oneall_social_login(user=None, **kwargs):
"""
This tag displays the Social Login or Social Link widget.
Don't forget to include ``{% oneall_header %}``!
:param user: Logged in user for Social Link mode; if not provided, it's Social Login mode.
:param kwargs: Widget options as documented by OneAll. For example, ``grid_sizes=[8,5]``
"""
if isinstance(user, get_user_model()):
social_user = SocialUserCache.objects.filter(user=user).first()
if social_user:
kwargs['user_token'] = str(social_user.user_token)
else:
user = None # no cached social user, thus revert to social login mode
widget_settings = {}
for key, value in settings.login_widget(kwargs).items():
widget_settings[key] = mark_safe(dumps(value))
return {
'settings': widget_settings,
'mode': 'social_link' if user else 'social_login',
}
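# Illustrative template usage for the tag above (a sketch, not part of the
# original module; it only restates what the docstring describes):
#   {% load oneall %}
#   {% oneall_header %}              <!-- goes in <head>, as noted above -->
#   {% oneall_social_login %}        <!-- Social Login mode -->
#   {% oneall_social_login user %}   <!-- Social Link mode for a logged-in user -->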
@register.inclusion_tag('oneall/social_sharing.html')
def oneall_share(layout='s', **kwargs):
"""
This tag display the `Social Sharing`_ widget.
.. _Social Sharing: https://www.oneall.com/services/social-sharing/
Don't forget to include ``{% oneall_header %}``!
:param layout: Button layout as defined by the Social Sharing Wizard.
:param kwargs: Social link arguments.
"""
layout = str(layout).lower()
if layout not in 'smlhv':
raise ValueError("Invalid layout (%s). Must be one of S M L H or V." % layout)
args = ' '.join(('data-%s="%s"' % (k, escape(v)) for k, v in kwargs.items()))
return {
'layout': layout,
'arguments': mark_safe(args),
'networks': settings.share_widget['networks']
}
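# Illustrative usage of the sharing tag above (a sketch; the page values are
# assumptions -- each kwarg is rendered as a data-<key>="<value>" attribute):
#   {% oneall_share 'm' url='http://example.com/' title='My page' %}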
| mit | 3,545,824,287,108,598,300 | 31.985714 | 94 | 0.650498 | false | 3.688498 | false | false | false |
ksons/gltf-blender-importer | addons/io_scene_gltf_ksons/vnode.py | 1 | 24998 | from math import pi
from mathutils import Matrix, Quaternion, Vector, Euler
from .compat import mul
from .mesh import mesh_name
# The node graph in glTF needs to be fixed up quite a bit before it will work for
# Blender. We first create a graph of "virtual nodes" to match the graph in the
# glTF file and then transform it in a bunch of passes to make it suitable for
# Blender import.
class VNode:
def __init__(self):
# The ID of the glTF node this vnode was created from, or None if there
# wasn't one
self.node_id = None
# List of child vnodes
self.children = []
# Parent vnode, or None for the root
self.parent = None
# (Vector, Quaternion, Vector) triple of the local-to-parent TRS transform
self.trs = (Vector((0, 0, 0)), Quaternion((1, 0, 0, 0)), Vector((1, 1, 1)))
# What type of Blender object will be created for this vnode: one of
        # OBJECT, ARMATURE, BONE, or ROOT (for the special vnode that we use to
# turn the forest into a tree to make things easier to process).
self.type = 'OBJECT'
# Dicts of instance data
self.mesh = None
self.camera = None
self.light = None
# If this node had an instance in glTF but we moved it to another node,
# we record where we put it here
self.mesh_moved_to = None
self.camera_moved_to = None
self.light_moved_to = None
# These will be filled out after realization with the Blender data
# created for this vnode.
self.blender_object = None
self.blender_armature = None
self.blender_editbone = None
self.blender_name = None
# The editbone's (Translation, Rotation)
self.editbone_tr = None
        self.pose_s = None
self.editbone_local_to_armature = Matrix.Identity(4)
self.bone_length = 0
# Correction to apply to the original TRS to get the editbone TR
self.correction_rotation = Quaternion((1, 0, 0, 0))
self.correction_homscale = 1
def create_vtree(op):
initial_vtree(op)
insert_armatures(op)
move_instances(op)
adjust_bones(op)
# In the first pass, create the vgraph from the forest from the glTF file,
# making one OBJECT for each node
#
# OBJ
# / \
# OBJ OBJ
# / \
# OBJ OBJ
#
# (The ROOT is also added, but we won't draw it)
def initial_vtree(op):
nodes = op.gltf.get('nodes', [])
op.node_id_to_vnode = {}
# Create a vnode for each node
for node_id, node in enumerate(nodes):
vnode = VNode()
vnode.node_id = node_id
vnode.name = node.get('name', 'nodes[%d]' % node_id)
vnode.trs = get_node_trs(op, node)
vnode.type = 'OBJECT'
if 'mesh' in node:
vnode.mesh = {
'mesh': node['mesh'],
'primitive_idx': None, # use all primitives
'skin': node.get('skin'),
'weights': node.get('weights', op.gltf['meshes'][node['mesh']].get('weights')),
}
if 'camera' in node:
vnode.camera = {
'camera': node['camera'],
}
if 'KHR_lights_punctual' in node.get('extensions', {}):
vnode.light = {
'light': node['extensions']['KHR_lights_punctual']['light'],
}
op.node_id_to_vnode[node_id] = vnode
# Fill in the parent/child relationships
for node_id, node in enumerate(nodes):
vnode = op.node_id_to_vnode[node_id]
for child_id in node.get('children', []):
child_vnode = op.node_id_to_vnode[child_id]
# Prevent cycles
assert(child_vnode.parent == None)
child_vnode.parent = vnode
vnode.children.append(child_vnode)
# Add a root node to make the forest of vnodes into a tree.
op.root_vnode = VNode()
op.root_vnode.type = 'ROOT'
for vnode in op.node_id_to_vnode.values():
if vnode.parent == None:
vnode.parent = op.root_vnode
op.root_vnode.children.append(vnode)
# There is no special kind of node used for skinning in glTF. Joints are just
# regular nodes. But in Blender, only a bone can be used for skinning and bones
# are descendants of armatures.
#
# In the second pass we insert enough ARMATURE vnodes into the vtree so that
# every vnode which is the joint of a skin is a descendant of an ARMATURE. All
# descendants of ARMATURES are then turned into bones.
#
# OBJ
# / \
# OBJ ARMA
# |
# BONE
# / \
# BONE BONE
def insert_armatures(op):
# Insert an armature for every skin
skins = op.gltf.get('skins', [])
for skin_id, skin in enumerate(skins):
armature = VNode()
armature.name = skin.get('name', 'skins[%d]' % skin_id)
armature.type = 'ARMATURE'
# We're going to find a place to insert the armature. It must be above
# all of the joint nodes.
vnodes_below = [op.node_id_to_vnode[joint_id] for joint_id in skin['joints']]
# Add in the skeleton node too (which we hope is an ancestor of the joints).
if 'skeleton' in skin:
vnodes_below.append(op.node_id_to_vnode[skin['skeleton']])
ancestor = lowest_common_ancestor(vnodes_below)
ancestor_is_joint = ancestor.node_id in skin['joints']
if ancestor_is_joint:
insert_above(ancestor, armature)
else:
insert_below(ancestor, armature)
# Walk down the tree, marking all children of armatures as bones and
# deleting any armature which is a descendant of another.
def visit(vnode, armature_ancestor):
# Make a copy of this because we don't want it to change (when we delete
# a vnode) while we're in the middle of iterating it
children = list(vnode.children)
# If we are below an armature...
if armature_ancestor:
# Found an armature descended of another
if vnode.type == 'ARMATURE':
remove_vnode(vnode)
else:
vnode.type = 'BONE'
vnode.armature_vnode = armature_ancestor
else:
if vnode.type == 'ARMATURE':
armature_ancestor = vnode
for child in children:
visit(child, armature_ancestor)
visit(op.root_vnode, None)
# Now we need to enforce Blender's rule that (1) an object may have only one
# data instance (ie. only one of a mesh or a camera or a light), and (2) a bone
# may not have a data instance at all. We also need to move all cameras/lights
# to new children so that we have somewhere to hang the glTF->Blender axis
# conversion they need.
#
#
# OBJ Eg. if there was a mesh and camera on OBJ1
# / \ we will move the camera to a new child OBJ3
# OBJ1 ARMA (leaving the mesh on OBJ1).
# / | And if there was a mesh on BONE2 we will move
# OBJ3 BONE the mesh to OBJ4
# / \
# BONE BONE2
# |
# OBJ4
def move_instances(op):
def move_instance_to_new_child(vnode, key):
inst = getattr(vnode, key)
setattr(vnode, key, None)
if key == 'mesh':
id = inst['mesh']
name = op.gltf['meshes'][id].get('name', 'meshes[%d]' % id)
elif key == 'camera':
id = inst['camera']
name = op.gltf['cameras'][id].get('name', 'cameras[%d]' % id)
elif key == 'light':
id = inst['light']
lights = op.gltf['extensions']['KHR_lights_punctual']['lights']
name = lights[id].get('name', 'lights[%d]' % id)
else:
assert(False)
new_child = VNode()
new_child.name = name
new_child.parent = vnode
vnode.children.append(new_child)
new_child.type = 'OBJECT'
setattr(new_child, key, inst)
setattr(vnode, key + '_moved_to', [new_child])
if key in ['camera', 'light']:
# Quarter-turn around the X-axis. Needed for cameras or lights that
# point along the -Z axis in Blender but glTF says should look along the
# -Y axis
new_child.trs = (
new_child.trs[0],
Quaternion((2**(-1/2), 2**(-1/2), 0, 0)),
new_child.trs[2]
)
return new_child
def visit(vnode):
# Make a copy of this so we don't re-process new children we just made
children = list(vnode.children)
# Always move a camera or light to a child because it needs the
# gltf->Blender axis conversion
if vnode.camera:
move_instance_to_new_child(vnode, 'camera')
if vnode.light:
move_instance_to_new_child(vnode, 'light')
if vnode.mesh and vnode.type == 'BONE':
move_instance_to_new_child(vnode, 'mesh')
for child in children:
visit(child)
visit(op.root_vnode)
# The user can request that meshes be split into their primitives, like this
#
# OBJ => OBJ
# (mesh) / | \
# OBJ OBJ OBJ
# (mesh)(mesh)(mesh)
if op.options['split_meshes']:
def visit(vnode):
children = list(vnode.children)
if vnode.mesh is not None:
num_prims = len(op.gltf['meshes'][vnode.mesh['mesh']]['primitives'])
if num_prims > 1:
new_children = []
for prim_idx in range(0, num_prims):
child = VNode()
child.name = mesh_name(op, (vnode.mesh['mesh'], prim_idx))
child.type = 'OBJECT'
child.parent = vnode
child.mesh = {
'mesh': vnode.mesh['mesh'],
'skin': vnode.mesh['skin'],
'weights': vnode.mesh['weights'],
'primitive_idx': prim_idx,
}
new_children.append(child)
vnode.mesh = None
vnode.children += new_children
vnode.mesh_moved_to = new_children
for child in children:
visit(child)
visit(op.root_vnode)
# Here's the complicated pass.
#
# Brief review: every bone in glTF has a local-to-parent transform T(b;pose).
# Sometimes we suppress the dependence on the pose and just write T(b). The
# composition with the parent's local-to-parent, and so on up the armature is
# the local-to-armature transform
#
# L(b) = T(root) ... T(ppb) T(pb) T(b)
#
# where pb is the parent of b, ppb is the grandparent, etc. In Blender the
# local-to-armature is
#
# LB(b) = E(root) P(root) ... E(ppb) P(ppb) E(pb) P(pb) E(b) P(b)
#
# where E(b) is a TR transform for the edit bone and P(b) is a TRS transform for
# the pose bone.
#
# NOTE: I am not entirely sure of that formula.
#
# In the rest position P(b;rest) = 1 for all b, so we would like to just make
# E(b) = T(b;rest), but we can't since T(b;rest) might have a scaling, and we
# also want to try to rotate T(b) so we can pick which way the Blender
# octahedron points.
#
# So we're going to change T(b). For every bone b pick a rotation cr(b) and a
# scalar cs(b) and define the correction matrix for b to be
#
# C(b) = Rot[cr(b)] HomScale[cs(b)]
#
# and transform T(b) to
#
# T'(b) = C(pb)^{-1} T(b) C(b)
#
# If we compute L'(b) using the T'(b), most of the C terms cancel out and we get
#
# L'(b) = L(b) C(b)
#
# This is close enough; we'll be able to cancel off the extra C(b) later.
#
# How do we pick C(b)? Assume we've already computed C(pb) and calculate T'(b)
#
# T'(b)
# = C(pb)^{-1} T(b) C(b)
# = Rot[cr(pb)^{-1}] HomScale[1/cs(pb)]
# Trans[t] Rot[r] Scale[s]
# Rot[cr(b)] HomScale[cs(b)]
# { floating the Trans to the left, combining Rots }
# = Trans[ Rot[cr(pb)^{-1}] t / cs(pb) ]
# Rot[cr(pb)^{-1} r] HomScale[1/cs(pb)] Scale[s]
# Rot[cr(b)] HomScale[cs(b)]
#
# Now assume Scale[s] = HomScale[s] (and s is not 0), ie. the bone has a
# homogeneous scaling. Then we can rearrange this and get
#
# Trans[ Rot[cr(pb)^{-1}] t / cs(pb) ]
# Rot[cr(pb)^{-1} r cr(b)]
# HomScale[s cs(b) / cs(pb)]
#
# Now if we want the rotation to be R we can pick cr(b) = r^{-1} cr(pb) R. We
# also want the scale to be 1, because again, E(b) has a scaling of 1 in Blender
# always, so we pick cs(b) = cs(pb) / s.
#
# Okay, cool, so this is now a TR matrix and we can identify it with E(b).
#
# But what if Scale[s] **isn't** homogeneous? We appear to have no choice but to
# put it on P(b;loadtime) for some non-rest pose we'll set at load time. This is
# unfortunate because the rest pose in Blender won't be the same as the rest
# pose in glTF (and there's inverse bind matrix fallout too).
#
# So in that case we'll take C(b) = 1, and set
#
# E(b) = Trans[ Rot[cr(pb)^{-1}] t / cs(pb) ] Rot[cr(pb)^{-1} r]
# P(b;loadtime) = Scale[s / cs(pb)]
#
# So in both cases we now have LB(b) = L'(b).
#
# TODO: we can still pick a rotation when the scaling is heterogeneous
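# Worked example of the cancellation above (an illustrative sketch, not used by
# the importer). For a two-bone chain root -> b, where the root's parent has no
# correction (C = 1):
#
#   T'(root) = T(root) C(root)
#   T'(b)    = C(root)^{-1} T(b) C(b)
#
#   L'(b) = T'(root) T'(b)
#         = T(root) C(root) C(root)^{-1} T(b) C(b)
#         = T(root) T(b) C(b)
#         = L(b) C(b)
#
# which is the extra C(b) factor that gets cancelled off later, when objects
# parented to bones are given a C(b)^{-1} transform.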
# Maps an axis into a rotation carrying that axis into +Y
AXIS_TO_PLUS_Y = {
'-X': Euler([0, 0, -pi/2]).to_quaternion(),
'+X': Euler([0, 0, pi/2]).to_quaternion(),
'-Y': Euler([pi, 0, 0]).to_quaternion(),
'+Y': Euler([0, 0, 0]).to_quaternion(),
'-Z': Euler([pi/2, 0, 0]).to_quaternion(),
'+Z': Euler([-pi/2, 0, 0]).to_quaternion(),
}
def adjust_bones(op):
# List of distances between bone heads (used for computing bone lengths)
interbone_dists = []
def visit_bone(vnode):
t, r, s = vnode.trs
cr_pb_inv = vnode.parent.correction_rotation.conjugated()
cs_pb = vnode.parent.correction_homscale
# Trans[ Rot[cr(pb)^{-1}] t / cs(pb) ]
editbone_t = mul(cr_pb_inv, t) / cs_pb
if is_non_degenerate_homscale(s):
            # s is a homogeneous scaling (ie. scalar multiplication)
s = s[0]
# cs(b) = cs(pb) / s
vnode.correction_homscale = cs_pb / s
if op.options['bone_rotation_mode'] == 'POINT_TO_CHILDREN':
# We always pick a rotation for cr(b) that is, up to sign, a permutation of
# the basis vectors. This is necessary for some of the algebra to work out
                # in animation importing.
# General idea: assume we have one child. We want to rotate so
                # that our tail comes close to the child's head. Our tail lies
# on our +Y axis. The child head is going to be Rot[cr(b)^{-1}]
# child_t / cs(b) where b is us and child_t is the child's
# trs[0]. So we want to choose cr(b) so that this is as close as
# possible to +Y, ie. we want to rotate it so that its largest
# component is along the +Y axis. Note that only the sign of
# cs(b) affects this, not its magnitude (since the largest
# component of v, 2v, 3v, etc. are all the same).
                # Pick the target to rotate towards. If we have one child, use
# that.
if len(vnode.children) == 1:
target = vnode.children[0].trs[0]
elif len(vnode.children) == 0:
# As though we had a child displaced the same way we were
# from our parent.
target = vnode.trs[0]
else:
# Mean of all our children.
center = Vector((0, 0, 0))
for child in vnode.children:
center += child.trs[0]
center /= len(vnode.children)
target = center
if cs_pb / s < 0:
target = -target
x, y, z = abs(target[0]), abs(target[1]), abs(target[2])
if x > y and x > z:
axis = '-X' if target[0] < 0 else '+X'
elif z > x and z > y:
axis = '-Z' if target[2] < 0 else '+Z'
else:
axis = '-Y' if target[1] < 0 else '+Y'
cr_inv = AXIS_TO_PLUS_Y[axis]
cr = cr_inv.conjugated()
elif op.options['bone_rotation_mode'] == 'NONE':
cr = Quaternion((1, 0, 0, 0))
else:
assert(False)
vnode.correction_rotation = cr
# cr(pb)^{-1} r cr(b)
editbone_r = mul(mul(cr_pb_inv, r), cr)
else:
# TODO: we could still use a rotation here.
# C(b) = 1
vnode.correction_rotation = Quaternion((1, 0, 0, 0))
vnode.correction_homscale = 1
# E(b) = Trans[ Rot[cr(pb)^{-1}] t / cs(pb) ] Rot[cr(pb)^{-1} r]
# P(b;loadtime) = Scale[s / cs(pb)]
editbone_r = mul(cr_pb_inv, r)
vnode.pose_s = s / cs_pb
vnode.editbone_tr = editbone_t, editbone_r
vnode.editbone_local_to_armature = mul(
vnode.parent.editbone_local_to_armature,
mul(Matrix.Translation(editbone_t), editbone_r.to_matrix().to_4x4())
)
interbone_dists.append(editbone_t.length)
# Try getting a bone length for our parent. The length that makes its
# tail meet our head is considered best. Since the tail always lies
        # along the +Y ray, the closer we are to this ray the better our
        # length will be compared to the lengths chosen by our siblings. This is
        # measured by the "goodness". Among siblings with equal goodness, we
# pick the smaller length, so the parent's tail will meet the nearest
# child.
vnode.bone_length_goodness = -99999
if vnode.parent.type == 'BONE':
t_len = editbone_t.length
if t_len > 0.0005:
goodness = editbone_t.dot(Vector((0, 1, 0))) / t_len
if goodness > vnode.parent.bone_length_goodness:
if vnode.parent.bone_length == 0 or vnode.parent.bone_length > t_len:
vnode.parent.bone_length = t_len
vnode.parent.bone_length_goodness = goodness
# Recurse
for child in vnode.children:
if child.type == 'BONE':
visit_bone(child)
# We're on the way back up. Last chance to set our bone length if none
# of our children did. Use our parent's, if it has one. Otherwise, use
        # the average inter-bone distance, if it's not 0. Otherwise, just use 1
# -_-
if not vnode.bone_length:
if vnode.parent.bone_length:
vnode.bone_length = vnode.parent.bone_length
else:
avg = sum(interbone_dists) / max(1, len(interbone_dists))
if avg > 0.0005:
vnode.bone_length = avg
else:
vnode.bone_length = 1
def visit(vnode):
if vnode.type == 'ARMATURE':
for child in vnode.children:
visit_bone(child)
else:
for child in vnode.children:
visit(child)
visit(op.root_vnode)
# Remember that L'(b) = L(b) C(b)? Remember that we had to move any
# mesh/camera/light on a bone to an object? That's the perfect place to put
# a transform of C(b)^{-1} to cancel out that extra factor!
def visit_object_child_of_bone(vnode):
t, r, s = vnode.trs
# This moves us back along the bone, because for some reason Blender
# puts us at the tail of the bone, not the head
t -= Vector((0, vnode.parent.bone_length, 0))
# Rot[cr^{-1}] HomScale[1/cs] Trans[t] Rot[r] Scale[s]
# = Trans[ Rot[cr^{-1}] t / cs] Rot[cr^{-1} r] Scale[s / cs]
cr_inv = vnode.parent.correction_rotation.conjugated()
cs = vnode.parent.correction_homscale
t = mul(cr_inv, t) / cs
r = mul(cr_inv, r)
s /= cs
vnode.trs = t, r, s
def visit(vnode):
if vnode.type == 'OBJECT' and vnode.parent.type == 'BONE':
visit_object_child_of_bone(vnode)
for child in vnode.children:
visit(child)
visit(op.root_vnode)
# Helper functions below here:
def get_node_trs(op, node):
"""Gets the TRS proerties from a glTF node JSON object."""
if 'matrix' in node:
m = node['matrix']
# column-major to row-major
m = Matrix([m[0:4], m[4:8], m[8:12], m[12:16]])
m.transpose()
loc, rot, sca = m.decompose()
# wxyz -> xyzw
# convert_rotation will switch back
rot = [rot[1], rot[2], rot[3], rot[0]]
else:
sca = node.get('scale', [1.0, 1.0, 1.0])
rot = node.get('rotation', [0.0, 0.0, 0.0, 1.0])
loc = node.get('translation', [0.0, 0.0, 0.0])
# Switch glTF coordinates to Blender coordinates
sca = op.convert_scale(sca)
rot = op.convert_rotation(rot)
loc = op.convert_translation(loc)
return [Vector(loc), Quaternion(rot), Vector(sca)]
def lowest_common_ancestor(vnodes):
"""
    Compute the lowest common ancestor of vnodes, ie. the lowest node of which
    all the given vnodes are (possibly improper) descendants.
"""
assert(vnodes)
def ancestor_list(vnode):
"""
Computes the ancestor-list of vnode: the list of all its ancestors
starting at the root and ending at vnode itself.
"""
chain = []
while vnode:
chain.append(vnode)
vnode = vnode.parent
chain.reverse()
return chain
def first_difference(l1, l2):
"""
Returns the index of the first difference in two lists, or None if one is
a prefix of the other.
"""
i = 0
while True:
if i == len(l1) or i == len(l2):
return None
if l1[i] != l2[i]:
return i
i += 1
# Ancestor list for the lowest common ancestor so far
lowest_ancestor_list = ancestor_list(vnodes[0])
for vnode in vnodes[1:]:
cur_ancestor_list = ancestor_list(vnode)
d = first_difference(lowest_ancestor_list, cur_ancestor_list)
if d is None:
if len(cur_ancestor_list) < len(lowest_ancestor_list):
lowest_ancestor_list = cur_ancestor_list
else:
lowest_ancestor_list = lowest_ancestor_list[:d]
return lowest_ancestor_list[-1]
def insert_above(vnode, new_parent):
"""
Inserts new_parent between vnode and its parent. That is, turn
parent -> sister parent -> sister
-> vnode into -> new_parent -> vnode
-> sister -> sister
"""
if not vnode.parent:
vnode.parent = new_parent
new_parent.parent = None
new_parent.children = [vnode]
else:
parent = vnode.parent
i = parent.children.index(vnode)
parent.children[i] = new_parent
new_parent.parent = parent
new_parent.children = [vnode]
vnode.parent = new_parent
def insert_below(vnode, new_child):
"""
Insert new_child between vnode and its children. That is, turn
vnode -> child vnode -> new_child -> child
-> child into -> child
-> child -> child
"""
children = vnode.children
vnode.children = [new_child]
new_child.parent = vnode
new_child.children = children
for child in children:
child.parent = new_child
def remove_vnode(vnode):
"""
Remove vnode from the tree, replacing it with its children. That is, turn
parent -> sister parent -> sister
-> vnode -> child into -> child
-> sister -> sister
"""
assert(vnode.parent) # will never be called on the root
parent = vnode.parent
children = vnode.children
i = parent.children.index(vnode)
parent.children = (
parent.children[:i] +
children +
parent.children[i+1:]
)
for child in children:
child.parent = parent
vnode.parent = None
vnode.children = []
def is_non_degenerate_homscale(s):
"""Returns true if Scale[s] is multiplication by a non-zero scalar."""
largest = max(abs(x) for x in s)
smallest = min(abs(x) for x in s)
if smallest < 1e-5:
# Too small; consider it zero
return False
return largest - smallest < largest * 0.001
| mit | -7,500,626,219,166,107,000 | 34.711429 | 95 | 0.552524 | false | 3.493292 | false | false | false |
ff0000/red-fab-deploy2 | fab_deploy2/operating_systems/ubuntu/gunicorn.py | 1 | 1886 | import os
from fab_deploy2.base import gunicorn as base_gunicorn
from fab_deploy2.tasks import task_method
from fab_deploy2 import functions
from fabric.api import sudo, env
from fabric.contrib.files import append
class Gunicorn(base_gunicorn.Gunicorn):
"""
Install gunicorn and set it up with supervisor.
"""
user = 'www-data'
group = 'www-data'
daemonize = False
conf_location = '/etc/supervisor/conf.d/'
@task_method
def start(self):
functions.execute_on_host('utils.start_or_restart_supervisor', name=self.gunicorn_name)
@task_method
def stop(self):
sudo('supervisorctl stop %s' % self.gunicorn_name)
def _setup_service(self, env_value=None):
installed = functions.execute_on_host('utils.install_package', package_name='supervisor')
if installed:
sudo('update-rc.d supervisor defaults')
if self.conf_location:
gunicorn_conf = os.path.join(env.configs_path,
"gunicorn/supervisor_{0}.conf".format(self.gunicorn_name))
sudo('ln -sf {0} {1}'.format(gunicorn_conf, self.conf_location))
def upload_templates(self):
context = super(Gunicorn, self).upload_templates()
functions.render_template("gunicorn/supervisor_gunicorn.conf",
os.path.join(env.configs_path, "gunicorn/supervisor_{0}.conf".format(self.gunicorn_name)),
context=context)
return context
def _setup_rotate(self, path):
text = [
"%s {" % path,
" copytruncate",
" size 1M",
" rotate 5",
"}"]
sudo('touch /etc/logrotate.d/%s.conf' % self.gunicorn_name)
for t in text:
append('/etc/logrotate.d/%s.conf' % self.gunicorn_name,
t, use_sudo=True)
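# For reference, the logrotate file generated above ends up looking like this
# (illustrative, assembled from the `text` list; <path> is the path argument):
#   <path> {
#    copytruncate
#    size 1M
#    rotate 5
#   }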
Gunicorn().as_tasks()
| mit | 7,457,330,191,078,504,000 | 32.678571 | 114 | 0.605514 | false | 3.662136 | false | false | false |
peca3d/instanceAlongCurve | instanceAlongCurve.py | 1 | 45705 | import sys
import math
import random
import traceback
import maya.mel as mel
import pymel.core as pm
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.OpenMayaRender as OpenMayaRender
kPluginCmdName = "instanceAlongCurve"
kPluginNodeName = 'instanceAlongCurveLocator'
kPluginNodeClassify = 'utility/general'
kPluginNodeId = OpenMaya.MTypeId( 0x55555 )
# InstanceAlongCurve v1.0.2
class instanceAlongCurveLocator(OpenMayaMPx.MPxLocatorNode):
# Simple container class for compound vector attributes
class Vector3CompoundAttribute(object):
def __init__(self):
self.compound = OpenMaya.MObject()
self.x = OpenMaya.MObject()
self.y = OpenMaya.MObject()
self.z = OpenMaya.MObject()
# Input attributes
inputCurveAttr = OpenMaya.MObject()
inputTransformAttr = OpenMaya.MObject()
inputShadingGroupAttr = OpenMaya.MObject()
# Instance count related attributes
instanceCountAttr = OpenMaya.MObject()
instancingModeAttr = OpenMaya.MObject()
instanceLengthAttr = OpenMaya.MObject()
maxInstancesByLengthAttr = OpenMaya.MObject()
displayTypeAttr = OpenMaya.MObject()
bboxAttr = OpenMaya.MObject()
orientationModeAttr = OpenMaya.MObject()
inputOrientationAxisAttr = Vector3CompoundAttribute()
class RampAttributes(object):
def __init__(self):
self.ramp = OpenMaya.MObject() # normalized ramp
self.rampOffset = OpenMaya.MObject() # evaluation offset for ramp
self.rampAxis = OpenMaya.MObject() # ramp normalized axis
self.rampAmplitude = OpenMaya.MObject() # ramp amplitude
self.rampRandomAmplitude = OpenMaya.MObject() # ramp random amplitude
    # Simple container class for sampled ramp attribute values
class RampValueContainer(object):
def __init__(self, mObject, dataBlock, rampAttr, normalize):
self.ramp = OpenMaya.MRampAttribute(OpenMaya.MPlug(mObject, rampAttr.ramp))
self.rampOffset = dataBlock.inputValue(rampAttr.rampOffset).asFloat()
self.rampRandomAmplitude = dataBlock.inputValue(rampAttr.rampRandomAmplitude).asFloat()
self.rampAmplitude = dataBlock.inputValue(rampAttr.rampAmplitude).asFloat()
if normalize:
self.rampAxis = dataBlock.inputValue(rampAttr.rampAxis.compound).asVector().normal()
else:
self.rampAxis = dataBlock.inputValue(rampAttr.rampAxis.compound).asVector()
# Ramp attributes
positionRampAttr = RampAttributes()
rotationRampAttr = RampAttributes()
scaleRampAttr = RampAttributes()
# Output vectors
outputTranslationAttr = Vector3CompoundAttribute()
outputRotationAttr = Vector3CompoundAttribute()
outputScaleAttr = Vector3CompoundAttribute()
def __init__(self):
OpenMayaMPx.MPxLocatorNode.__init__(self)
# This method is not being called?
# def __del__(self):
# print "CALLBACK: " + str(callbackId)
# OpenMaya.MNodeMessage.removeCallback(self.callbackId)
# OpenMayaMPx.MPxLocatorNode.__del__(self)
def postConstructor(self):
OpenMaya.MFnDependencyNode(self.thisMObject()).setName("instanceAlongCurveLocatorShape#")
self.callbackId = OpenMaya.MNodeMessage.addAttributeChangedCallback(self.thisMObject(), self.attrChangeCallback)
self.updateInstanceConnections()
# Find original SG to reassign it to instance
def getShadingGroup(self):
inputSGPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.inputShadingGroupAttr)
sgNode = self.getSingleSourceObjectFromPlug(inputSGPlug)
if sgNode is not None and sgNode.hasFn(OpenMaya.MFn.kSet):
return OpenMaya.MFnSet(sgNode)
return None
def assignShadingGroup(self, fnDagNode):
fnSet = self.getShadingGroup()
if fnSet is not None:
# Easiest, cleanest way seems to be calling MEL.
# sets command handles everything, even nested instanced dag paths
mdgm = OpenMaya.MDGModifier()
mdgm.commandToExecute("sets -e -nw -fe " + fnSet.name() + " " + fnDagNode.name())
mdgm.doIt()
# Helper function to get an array of available logical indices from the sparse array
# TODO: maybe it can be precalculated?
def getAvailableLogicalIndices(self, plug, numIndices):
# Allocate and initialize
outIndices = OpenMaya.MIntArray(numIndices)
indices = OpenMaya.MIntArray(plug.numElements())
plug.getExistingArrayAttributeIndices(indices)
currentAvailableIndex = 0
indicesFound = 0
# Assuming indices are SORTED :)
for i in indices:
connectedPlug = plug.elementByLogicalIndex(i).isConnected()
# Iteratively find available indices in the sparse array
while i > currentAvailableIndex:
outIndices[indicesFound] = currentAvailableIndex
indicesFound += 1
currentAvailableIndex += 1
            # Check against this index, add it if it is not connected
            if i == currentAvailableIndex and not connectedPlug:
                outIndices[indicesFound] = currentAvailableIndex
                indicesFound += 1
            # Either way, step past this existing element so already connected
            # indices are never reported as available
            currentAvailableIndex += 1
if indicesFound == numIndices:
return outIndices
# Fill remaining expected indices
for i in xrange(indicesFound, numIndices):
outIndices[i] = currentAvailableIndex
currentAvailableIndex += 1
return outIndices
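    # Illustrative example of the helper above (an assumption about typical use,
    # not part of the original node): if the array plug has existing elements
    # {0 (connected), 2 (free), 5 (connected)} and numIndices is 4, the result
    # is [1, 2, 3, 4] -- the lowest logical indices free to receive a connection.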
def getNodeTransformFn(self):
dagNode = OpenMaya.MFnDagNode(self.thisMObject())
dagPath = OpenMaya.MDagPath()
dagNode.getPath(dagPath)
return OpenMaya.MFnDagNode(dagPath.transform())
def updateInstanceConnections(self):
# If the locator is being instanced, just stop updating its children.
# This is to prevent losing references to the locator instances' children
# If you want to change this locator, prepare the source before instantiating
if OpenMaya.MFnDagNode(self.thisMObject()).isInstanced():
return OpenMaya.kUnknownParameter
# Plugs
outputTranslationPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.outputTranslationAttr.compound)
outputRotationPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.outputRotationAttr.compound)
outputScalePlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.outputScaleAttr.compound)
expectedInstanceCount = self.getInstanceCountByMode()
numConnectedElements = outputTranslationPlug.numConnectedElements()
# Only instance if we are missing elements
# TODO: handle mismatches in translation/rotation plug connected elements (user deleted a plug? use connectionBroken method?)
if numConnectedElements < expectedInstanceCount:
inputTransformFn = self.getInputTransformFn()
if inputTransformFn is not None:
transformFn = self.getNodeTransformFn()
newInstancesCount = expectedInstanceCount - numConnectedElements
availableIndices = self.getAvailableLogicalIndices(outputTranslationPlug, newInstancesCount)
displayPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.displayTypeAttr)
LODPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.bboxAttr)
mdgModifier = OpenMaya.MDagModifier()
for i in availableIndices:
# Instance transform
# InstanceLeaf must be set to False to prevent crashes :)
trInstance = inputTransformFn.duplicate(True, False)
instanceFn = OpenMaya.MFnTransform(trInstance)
# Parent new instance
transformFn.addChild(trInstance)
instanceTranslatePlug = instanceFn.findPlug('translate', False)
outputTranslationPlugElement = outputTranslationPlug.elementByLogicalIndex(i)
instanceRotationPlug = instanceFn.findPlug('rotate', False)
outputRotationPlugElement = outputRotationPlug.elementByLogicalIndex(i)
instanceScalePlug = instanceFn.findPlug('scale', False)
outputScalePlugElement = outputScalePlug.elementByLogicalIndex(i)
# Enable drawing overrides
overrideEnabledPlug = instanceFn.findPlug("overrideEnabled", False)
overrideEnabledPlug.setBool(True)
instanceDisplayPlug = instanceFn.findPlug("overrideDisplayType", False)
instanceLODPlug = instanceFn.findPlug("overrideLevelOfDetail", False)
if not outputTranslationPlugElement.isConnected():
mdgModifier.connect(outputTranslationPlugElement, instanceTranslatePlug)
if not outputRotationPlugElement.isConnected():
mdgModifier.connect(outputRotationPlugElement, instanceRotationPlug)
if not outputScalePlugElement.isConnected():
mdgModifier.connect(outputScalePlugElement, instanceScalePlug)
if not instanceDisplayPlug.isConnected():
mdgModifier.connect(displayPlug, instanceDisplayPlug)
if not instanceLODPlug.isConnected():
mdgModifier.connect(LODPlug, instanceLODPlug)
mdgModifier.doIt()
# Finally, assign SG to all children
self.assignShadingGroup(transformFn)
# Remove instances if necessary
elif numConnectedElements > expectedInstanceCount:
connections = OpenMaya.MPlugArray()
toRemove = numConnectedElements - expectedInstanceCount
mdgModifier = OpenMaya.MDGModifier()
for i in xrange(toRemove):
outputTranslationPlugElement = outputTranslationPlug.connectionByPhysicalIndex(numConnectedElements - 1 - i)
outputTranslationPlugElement.connectedTo(connections, False, True)
for c in xrange(connections.length()):
mdgModifier.deleteNode(connections[c].node())
mdgModifier.doIt()
def attrChangeCallback(self, msg, plug, otherPlug, clientData):
incomingDirection = (OpenMaya.MNodeMessage.kIncomingDirection & msg) == OpenMaya.MNodeMessage.kIncomingDirection
attributeSet = (OpenMaya.MNodeMessage.kAttributeSet & msg) == OpenMaya.MNodeMessage.kAttributeSet
isCorrectAttribute = (plug.attribute() == instanceAlongCurveLocator.instanceCountAttr)
isCorrectAttribute = isCorrectAttribute or (plug.attribute() == instanceAlongCurveLocator.instancingModeAttr)
isCorrectAttribute = isCorrectAttribute or (plug.attribute() == instanceAlongCurveLocator.instanceLengthAttr)
isCorrectAttribute = isCorrectAttribute or (plug.attribute() == instanceAlongCurveLocator.maxInstancesByLengthAttr)
isCorrectNode = OpenMaya.MFnDependencyNode(plug.node()).typeName() == kPluginNodeName
try:
if isCorrectNode and isCorrectAttribute and attributeSet and incomingDirection:
self.updateInstanceConnections()
except:
sys.stderr.write('Failed trying to update instances. stack trace: \n')
sys.stderr.write(traceback.format_exc())
def getSingleSourceObjectFromPlug(self, plug):
if plug.isConnected():
# Get connected input plugs
connections = OpenMaya.MPlugArray()
plug.connectedTo(connections, True, False)
# Find input transform
if connections.length() == 1:
return connections[0].node()
return None
def getInputTransformFn(self):
inputTransformPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.inputTransformAttr)
transform = self.getSingleSourceObjectFromPlug(inputTransformPlug)
# Get Fn from a DAG path to get the world transformations correctly
if transform is not None and transform.hasFn(OpenMaya.MFn.kTransform):
path = OpenMaya.MDagPath()
trFn = OpenMaya.MFnDagNode(transform)
trFn.getPath(path)
return OpenMaya.MFnTransform(path)
return None
def getCurveFn(self):
inputCurvePlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.inputCurveAttr)
curve = self.getSingleSourceObjectFromPlug(inputCurvePlug)
# Get Fn from a DAG path to get the world transformations correctly
if curve is not None:
path = OpenMaya.MDagPath()
trFn = OpenMaya.MFnDagNode(curve)
trFn.getPath(path)
path.extendToShape()
if path.node().hasFn(OpenMaya.MFn.kNurbsCurve):
return OpenMaya.MFnNurbsCurve(path)
return None
# Calculate expected instances by the instancing mode
def getInstanceCountByMode(self):
instancingModePlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.instancingModeAttr)
inputCurvePlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.inputCurveAttr)
if inputCurvePlug.isConnected() and instancingModePlug.asInt() == 1:
instanceLengthPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.instanceLengthAttr)
maxInstancesByLengthPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.maxInstancesByLengthAttr)
curveFn = self.getCurveFn()
return min(maxInstancesByLengthPlug.asInt(), int(curveFn.length() / instanceLengthPlug.asFloat()))
instanceCountPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.instanceCountAttr)
return instanceCountPlug.asInt()
def getParamOffset(self):
p = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.distOffsetAttr)
return p.asFloat()
def getRandomizedValue(self, random, randomAmplitude, value):
return (random.random() * 2.0 - 1.0) * randomAmplitude + value
def updateInstancePositions(self, curveFn, dataBlock, count, distOffset ):
point = OpenMaya.MPoint()
curveLength = curveFn.length()
translateArrayHandle = dataBlock.outputArrayValue(instanceAlongCurveLocator.outputTranslationAttr.compound)
# Deterministic random
random.seed(count)
rampValues = instanceAlongCurveLocator.RampValueContainer(self.thisMObject(), dataBlock, instanceAlongCurveLocator.positionRampAttr, False)
# Make sure there are enough handles...
for i in xrange(min(count, translateArrayHandle.elementCount())):
rampValue = self.getRampValueAtPosition(rampValues, i, count)
dist = curveLength * (i / float(count)) + distOffset
#normalize
if( dist > 0.0 ):
while( dist > curveLength ):
dist = dist - curveLength
elif( dist < 0.0 ):
while( dist < 0.0 ):
dist = dist + curveLength
            # EP curves **really** don't like param at 0.0 (crashes)
param = max( min( curveFn.findParamFromLength( dist ), curveLength ), 0.001 )
curveFn.getPointAtParam(param, point)
try:
normal = curveFn.normal(param)
tangent = curveFn.tangent(param)
bitangent = (normal ^ tangent)
except:
print 'curveFn normal get error. param:%f/length:%f' % ( param, curveLength )
twistNormal = normal * self.getRandomizedValue(random, rampValues.rampRandomAmplitude, rampValue * rampValues.rampAmplitude) * rampValues.rampAxis.x
twistBitangent = bitangent * self.getRandomizedValue(random, rampValues.rampRandomAmplitude, rampValue * rampValues.rampAmplitude) * rampValues.rampAxis.y
twistTangent = tangent * self.getRandomizedValue(random, rampValues.rampRandomAmplitude, rampValue * rampValues.rampAmplitude) * rampValues.rampAxis.z
point += twistNormal + twistTangent + twistBitangent
translateArrayHandle.jumpToArrayElement(i)
translateHandle = translateArrayHandle.outputValue()
translateHandle.set3Double(point.x, point.y, point.z)
translateArrayHandle.setAllClean()
translateArrayHandle.setClean()
def getRampValueAtPosition(self, rampValues, i, count):
util = OpenMaya.MScriptUtil()
util.createFromDouble(0.0)
valuePtr = util.asFloatPtr()
position = math.fmod((i / float(count)) + rampValues.rampOffset, 1.0)
rampValues.ramp.getValueAtPosition(position, valuePtr)
return util.getFloat(valuePtr)
def updateInstanceScale(self, curveFn, dataBlock, count):
point = OpenMaya.MPoint()
scaleArrayHandle = dataBlock.outputArrayValue(instanceAlongCurveLocator.outputScaleAttr.compound)
# Deterministic random
random.seed(count)
rampValues = instanceAlongCurveLocator.RampValueContainer(self.thisMObject(), dataBlock, instanceAlongCurveLocator.scaleRampAttr, False)
# Make sure there are enough handles...
for i in xrange(min(count, scaleArrayHandle.elementCount())):
rampValue = self.getRampValueAtPosition(rampValues, i, count)
point.x = self.getRandomizedValue(random, rampValues.rampRandomAmplitude, rampValue * rampValues.rampAmplitude) * rampValues.rampAxis.x
point.y = self.getRandomizedValue(random, rampValues.rampRandomAmplitude, rampValue * rampValues.rampAmplitude) * rampValues.rampAxis.y
point.z = self.getRandomizedValue(random, rampValues.rampRandomAmplitude, rampValue * rampValues.rampAmplitude) * rampValues.rampAxis.z
scaleArrayHandle.jumpToArrayElement(i)
scaleHandle = scaleArrayHandle.outputValue()
scaleHandle.set3Double(point.x, point.y, point.z)
scaleArrayHandle.setAllClean()
scaleArrayHandle.setClean()
def updateInstanceRotations(self, curveFn, dataBlock, count, distOffset ):
point = OpenMaya.MPoint()
curveLength = curveFn.length()
rotationArrayHandle = dataBlock.outputArrayValue(instanceAlongCurveLocator.outputRotationAttr.compound)
startOrientation = dataBlock.outputValue(instanceAlongCurveLocator.inputOrientationAxisAttr.compound).asVector().normal()
# Deterministic random
random.seed(count)
rampValues = instanceAlongCurveLocator.RampValueContainer(self.thisMObject(), dataBlock, instanceAlongCurveLocator.rotationRampAttr, True)
rotMode = dataBlock.inputValue(instanceAlongCurveLocator.orientationModeAttr).asInt()
inputTransformPlug = OpenMaya.MPlug(self.thisMObject(), instanceAlongCurveLocator.inputTransformAttr)
inputTransformRotation = OpenMaya.MQuaternion()
if inputTransformPlug.isConnected():
self.getInputTransformFn().getRotation(inputTransformRotation, OpenMaya.MSpace.kWorld)
for i in xrange(min(count, rotationArrayHandle.elementCount())):
rampValue = self.getRampValueAtPosition(rampValues, i, count)
dist = curveLength * (i / float(count)) + distOffset
#normalize
if( dist > 0.0 ):
while( dist > curveLength ):
dist = dist - curveLength
elif( dist < 0.0 ):
while( dist < 0.0 ):
dist = dist + curveLength
            # EP curves **really** don't like param at 0.0 (crashes)
param = max( min( curveFn.findParamFromLength( dist ), curveLength ), 0.002 )
rot = OpenMaya.MQuaternion()
try:
normal = curveFn.normal(param)
tangent = curveFn.tangent(param)
bitangent = (normal ^ tangent)
except:
print 'curveFn normal get error. param:%f/length:%f' % ( param, curveLength )
if rotMode == 1:
rot = inputTransformRotation; # No realtime preview - use an inputRotation for that?
elif rotMode == 2:
rot = startOrientation.rotateTo(normal)
elif rotMode == 3:
rot = startOrientation.rotateTo(tangent)
elif rotMode == 4:
rot = startOrientation.rotateTo(tangent)
if i % 2 == 1:
rot *= OpenMaya.MQuaternion(3.141592 * .5, tangent)
twistNormal = self.getRandomizedValue(random, rampValues.rampRandomAmplitude, rampValue * rampValues.rampAmplitude) * rampValues.rampAxis.x
twistNormal = OpenMaya.MQuaternion(twistNormal * 0.0174532925, normal) # DegToRad
twistTangent = self.getRandomizedValue(random, rampValues.rampRandomAmplitude, rampValue * rampValues.rampAmplitude) * rampValues.rampAxis.y
twistTangent = OpenMaya.MQuaternion(twistTangent * 0.0174532925, tangent) # DegToRad
twistBitangent = self.getRandomizedValue(random, rampValues.rampRandomAmplitude, rampValue * rampValues.rampAmplitude) * rampValues.rampAxis.z
twistBitangent = OpenMaya.MQuaternion(twistBitangent * 0.0174532925, bitangent) # DegToRad
rot = (rot * twistNormal * twistTangent * twistBitangent).asEulerRotation().asVector()
rotationArrayHandle.jumpToArrayElement(i)
rotationHandle = rotationArrayHandle.outputValue()
rotationHandle.set3Double(rot.x, rot.y, rot.z)
rotationArrayHandle.setAllClean()
rotationArrayHandle.setClean()
def isBounded(self):
return True
def boundingBox(self):
return OpenMaya.MBoundingBox(OpenMaya.MPoint(-1,-1,-1), OpenMaya.MPoint(1,1,1))
def compute(self, plug, dataBlock):
try:
curveDataHandle = dataBlock.inputValue(instanceAlongCurveLocator.inputCurveAttr)
curve = curveDataHandle.asNurbsCurveTransformed()
if not curve.isNull():
curveFn = OpenMaya.MFnNurbsCurve(curve)
instanceCount = self.getInstanceCountByMode()
distOffset = self.getParamOffset()
if plug == instanceAlongCurveLocator.outputTranslationAttr.compound:
self.updateInstancePositions(curveFn, dataBlock, instanceCount, distOffset)
if plug == instanceAlongCurveLocator.outputRotationAttr.compound:
self.updateInstanceRotations(curveFn, dataBlock, instanceCount, distOffset)
if plug == instanceAlongCurveLocator.outputScaleAttr.compound:
self.updateInstanceScale(curveFn, dataBlock, instanceCount)
except:
sys.stderr.write('Failed trying to compute locator. stack trace: \n')
sys.stderr.write(traceback.format_exc())
return OpenMaya.kUnknownParameter
@staticmethod
def nodeCreator():
return OpenMayaMPx.asMPxPtr( instanceAlongCurveLocator() )
@classmethod
def addCompoundVector3Attribute(cls, compoundAttribute, attributeName, unitType, arrayAttr, inputAttr, defaultValue):
unitAttr = OpenMaya.MFnUnitAttribute()
nAttr = OpenMaya.MFnNumericAttribute()
compoundAttribute.x = unitAttr.create(attributeName + "X", attributeName + "X", unitType, defaultValue.x)
unitAttr.setWritable( inputAttr )
cls.addAttribute(compoundAttribute.x)
compoundAttribute.y = unitAttr.create(attributeName + "Y", attributeName + "Y", unitType, defaultValue.y)
unitAttr.setWritable( inputAttr )
cls.addAttribute(compoundAttribute.y)
compoundAttribute.z = unitAttr.create(attributeName + "Z", attributeName + "Z", unitType, defaultValue.z)
unitAttr.setWritable( inputAttr )
cls.addAttribute(compoundAttribute.z)
# Output compound
compoundAttribute.compound = nAttr.create(attributeName, attributeName,
compoundAttribute.x, compoundAttribute.y, compoundAttribute.z)
nAttr.setWritable( inputAttr )
nAttr.setArray( arrayAttr )
nAttr.setUsesArrayDataBuilder( arrayAttr )
nAttr.setDisconnectBehavior(OpenMaya.MFnAttribute.kDelete)
cls.addAttribute(compoundAttribute.compound)
@classmethod
def addRampAttributes(cls, rampAttributes, attributeName, unitType, defaultAxisValue):
unitAttr = OpenMaya.MFnUnitAttribute()
nAttr = OpenMaya.MFnNumericAttribute()
rampAttributes.ramp = OpenMaya.MRampAttribute.createCurveRamp(attributeName + "Ramp", attributeName + "Ramp")
cls.addAttribute(rampAttributes.ramp)
rampAttributes.rampOffset = nAttr.create(attributeName + "RampOffset", attributeName + "RampOffset", OpenMaya.MFnNumericData.kFloat, 0.0)
nAttr.setKeyable( True )
cls.addAttribute( rampAttributes.rampOffset )
rampAttributes.rampAmplitude = nAttr.create(attributeName + "RampAmplitude", attributeName + "RampAmplitude", OpenMaya.MFnNumericData.kFloat, 1.0)
nAttr.setKeyable( True )
cls.addAttribute( rampAttributes.rampAmplitude )
rampAttributes.rampRandomAmplitude = nAttr.create(attributeName + "RampRandomAmplitude", attributeName + "RampRandomAmplitude", OpenMaya.MFnNumericData.kFloat, 0.0)
nAttr.setMin(0.0)
nAttr.setSoftMax(1.0)
nAttr.setKeyable( True )
cls.addAttribute( rampAttributes.rampRandomAmplitude )
cls.addCompoundVector3Attribute(rampAttributes.rampAxis, attributeName + "RampAxis", unitType, False, True, defaultAxisValue)
@staticmethod
def nodeInitializer():
# To make things more readable
node = instanceAlongCurveLocator
nAttr = OpenMaya.MFnNumericAttribute()
msgAttributeFn = OpenMaya.MFnMessageAttribute()
curveAttributeFn = OpenMaya.MFnTypedAttribute()
enumFn = OpenMaya.MFnEnumAttribute()
node.inputTransformAttr = msgAttributeFn.create("inputTransform", "it")
node.addAttribute( node.inputTransformAttr )
node.inputShadingGroupAttr = msgAttributeFn.create("inputShadingGroup", "iSG")
node.addAttribute( node.inputShadingGroupAttr )
# Input curve transform
node.inputCurveAttr = curveAttributeFn.create( 'inputCurve', 'curve', OpenMaya.MFnData.kNurbsCurve)
node.addAttribute( node.inputCurveAttr )
## Input instance count
node.instanceCountAttr = nAttr.create("instanceCount", "iic", OpenMaya.MFnNumericData.kInt, 5)
nAttr.setMin(1)
nAttr.setSoftMax(100)
nAttr.setChannelBox( False )
nAttr.setConnectable( False )
node.addAttribute( node.instanceCountAttr)
## curve parameter start offset
node.distOffsetAttr = nAttr.create("distOffset", "pOffset", OpenMaya.MFnNumericData.kFloat, 0.0)
node.addAttribute( node.distOffsetAttr )
## Max instances when defined by instance length
node.maxInstancesByLengthAttr = nAttr.create("maxInstancesByLength", "mibl", OpenMaya.MFnNumericData.kInt, 50)
nAttr.setMin(0)
nAttr.setSoftMax(200)
nAttr.setChannelBox( False )
nAttr.setConnectable( False )
node.addAttribute( node.maxInstancesByLengthAttr)
# Length between instances
node.instanceLengthAttr = nAttr.create("instanceLength", "ilength", OpenMaya.MFnNumericData.kFloat, 1.0)
nAttr.setMin(0.01)
nAttr.setSoftMax(1.0)
nAttr.setChannelBox( False )
nAttr.setConnectable( False )
node.addAttribute( node.instanceLengthAttr)
# Display override options
node.displayTypeAttr = enumFn.create('instanceDisplayType', 'idt')
enumFn.addField( "Normal", 0 );
enumFn.addField( "Template", 1 );
enumFn.addField( "Reference", 2 );
enumFn.setDefault("Reference")
node.addAttribute( node.displayTypeAttr )
# Enum for selection of instancing mode
node.instancingModeAttr = enumFn.create('instancingMode', 'instancingMode')
enumFn.addField( "Count", 0 );
enumFn.addField( "Distance", 1 );
node.addAttribute( node.instancingModeAttr )
# Enum for selection of orientation mode
node.orientationModeAttr = enumFn.create('orientationMode', 'rotMode')
enumFn.addField( "Identity", 0 );
enumFn.addField( "Copy from Source", 1 );
enumFn.addField( "Normal", 2 );
enumFn.addField( "Tangent", 3 );
enumFn.addField( "Chain", 4 );
enumFn.setDefault("Tangent")
node.addAttribute( node.orientationModeAttr )
node.addCompoundVector3Attribute(node.inputOrientationAxisAttr, "inputOrientationAxis", OpenMaya.MFnUnitAttribute.kDistance, False, True, OpenMaya.MVector(0.0, 0.0, 1.0))
node.bboxAttr = nAttr.create('instanceBoundingBox', 'ibb', OpenMaya.MFnNumericData.kBoolean)
node.addAttribute( node.bboxAttr )
node.addRampAttributes(node.positionRampAttr, "position", OpenMaya.MFnUnitAttribute.kDistance, OpenMaya.MVector(0.0, 0.0, 0.0))
node.addRampAttributes(node.rotationRampAttr, "rotation", OpenMaya.MFnUnitAttribute.kAngle, OpenMaya.MVector(0.0, 0.0, 0.0))
node.addRampAttributes(node.scaleRampAttr, "scale", OpenMaya.MFnUnitAttribute.kDistance, OpenMaya.MVector(1.0, 1.0, 1.0))
# Output attributes
node.addCompoundVector3Attribute(node.outputTranslationAttr, "outputTranslation", OpenMaya.MFnUnitAttribute.kDistance, True, False, OpenMaya.MVector(0.0, 0.0, 0.0))
node.addCompoundVector3Attribute(node.outputRotationAttr, "outputRotation", OpenMaya.MFnUnitAttribute.kAngle, True, False, OpenMaya.MVector(0.0, 0.0, 0.0))
node.addCompoundVector3Attribute(node.outputScaleAttr, "outputScale", OpenMaya.MFnUnitAttribute.kDistance, True, False, OpenMaya.MVector(1.0, 1.0, 1.0))
def rampAttributeAffects(rampAttributes, affectedAttr):
node.attributeAffects( rampAttributes.ramp, affectedAttr)
node.attributeAffects( rampAttributes.rampOffset, affectedAttr)
node.attributeAffects( rampAttributes.rampAmplitude, affectedAttr)
node.attributeAffects( rampAttributes.rampAxis.compound, affectedAttr)
node.attributeAffects( rampAttributes.rampRandomAmplitude, affectedAttr)
# Translation affects
node.attributeAffects( node.inputCurveAttr, node.outputTranslationAttr.compound )
node.attributeAffects( node.instanceCountAttr, node.outputTranslationAttr.compound)
node.attributeAffects( node.instanceLengthAttr, node.outputTranslationAttr.compound)
node.attributeAffects( node.instancingModeAttr, node.outputTranslationAttr.compound)
node.attributeAffects( node.maxInstancesByLengthAttr, node.outputTranslationAttr.compound)
node.attributeAffects( node.distOffsetAttr, node.outputTranslationAttr.compound )
rampAttributeAffects(node.positionRampAttr, node.outputTranslationAttr.compound)
# Rotation affects
node.attributeAffects( node.inputCurveAttr, node.outputRotationAttr.compound )
node.attributeAffects( node.instanceCountAttr, node.outputRotationAttr.compound)
node.attributeAffects( node.instanceLengthAttr, node.outputRotationAttr.compound)
node.attributeAffects( node.instancingModeAttr, node.outputRotationAttr.compound)
node.attributeAffects( node.maxInstancesByLengthAttr, node.outputRotationAttr.compound)
node.attributeAffects( node.orientationModeAttr, node.outputRotationAttr.compound)
node.attributeAffects( node.distOffsetAttr, node.outputRotationAttr.compound )
node.attributeAffects( node.inputOrientationAxisAttr.compound, node.outputRotationAttr.compound)
rampAttributeAffects(node.rotationRampAttr, node.outputRotationAttr.compound)
# Scale affects
node.attributeAffects( node.inputCurveAttr, node.outputScaleAttr.compound )
node.attributeAffects( node.instanceCountAttr, node.outputScaleAttr.compound)
node.attributeAffects( node.instanceLengthAttr, node.outputScaleAttr.compound)
node.attributeAffects( node.instancingModeAttr, node.outputScaleAttr.compound)
node.attributeAffects( node.maxInstancesByLengthAttr, node.outputScaleAttr.compound)
node.attributeAffects( node.distOffsetAttr, node.outputScaleAttr.compound )
rampAttributeAffects(node.scaleRampAttr, node.outputScaleAttr.compound)
def initializePlugin( mobject ):
mplugin = OpenMayaMPx.MFnPlugin( mobject )
try:
# Register command
mplugin.registerCommand( kPluginCmdName, instanceAlongCurveCommand.cmdCreator )
if OpenMaya.MGlobal.mayaState() != OpenMaya.MGlobal.kBatch:
mplugin.addMenuItem("Instance Along Curve", "MayaWindow|mainEditMenu", kPluginCmdName, "")
# Register AE template
pm.callbacks(addCallback=loadAETemplateCallback, hook='AETemplateCustomContent', owner=kPluginNodeName)
# Register node
mplugin.registerNode( kPluginNodeName, kPluginNodeId, instanceAlongCurveLocator.nodeCreator,
instanceAlongCurveLocator.nodeInitializer, OpenMayaMPx.MPxNode.kLocatorNode, kPluginNodeClassify )
except:
sys.stderr.write('Failed to register plugin instanceAlongCurve. stack trace: \n')
sys.stderr.write(traceback.format_exc())
raise
def uninitializePlugin( mobject ):
mplugin = OpenMayaMPx.MFnPlugin( mobject )
try:
mplugin.deregisterCommand( kPluginCmdName )
mplugin.deregisterNode( kPluginNodeId )
except:
        sys.stderr.write('Failed to deregister plugin instanceAlongCurve\n')
raise
###############
# AE TEMPLATE #
###############
def loadAETemplateCallback(nodeName):
AEinstanceAlongCurveLocatorTemplate(nodeName)
class AEinstanceAlongCurveLocatorTemplate(pm.ui.AETemplate):
def addControl(self, control, label=None, **kwargs):
pm.ui.AETemplate.addControl(self, control, label=label, **kwargs)
def beginLayout(self, name, collapse=True):
pm.ui.AETemplate.beginLayout(self, name, collapse=collapse)
def __init__(self, nodeName):
pm.ui.AETemplate.__init__(self,nodeName)
self.thisNode = None
self.node = pm.PyNode(self.nodeName)
if self.node.type() == kPluginNodeName:
self.beginScrollLayout()
self.beginLayout("Instance Along Curve Settings", collapse=0)
self.addControl("instancingMode", label="Instancing Mode", changeCommand=self.onInstanceModeChanged)
self.addControl("instanceCount", label="Count", changeCommand=self.onInstanceModeChanged)
self.addControl("instanceLength", label="Distance", changeCommand=self.onInstanceModeChanged)
self.addControl("maxInstancesByLength", label="Max Instances", changeCommand=self.onInstanceModeChanged)
self.addControl("distOffset", label="Initial Position Offset", changeCommand=lambda nodeName: self.updateDimming(nodeName, "distOffset"))
self.addSeparator()
self.addControl("orientationMode", label="Orientation Mode", changeCommand=lambda nodeName: self.updateDimming(nodeName, "orientationMode"))
self.addControl("inputOrientationAxis", label="Orientation Axis", changeCommand=lambda nodeName: self.updateDimming(nodeName, "inputOrientationAxis"))
self.addSeparator()
self.addControl("instanceDisplayType", label="Instance Display Type", changeCommand=lambda nodeName: self.updateDimming(nodeName, "instanceDisplayType"))
self.addControl("instanceBoundingBox", label="Use bounding box", changeCommand=lambda nodeName: self.updateDimming(nodeName, "instanceBoundingBox"))
self.addSeparator()
self.addControl("inputTransform", label="Input object", changeCommand=lambda nodeName: self.updateDimming(nodeName, "inputTransform"))
self.addControl("inputShadingGroup", label="Shading Group", changeCommand=lambda nodeName: self.updateDimming(nodeName, "inputShadingGroup"))
def showRampControls(rampName):
self.beginLayout(rampName.capitalize() + " Control", collapse=True)
mel.eval('AEaddRampControl("' + nodeName + "." + rampName + 'Ramp"); ')
self.addControl(rampName + "RampOffset", label= rampName.capitalize() + " Ramp Offset")
self.addControl(rampName + "RampAmplitude", label= rampName.capitalize() + " Ramp Amplitude")
self.addControl(rampName + "RampRandomAmplitude", label= rampName.capitalize() + " Ramp Random")
self.addControl(rampName + "RampAxis", label= rampName.capitalize() + " Ramp Axis")
self.endLayout()
showRampControls("position")
showRampControls("rotation")
showRampControls("scale")
self.addExtraControls()
self.endLayout()
self.endScrollLayout()
def onRampUpdate(self, attr):
pm.gradientControl(attr)
def updateDimming(self, nodeName, attr):
if pm.PyNode(nodeName).type() == kPluginNodeName:
node = pm.PyNode(nodeName)
instanced = node.isInstanced()
hasInputTransform = node.inputTransform.isConnected()
hasInputCurve = node.inputCurve.isConnected()
self.dimControl(nodeName, attr, instanced or (not hasInputCurve) or (not hasInputTransform))
def onInstanceModeChanged(self, nodeName):
self.updateDimming(nodeName, "instancingMode")
if pm.PyNode(nodeName).type() == kPluginNodeName:
nodeAttr = pm.PyNode(nodeName + ".instancingMode")
mode = nodeAttr.get("instancingMode")
# If dimmed, do not update dimming
if mode == 0:
self.dimControl(nodeName, "instanceLength", True)
self.dimControl(nodeName, "maxInstancesByLength", True)
self.updateDimming(nodeName, "instanceCount")
else:
self.updateDimming(nodeName, "instanceLength")
self.updateDimming(nodeName, "maxInstancesByLength")
self.dimControl(nodeName, "instanceCount", True)
# Command
class instanceAlongCurveCommand(OpenMayaMPx.MPxCommand):
def __init__(self):
OpenMayaMPx.MPxCommand.__init__(self)
self.mUndo = []
def isUndoable(self):
return True
def undoIt(self):
OpenMaya.MGlobal.displayInfo( "Undo: instanceAlongCurveCommand\n" )
# Reversed for undo :)
for m in reversed(self.mUndo):
m.undoIt()
def redoIt(self):
OpenMaya.MGlobal.displayInfo( "Redo: instanceAlongCurveCommand\n" )
for m in self.mUndo:
m.doIt()
def hasShapeBelow(self, dagPath):
sutil = OpenMaya.MScriptUtil()
uintptr = sutil.asUintPtr()
sutil.setUint(uintptr , 0)
dagPath.numberOfShapesDirectlyBelow(uintptr)
return sutil.getUint(uintptr) > 0
def findShadingGroup(self, dagPath):
# Search in children first before extending to shape
for child in xrange(dagPath.childCount()):
childDagPath = OpenMaya.MDagPath()
fnDagNode = OpenMaya.MFnDagNode(dagPath.child(child))
fnDagNode.getPath(childDagPath)
fnSet = self.findShadingGroup(childDagPath)
if fnSet is not None:
return fnSet
if self.hasShapeBelow(dagPath):
dagPath.extendToShape()
fnDepNode = OpenMaya.MFnDependencyNode(dagPath.node())
instPlugArray = fnDepNode.findPlug("instObjGroups")
instPlugArrayElem = instPlugArray.elementByLogicalIndex(dagPath.instanceNumber())
if instPlugArrayElem.isConnected():
connectedPlugs = OpenMaya.MPlugArray()
instPlugArrayElem.connectedTo(connectedPlugs, False, True)
if connectedPlugs.length() == 1:
sgNode = connectedPlugs[0].node()
if sgNode.hasFn(OpenMaya.MFn.kSet):
return OpenMaya.MFnSet(sgNode)
return None
def doIt(self,argList):
try:
list = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getActiveSelectionList(list)
if list.length() == 2:
curveDagPath = OpenMaya.MDagPath()
list.getDagPath(0, curveDagPath)
curveDagPath.extendToShape()
shapeDagPath = OpenMaya.MDagPath()
list.getDagPath(1, shapeDagPath)
if(curveDagPath.node().hasFn(OpenMaya.MFn.kNurbsCurve)):
# We need the curve transform
curvePlug = OpenMaya.MFnDagNode(curveDagPath).findPlug("worldSpace", False).elementByLogicalIndex(0)
# We need the shape's transform too
transformFn = OpenMaya.MFnDagNode(shapeDagPath.transform())
transformMessagePlug = transformFn.findPlug("message", True)
shadingGroupFn = self.findShadingGroup(shapeDagPath)
# Create node first
mdagModifier = OpenMaya.MDagModifier()
self.mUndo.append(mdagModifier)
newNode = mdagModifier.createNode(kPluginNodeId)
mdagModifier.doIt()
# Assign new correct name and select new locator
newNodeFn = OpenMaya.MFnDagNode(newNode)
newNodeFn.setName("instanceAlongCurveLocator#")
newNodeTransformName = newNodeFn.name()
# Get the node shape
nodeShapeDagPath = OpenMaya.MDagPath()
newNodeFn.getPath(nodeShapeDagPath)
nodeShapeDagPath.extendToShape()
newNodeFn = OpenMaya.MFnDagNode(nodeShapeDagPath)
def setupRamp(rampAttr):
# Set default ramp values
defaultPositions = OpenMaya.MFloatArray(1, 0.0)
defaultValues = OpenMaya.MFloatArray(1, 1.0)
defaultInterpolations = OpenMaya.MIntArray(1, 3)
plug = newNodeFn.findPlug(rampAttr.ramp)
ramp = OpenMaya.MRampAttribute(plug)
ramp.addEntries(defaultPositions, defaultValues, defaultInterpolations)
setupRamp(instanceAlongCurveLocator.positionRampAttr)
setupRamp(instanceAlongCurveLocator.rotationRampAttr)
setupRamp(instanceAlongCurveLocator.scaleRampAttr)
# Select new node shape
OpenMaya.MGlobal.clearSelectionList()
msel = OpenMaya.MSelectionList()
msel.add(nodeShapeDagPath)
OpenMaya.MGlobal.setActiveSelectionList(msel)
# Connect :D
mdgModifier = OpenMaya.MDGModifier()
self.mUndo.append(mdgModifier)
mdgModifier.connect(curvePlug, newNodeFn.findPlug(instanceAlongCurveLocator.inputCurveAttr))
mdgModifier.connect(transformMessagePlug, newNodeFn.findPlug(instanceAlongCurveLocator.inputTransformAttr))
if shadingGroupFn is not None:
shadingGroupMessagePlug = shadingGroupFn.findPlug("message", True)
mdgModifier.connect(shadingGroupMessagePlug, newNodeFn.findPlug(instanceAlongCurveLocator.inputShadingGroupAttr))
mdgModifier.doIt()
# (pymel) create a locator and make it the parent
locator = pm.createNode('locator', ss=True, p=newNodeTransformName)
# Show AE because instancing logic depends on update...
mel.eval("openAEWindow")
# Enable drawing overrides
instanceCountPlug = newNodeFn.findPlug("instanceCount", False)
instanceCountPlug.setInt(10)
else:
sys.stderr.write("Please select a curve first")
else:
sys.stderr.write("Please select a curve and a shape")
except:
sys.stderr.write('Failed trying to create locator. stack trace: \n')
sys.stderr.write(traceback.format_exc())
@staticmethod
def cmdCreator():
return OpenMayaMPx.asMPxPtr( instanceAlongCurveCommand() ) | mit | -1,575,250,964,229,552,000 | 44.935678 | 178 | 0.660737 | false | 4.130219 | false | false | false |
jeremy-bernon/Lilith | lilith/internal/reducedcouplingslo.py | 1 | 8964 | ##########################################################################
#
# This file is part of Lilith
# made by J. Bernon and B. Dumont
#
# Web page: http://lpsc.in2p3.fr/projects-th/lilith/
#
# In case of questions email bernon@lpsc.in2p3.fr dum33@ibs.re.kr
#
#
# Lilith is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lilith is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Lilith. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
import os
import numpy as np
from math import sqrt, log
from cmath import sqrt as csqrt
from cmath import asin as casin
from cmath import log as clog
from scipy.interpolate import UnivariateSpline
from param import *
wdir = '/'.join(os.path.realpath(__file__).split("/")[:-1])+'/Grids/'
#### read VBF -> h cross section grid @ LO and interpolation ####
def VBF_ff(spline_deg=3):
VBF_LO_file = open(wdir+'VBF_LO_grid.dat',"r")
VBF_LO_grid = {"WW": [], "ZZ": [], "WZ": []}
hmassVBF = []
for line in VBF_LO_file:
line = line.strip("\n").split()
hmassVBF.append(float(line[0]))
VBF_LO_grid["WW"].append(float(line[1]))
VBF_LO_grid["ZZ"].append(float(line[2]))
VBF_LO_grid["WZ"].append(float(line[3])-float(line[1])-float(line[2]))
CVBFW_LO = UnivariateSpline(hmassVBF, VBF_LO_grid["WW"], k=spline_deg, s=0)
CVBFZ_LO = UnivariateSpline(hmassVBF, VBF_LO_grid["ZZ"], k=spline_deg, s=0)
CVBFWZ_LO = UnivariateSpline(hmassVBF, VBF_LO_grid["WZ"], k=spline_deg, s=0)
VBF_LO_file.close()
VBF_LO = {"CVBFW_LO": CVBFW_LO, "CVBFZ_LO": CVBFZ_LO, "CVBFWZ_LO": CVBFWZ_LO}
return VBF_LO
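# Illustrative usage (editorial note, not part of the original module): the
# splines returned by VBF_ff() are callable in the Higgs mass, e.g.
#   VBF_LO = VBF_ff()
#   ww_lo = VBF_LO["CVBFW_LO"](125.)   # LO WW-fusion contribution at mh = 125 GeV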
def fhiggs(t):
if t<=1.:
return casin(sqrt(t))**2.
else:
return -(log((sqrt(t) + sqrt(t-1.))/(sqrt(t) - sqrt(t-1.))) - pi*1j )**2./4.
def ghiggs(t):
if t<=1:
return csqrt(1-1/t)/2. * ( clog((1 + csqrt(1-1/t))/(1 - csqrt(1-1/t)))-pi*1j)
else:
return csqrt(1/t-1)*casin(csqrt(t))
def I1(tau,l):
return (tau*l/(2.*(tau-l)) + tau**2*l**2 /(2.*(tau-l)**2)*(fhiggs(1/tau)-fhiggs(1/l)) +
tau**2 * l /(tau-l)**2 * (ghiggs(1/tau)-ghiggs(1/l)))
def I2(tau,l):
return -tau*l/(2.*(tau-l))*(fhiggs(1/tau)-fhiggs(1/l))
def A12(tau):
return 2./tau *(1.+(1.-1./tau) * fhiggs(tau))
def A1(tau):
return -(3.*tau+2.*tau**2. +3.*(2.*tau-1.) * fhiggs(tau))/tau**2
def A12Zgamma(tau,l):
return I1(tau,l)-I2(tau,l)
def A1Zgamma(tau,l):
return cW*(4.*(3.-sW2/cW2)*I2(tau,l)+((1.+2./tau)*sW2/cW2-(5.+2./tau))*I1(tau,l))
def A12A(tau):
return 2/tau*fhiggs(tau)
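# Editorial note (hedged): A12, A1 and A12A above are the standard spin-1/2,
# spin-1 and pseudoscalar one-loop form factors entering h -> gamma gamma,
# while the I1/I2 combinations build the corresponding h -> Z gamma amplitudes.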
def computeformfactors():
FF = {}
FF["A12t"] = lambda mh: A12((mh/(2.*mt))**2)
FF["A12c"] = lambda mh: A12((mh/(2.*mc))**2)
FF["A12b"] = lambda mh: A12((mh/(2.*mb))**2)
FF["A12tau"] = lambda mh: A12((mh/(2.*mtau))**2)
FF["A1W"] = lambda mh: A1((mh/(2.*mW))**2)
FF["A12At"] = lambda mh: A12A((mh/(2.*mt))**2)
FF["A12Ac"] = lambda mh: A12A((mh/(2.*mc))**2)
FF["A12Ab"] = lambda mh: A12A((mh/(2.*mb))**2)
FF["A12Atau"] = lambda mh: A12A((mh/(2.*mtau))**2)
FF["A12Zt"] = lambda mh: A12Zgamma(4*(mt/(mh*1.))**2, 4*(mt/(mZ*1.))**2)
FF["A12Zc"] = lambda mh: A12Zgamma(4*(mc/(mh*1.))**2, 4*(mc/(mZ*1.))**2)
FF["A12Zb"] = lambda mh: A12Zgamma(4*(mb/(mh*1.))**2, 4*(mb/(mZ*1.))**2)
FF["A12Ztau"] = lambda mh: A12Zgamma(4*(mtau/(mh*1.))**2, 4*(mtau/(mZ*1.))**2)
FF["A1ZW"] = lambda mh: A1Zgamma(4*(mW/(mh*1.))**2, 4*(mW/(mZ*1.))**2)
FF["A12AZt"] = lambda mh: I2(4*(mt/(mh*1.))**2, 4*(mt/(mZ*1.))**2)
FF["A12AZc"] = lambda mh: I2(4*(mc/(mh*1.))**2, 4*(mc/(mZ*1.))**2)
FF["A12AZb"] = lambda mh: I2(4*(mb/(mh*1.))**2, 4*(mb/(mZ*1.))**2)
FF["A12AZtau"] = lambda mh: I2(4*(mtau/(mh*1.))**2, 4*(mtau/(mZ*1.))**2)
return FF
#### decay: h -> gamma gamma width @ LO & reduced coupling ####
def Htogammagamma(mh, CT, CB, CC, CL, CW, CTIM, CBIM, CCIM, CLIM, FF):
return (10**6*Gf*alpha**2/(128.*pi**3*np.sqrt(2))*mh**3 *
abs(3.*(2./3.)**2 *(CT*FF["A12t"] + CC*FF["A12c"]) +
(CB*3.*(1./3.)**2 * FF["A12b"] + CL*FF["A12tau"])+CW*FF["A1W"])**2. +
10**6*Gf*alpha**2/(128.*pi**3*np.sqrt(2))*mh**3 *
abs(3.*(2./3.)**2 *(CTIM*FF["A12At"] + CCIM*FF["A12Ac"]) +
(CBIM*3.*(1./3.)**2 * FF["A12Ab"] + CLIM*FF["A12Atau"]))**2.)
def redCgammagamma(CT, CB, CC, CL, CW, CTIM, CBIM, CCIM, CLIM, FF, Cgammagammaadd=0.):
A12t = FF["A12t"]
A12c = FF["A12c"]
A12b = FF["A12b"]
A12tau = FF["A12tau"]
A1W = FF["A1W"]
A12At = FF["A12At"]
A12Ac = FF["A12Ac"]
A12Ab = FF["A12Ab"]
A12Atau = FF["A12Atau"]
return (sqrt( ( (abs(3.*(2./3.)**2 *(CT*A12t + CC*A12c) +
CB*3.*(1./3.)**2 * A12b + CL*A12tau+CW*A1W)**2.) +
(abs(3.*(2./3.)**2 *(CTIM*A12At + CCIM*A12Ac) +
3.*(-1./3.)**2*CBIM*A12Ab + CLIM*A12Atau)**2)) /
(abs(3.*(2./3.)**2 *(A12t + A12c) +
(3.*(1./3.)**2 * A12b + A12tau)+A1W)**2.) )
+ Cgammagammaadd)
#### decay: h -> Z gamma width @ LO & reduced coupling ####
def HtoZgamma(mh, CT, CB, CC, CL, CW, CTIM, CBIM, CCIM, CLIM, FF):
return (10**6*Gf**2*mW**2*alpha*mh**3/(64.*pi**4)*(1-mZ**2/mh**2)**3 *
abs( 1/(cW)*3.*2/3.*(CT*(2*1/2. - 4*2/3.*sW2)*FF["A12Zt"] +
CC*(2*1/2. - 4*2/3.*sW2)*FF["A12Zc"]) +
1/(cW)*(3*(-1/3.)*CB*(2*(-1/2.) - 4*(-1/3.)*sW2)*FF["A12Zb"] +
(-1)*CL*(2*(-1/2.) - 4*(-1)*sW2)*FF["A12Ztau"]) +
CW*FF["A1ZW"] )**2 +
10**6*Gf**2*mW**2*alpha*mh**3/(16.*pi**4)*(1-mZ**2/mh**2)**3 *
abs( 1/(cW)*3.*2/3.*(CTIM*(2*1/2. - 4*2/3.*sW2)*FF["A12AZt"] +
CCIM*(2*1/2. - 4*2/3.*sW2)*FF["A12AZc"]) +
1/(cW)*(3*(-1/3.)*CBIM*(2*(-1/2.) - 4*(-1/3.)*sW2)*FF["A12AZb"] +
                (-1)*CLIM*(2*(-1/2.) - 4*(-1)*sW2)*FF["A12AZtau"]) )**2)
def redCZgamma(CT, CB, CC, CL, CW, CTIM, CBIM, CCIM, CLIM, FF, CZgammaadd=0.):
A12Zt = FF["A12Zt"]
A12Zc = FF["A12Zc"]
A12Zb = FF["A12Zb"]
A12Ztau = FF["A12Ztau"]
A1ZW = FF["A1ZW"]
A12AZt = FF["A12AZt"]
A12AZc = FF["A12AZc"]
A12AZb = FF["A12AZb"]
A12AZtau = FF["A12AZtau"]
vt = (2*1/2. - 4*2/3.*sW2)
vc = (2*1/2. - 4*2/3.*sW2)
vb = (2*(-1/2.) - 4*(-1/3.)*sW2)
vl = (2*(-1/2.) - 4*(-1)*sW2)
return (sqrt( (abs( 1/(cW)*(3.*2/3.*(CT*vt*A12Zt + CC*vc*A12Zc) +
(3*(-1/3.)*CB*vb*A12Zb +
(-1)*CL*vl*A12Ztau)) + CW*A1ZW )**2 +
4*abs(1/(cW)*(3.*2/3.*(CTIM*vt*A12AZt + CCIM*vc*A12AZc) +
3*(-1/3.)*CBIM*vb*A12AZb + (-1)*CLIM*vl*A12AZtau))**2)/
(abs(1/(cW)*(3.*2/3.*(vt*A12Zt + vc*A12Zc) +
(3*(-1/3.)*vb*A12Zb + (-1)*vl*A12Ztau)) + A1ZW )**2) )
+ CZgammaadd)
#### decay: h -> g g width @ LO & reduced coupling ####
def Htogg(mh, CT, CB, CC, CTIM, CBIM, CCIM, FF):
return (10**3*Gf*alphas**2*mh**3/(36.*np.sqrt(2)*pi**3) *
abs(0.75*(CT*FF["A12t"] + CB*FF["A12b"] + CC*FF["A12c"]))**2 +
10**3*Gf*alphas**2*mh**3/(36.*np.sqrt(2)*pi**3) *
abs(0.75*(CTIM*FF["A12At"] + CBIM*FF["A12Ab"] + CCIM*FF["A12Ac"]))**2)
def redCgg(CT, CB, CC, CTIM, CBIM, CCIM, FF, Cggadd=0.):
A12t = FF["A12t"]
A12c = FF["A12c"]
A12b = FF["A12b"]
A12At = FF["A12At"]
A12Ac = FF["A12Ac"]
A12Ab = FF["A12Ab"]
return (sqrt( (abs(0.75*(CT*A12t + CB*A12b + CC*A12c))**2 +
abs(0.75*(CTIM*A12At + CBIM*A12Ab + CCIM*A12Ac))**2)/
(abs(0.75*(A12t + A12b + A12c))**2) )
+ Cggadd)
#### production: g g -> h cross section @ LO ####
def ggFh(mh, CT, CB, CC, CTIM, CBIM, CCIM, FF):
return (Gf*alphas_mh**2/(288.*np.sqrt(2)*pi) *
abs(0.75*(CT*FF["A12t"] + CB*FF["A12b"] + CC*FF["A12c"]))**2 +
Gf*alphas_mh**2/(288.*np.sqrt(2)*pi) *
abs(0.75*(CTIM*FF["A12At"] + CBIM*FF["A12Ab"] + CCIM*FF["A12Ac"]))**2)
#### production: VBF -> h cross section @ LO & reduced coupling ####
def redCVBF(CW, CZ, grid_interp):
VBFW_LO = grid_interp["CVBFW_LO"]
VBFZ_LO = grid_interp["CVBFZ_LO"]
VBFWZ_LO = grid_interp["CVBFWZ_LO"]
return sqrt( (CW**2*VBFW_LO + CZ**2*VBFZ_LO + CW*CZ*VBFWZ_LO)/
(VBFW_LO + VBFZ_LO + VBFWZ_LO) )
| gpl-3.0 | 2,113,900,174,371,581,200 | 37.472103 | 91 | 0.498327 | false | 2.267645 | false | false | false |
ThiagoLopes/ReportToEasy | advogaprojeto/core/urls.py | 1 | 1325 | """advogaprojeto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from core import views
from django.contrib import admin
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^registrar/$', views.registrar, name='registrar'),
url(r'^login/$', views.login, name='login'),
url(r'^logout/$', views.log_out, name='logout'),
url(r'^index/$', views.index, name='index'),
url(r'^gerar_documento$', views.gerar_documento, name='gerar_documento'),
url(r'^cadastro$', views.cadastrar_documento, name='cadastrar_documento'),
url(r'^documento/(?P<id_arquivo>\d+)$', views.documento, name='documento'),
url(r'^delete/(?P<id_template>\d+)$', views.delete_template, name='delete')
]
| gpl-2.0 | -118,183,623,692,947,940 | 43.166667 | 79 | 0.679245 | false | 3.279703 | false | false | false |
sevein/archivematica | src/MCPClient/lib/clientScripts/verifySIPCompliance.py | 1 | 2583 | #!/usr/bin/env python2
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <joseph@artefactual.com>
from __future__ import print_function
import os
import sys
requiredDirectories = ["objects", \
"logs", \
"metadata",\
"metadata/submissionDocumentation"]
allowableFiles = ["processingMCP.xml"]
def checkDirectory(directory, ret=0):
try:
for directory, subDirectories, files in os.walk(directory):
for file in files:
filePath = os.path.join(directory, file)
except Exception as inst:
print("Error navigating directory:", directory.__str__(), file=sys.stderr)
print(type(inst), file=sys.stderr)
print(inst.args, file=sys.stderr)
ret += 1
return ret
def verifyDirectoriesExist(SIPDir, ret=0):
for directory in requiredDirectories:
if not os.path.isdir(os.path.join(SIPDir, directory)):
print("Required Directory Does Not Exist: " + directory, file=sys.stderr)
ret += 1
return ret
def verifyNothingElseAtTopLevel(SIPDir, ret=0):
for entry in os.listdir(SIPDir):
if os.path.isdir(os.path.join(SIPDir, entry)):
if entry not in requiredDirectories:
print("Error, directory exists: " + entry, file=sys.stderr)
ret += 1
else:
if entry not in allowableFiles:
print("Error, file exists: " + entry, file=sys.stderr)
ret += 1
return ret
if __name__ == '__main__':
SIPDir = sys.argv[1]
ret = verifyDirectoriesExist(SIPDir)
ret = verifyNothingElseAtTopLevel(SIPDir, ret)
ret = checkDirectory(SIPDir, ret)
if ret != 0:
import time
time.sleep(10)
quit(ret)
| agpl-3.0 | -7,424,118,532,454,776,000 | 33.905405 | 85 | 0.655052 | false | 3.84375 | false | false | false |
ytsarev/rally | tests/deploy/engines/test_existing.py | 1 | 2136 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test ExistingCloud."""
import jsonschema
from rally import deploy
from rally.deploy.engines import existing
from tests import test
class TestExistingCloud(test.TestCase):
def setUp(self):
self.deployment = {
'config': {
'name': 'ExistingCloud',
'endpoint': {
'auth_url': 'http://example.net:5000/v2.0/',
'username': 'admin',
'password': 'myadminpass',
'tenant_name': 'demo',
},
},
}
super(TestExistingCloud, self).setUp()
def test_init(self):
existing.ExistingCloud(self.deployment)
def test_init_invalid_config(self):
self.deployment['config']['endpoint'] = 42
self.assertRaises(jsonschema.ValidationError,
existing.ExistingCloud, self.deployment)
def test_deploy(self):
engine = existing.ExistingCloud(self.deployment)
endpoints = engine.deploy()
admin_endpoint = self.deployment['config']['endpoint'].copy()
self.assertEqual(admin_endpoint, endpoints[0].to_dict())
def test_cleanup(self):
existing.ExistingCloud(self.deployment).cleanup()
def test_is_in_factory(self):
name = self.deployment['config']['name']
engine = deploy.EngineFactory.get_engine(name,
self.deployment)
self.assertIsInstance(engine, existing.ExistingCloud)
| apache-2.0 | -5,666,701,684,342,492,000 | 34.016393 | 78 | 0.619382 | false | 4.377049 | true | false | false |
opencord/voltha | netconf/protoc_plugins/rpc_gw_gen.py | 1 | 9485 | #!/usr/bin/env python
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from google.protobuf.compiler import plugin_pb2 as plugin
from google.protobuf.descriptor_pb2 import ServiceDescriptorProto, \
MethodOptions
from jinja2 import Template
from simplejson import dumps
import yang_options_pb2
from netconf.protos.third_party.google.api import annotations_pb2, http_pb2
_ = annotations_pb2, http_pb2 # to keep import line from being optimized out
template = Template("""
# Generated file; please do not edit
from simplejson import dumps, load
from structlog import get_logger
from google.protobuf.json_format import MessageToDict, ParseDict
from twisted.internet.defer import inlineCallbacks, returnValue
{% set package = file_name.replace('.proto', '') %}
{% for pypackage, module in includes %}
{% if pypackage %}
from {{ pypackage }} import {{ module }}
{% else %}
import {{ module }}
{% endif %}
{% endfor %}
log = get_logger()
{% for method in methods %}
{% set method_name = method['service'].rpartition('.')[2] + '_' + method['method'] %}
@inlineCallbacks
def {{ method_name }}(grpc_client, params, metadata, **kw):
log.info('{{ method_name }}', params=params, metadata=metadata, **kw)
data = params
data.update(kw)
try:
req = ParseDict(data, {{ type_map[method['input_type']] }}())
except Exception, e:
log.error('cannot-convert-to-protobuf', e=e, data=data)
raise
res, _ = yield grpc_client.invoke(
{{ type_map[method['service']] }}Stub,
'{{ method['method'] }}', req, metadata)
try:
out_data = grpc_client.convertToDict(res)
except AttributeError, e:
filename = '/tmp/netconf_failed_to_convert_data.pbd'
with file(filename, 'w') as f:
f.write(res.SerializeToString())
log.error('cannot-convert-from-protobuf', outdata_saved=filename)
raise
log.info('{{ method_name }}', **out_data)
returnValue(out_data)
def get_xml_tag_{{ method_name }}():
return '{{ method['xml_tag'] }}'
def get_list_items_name_{{ method_name }}():
return '{{ method['list_item_name'] }}'
def get_return_type_{{ method_name }}():
return '{{ type_map[method['output_type']] }}'
{% endfor %}
""", trim_blocks=True, lstrip_blocks=True)
def traverse_methods(proto_file):
package = proto_file.name
for service in proto_file.service:
assert isinstance(service, ServiceDescriptorProto)
for method in service.method:
input_type = method.input_type
if input_type.startswith('.'):
input_type = input_type[1:]
output_type = method.output_type
if output_type.startswith('.'):
output_type = output_type[1:]
# Process any specific yang option
xml_tag = ''
list_item_name = ''
options = method.options
assert isinstance(options, MethodOptions)
for fd, yang_tag in options.ListFields():
if fd.full_name == 'voltha.yang_xml_tag':
if yang_tag.xml_tag:
xml_tag = yang_tag.xml_tag
if yang_tag.list_items_name:
list_item_name = yang_tag.list_items_name
data = {
'package': package,
'filename': proto_file.name,
'service': proto_file.package + '.' + service.name,
'method': method.name,
'input_type': input_type,
'output_type': output_type,
'xml_tag': xml_tag,
'list_item_name': list_item_name
}
yield data
def generate_gw_code(file_name, methods, type_map, includes):
return template.render(file_name=file_name, methods=methods,
type_map=type_map, includes=includes)
class IncludeManager(object):
# need to keep track of what files define what message types and
# under what package name. Later, when we analyze the methods, we
# need to be able to derive the list of files we need to load and we
    # also need to replace the <proto-package-name>.<artifact-name> in the
# templates with <python-package-name>.<artifact-name> so Python can
# resolve these.
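    # Illustrative example (hypothetical names): a message "voltha.Device"
    # declared in "voltha/voltha.proto" lands in module "voltha_pb2", so the
    # type map rewrites "voltha.Device" -> "voltha_pb2.Device" in the
    # generated gateway code.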
def __init__(self):
self.package_to_localname = {}
self.fullname_to_filename = {}
self.prefix_table = [] # sorted table of top-level symbols in protos
self.type_map = {} # full name as used in .proto -> python name
self.includes_needed = set() # names of files needed to be included
self.filename_to_module = {} # filename -> (package, module)
def extend_symbol_tables(self, proto_file):
# keep track of what file adds what top-level symbol to what abstract
# package name
package_name = proto_file.package
file_name = proto_file.name
self._add_filename(file_name)
all_defs = list(proto_file.message_type)
all_defs.extend(list(proto_file.enum_type))
all_defs.extend(list(proto_file.service))
for typedef in all_defs:
name = typedef.name
fullname = package_name + '.' + name
self.fullname_to_filename[fullname] = file_name
self.package_to_localname.setdefault(package_name, []).append(name)
self._update_prefix_table()
def _add_filename(self, filename):
if filename not in self.filename_to_module:
python_path = filename.replace('.proto', '_pb2').replace('/', '.')
package_name, _, module_name = python_path.rpartition('.')
self.filename_to_module[filename] = (package_name, module_name)
def _update_prefix_table(self):
# make a sorted list symbol prefixes needed to resolv for potential use
# of nested symbols
self.prefix_table = sorted(self.fullname_to_filename.iterkeys(),
reverse=True)
def _find_matching_prefix(self, fullname):
for prefix in self.prefix_table:
if fullname.startswith(prefix):
return prefix
# This should never happen
raise Exception('No match for type name "{}"'.format(fullname))
def add_needed_symbol(self, fullname):
if fullname in self.type_map:
return
top_level_symbol = self._find_matching_prefix(fullname)
name = top_level_symbol.rpartition('.')[2]
nested_name = fullname[len(top_level_symbol):] # may be empty
file_name = self.fullname_to_filename[top_level_symbol]
self.includes_needed.add(file_name)
module_name = self.filename_to_module[file_name][1]
python_name = module_name + '.' + name + nested_name
self.type_map[fullname] = python_name
def get_type_map(self):
return self.type_map
def get_includes(self):
return sorted(
self.filename_to_module[fn] for fn in self.includes_needed)
def generate_code(request, response):
assert isinstance(request, plugin.CodeGeneratorRequest)
include_manager = IncludeManager()
for proto_file in request.proto_file:
include_manager.extend_symbol_tables(proto_file)
methods = []
for data in traverse_methods(proto_file):
methods.append(data)
include_manager.add_needed_symbol(data['input_type'])
include_manager.add_needed_symbol(data['output_type'])
include_manager.add_needed_symbol(data['service'])
type_map = include_manager.get_type_map()
includes = include_manager.get_includes()
# as a nice side-effect, generate a json file capturing the essence
# of the RPC method entries
f = response.file.add()
f.name = proto_file.name + '.json'
f.content = dumps(dict(
type_rename_map=type_map,
includes=includes,
methods=methods), indent=4)
# generate the real Python code file
f = response.file.add()
assert proto_file.name.endswith('.proto')
f.name = proto_file.name.replace('.proto', '_rpc_gw.py')
f.content = generate_gw_code(proto_file.name,
methods, type_map, includes)
if __name__ == '__main__':
if len(sys.argv) >= 2:
# read input from file, to allow troubleshooting
with open(sys.argv[1], 'r') as f:
data = f.read()
else:
# read input from stdin
data = sys.stdin.read()
# parse request
request = plugin.CodeGeneratorRequest()
request.ParseFromString(data)
# create response object
response = plugin.CodeGeneratorResponse()
# generate the output and the response
generate_code(request, response)
# serialize the response
output = response.SerializeToString()
# write response to stdout
sys.stdout.write(output)
| apache-2.0 | 5,249,324,388,240,249,000 | 34.657895 | 85 | 0.620664 | false | 3.976939 | false | false | false |
fdr/wal-e | tests/test_wal_segment.py | 11 | 2463 | import pytest
from stage_pgxlog import pg_xlog
from wal_e import worker
from wal_e import exception
# Quiet pyflakes about pytest fixtures.
assert pg_xlog
def make_segment(num, **kwargs):
return worker.WalSegment('pg_xlog/' + str(num) * 8 * 3, **kwargs)
def test_simple_create():
"""Check __init__."""
make_segment(1)
def test_mark_done_invariant():
"""Check explicit segments cannot be .mark_done'd."""
seg = make_segment(1, explicit=True)
with pytest.raises(exception.UserCritical):
seg.mark_done()
def test_mark_done(pg_xlog):
"""Check non-explicit segments can be .mark_done'd."""
seg = make_segment(1, explicit=False)
pg_xlog.touch(seg.name, '.ready')
seg.mark_done()
def test_mark_done_problem(pg_xlog, monkeypatch):
"""Check that mark_done fails loudly if status file is missing.
While in normal operation, WAL-E does not expect races against
other processes manipulating .ready files. But, just in case that
should occur, WAL-E is designed to crash, exercised here.
"""
seg = make_segment(1, explicit=False)
with pytest.raises(exception.UserCritical):
seg.mark_done()
def test_simple_search(pg_xlog):
"""Must find a .ready file"""
name = '1' * 8 * 3
pg_xlog.touch(name, '.ready')
segs = worker.WalSegment.from_ready_archive_status('pg_xlog')
assert segs.next().path == 'pg_xlog/' + name
with pytest.raises(StopIteration):
segs.next()
def test_multi_search(pg_xlog):
"""Test finding a few ready files.
Also throw in some random junk to make sure they are filtered out
from processing correctly.
"""
for i in xrange(3):
ready = str(i) * 8 * 3
pg_xlog.touch(ready, '.ready')
# Throw in a complete segment that should be ignored.
complete_segment_name = 'F' * 8 * 3
pg_xlog.touch(complete_segment_name, '.done')
# Throw in a history-file-alike that also should not be found,
# even if it's ready.
ready_history_file_name = ('F' * 8) + '.history'
pg_xlog.touch(ready_history_file_name, '.ready')
segs = worker.WalSegment.from_ready_archive_status(str(pg_xlog.pg_xlog))
for i, seg in enumerate(segs):
assert seg.name == str(i) * 8 * 3
assert i == 2
# Make sure nothing interesting happened to ignored files.
pg_xlog.assert_exists(complete_segment_name, '.done')
pg_xlog.assert_exists(ready_history_file_name, '.ready')
| bsd-3-clause | -4,511,927,377,189,034,000 | 26.674157 | 76 | 0.656922 | false | 3.30604 | true | false | false |
pwyf/IATI-Implementation-Schedules | impschedules/isfunctions.py | 1 | 5474 | import json
from flask import Flask, current_app, request
from functools import wraps
import collections
import datetime
import re
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
def jsonify(*args, **kwargs):
return current_app.response_class(json.dumps(dict(*args, **kwargs),
indent=None if request.is_xhr else 2, cls=JSONEncoder),
mimetype='application/json')
def support_jsonp(f):
"""Wraps JSONified output for JSONP"""
@wraps(f)
def decorated_function(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
content = str(callback) + '(' + str(f().data) + ')'
return current_app.response_class(content, mimetype='application/json')
else:
return f(*args, **kwargs)
return decorated_function
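# Illustrative usage (assumed Flask view, not defined in this file): decorating
# a route that returns jsonify(...) with @support_jsonp makes requests carrying
# ?callback=foo receive a JSONP-wrapped body, e.g. foo({"key": "value"}).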
def support_jsonp_publishercode(f):
"""Wraps JSONified output for JSONP"""
@wraps(f)
def decorated_function(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
content = str(callback) + '(' + str(f(kwargs['publisher_code']).data) + ')'
return current_app.response_class(content, mimetype='application/json')
else:
return f(*args, **kwargs)
return decorated_function
def publication_timeline(data, cumulative=False, group=6, provide={7,2}, label_group="date", label_provide={"count", "element"}):
properties = set(map(lambda x: (str(x[group])), data))
b = map(lambda x: (x[group],list(map(lambda y: str(x[y]), provide))), data)
out = {}
out['publication'] = {}
out['publication_sorted'] = {}
out['unknown'] = {}
for s, k in b:
try:
if (k[0] != "None"):
out['publication'][str(s)].update({(k[0], k[1])})
else:
out['unknown'][str(s)].update({(k[0], k[1])})
except KeyError:
if (k[0] != "None"):
out['publication'][str(s)] = {}
out['publication'][str(s)].update({(k[0], k[1])})
else:
out['unknown'][str(s)] = {}
out['unknown'][str(s)].update({(k[0], k[1])})
for t in out:
try:
a=out[t]
except KeyError:
out[t] = 0
out['publication_sorted'] = []
for e, v in out['publication'].items():
prevkey_val = 0
latest_count = {}
try:
del v["None"]
except KeyError:
pass
for key in sorted(v.iterkeys()):
newdata = {}
newdata[label_group] = e
if (cumulative == True):
try:
latest_count[e] = int(v[key])
except KeyError:
latest_count[e] = 0
prevkey_val = int(v[key]) + prevkey_val
newdata["count"] = int(v[key]) + latest_count[e]
newdata["element"] = key
out['publication_sorted'].append(newdata)
return out
def publication_dates_groups(data, cumulative=False, group=6, provide={7,2}, label_group="date", label_provide={"count", "element"}):
dates = set(map(lambda x: (str(x[group])), data))
elements = set(map(lambda x: (x[2]), data))
alldata = map(lambda x: ((str(x[group]), x[2]),x[7]), data)
b = map(lambda x: (x[group],list(map(lambda y: str(x[y]), provide))), data)
out = {}
out["dates"] = []
prev_values = {}
for p in sorted(dates):
# get each element
newdata = {}
newdata["date"] = p
for e in elements:
try:
prev_values[e]
except KeyError:
prev_values[e] = 0
newdata[e] = 0
for data in alldata:
if ((data[0][0] == p) and (data[0][1]==e)):
newdata[e] = data[1]
prev_values[e] = prev_values[e] + data[1]
else:
newdata[e] = prev_values[e]
if (newdata[e] == 0):
newdata[e] = prev_values[e]
out["dates"].append(newdata)
# get each date
return out
def nest_compliance_results(data):
properties = set(map(lambda x: (x[2]), data))
b = map(lambda x: (x[2],(x[6], x[7])), data)
out = {}
for s, k in b:
try:
out[s].update({(k[0], k[1])})
except KeyError:
out[s] = {}
out[s].update({(k[0], k[1])})
values = {'fc', 'pc', 'uc', 'fp', 'up'}
for t in out:
for v in values:
try:
a=out[t][v]
except KeyError:
out[t][v] = 0
return out
def toUs(element):
# replace hyphen with underscore
us = re.sub("-", "_", element)
return us
def merge_dict(d1, d2):
# from here: http://stackoverflow.com/questions/10703858/python-merge-multi-level-dictionaries
"""
Modifies d1 in-place to contain values from d2. If any value
in d1 is a dictionary (or dict-like), *and* the corresponding
value in d2 is also a dictionary, then merge them in-place.
"""
for k,v2 in d2.items():
v1 = d1.get(k) # returns None if v1 has no value for this key
if ( isinstance(v1, collections.Mapping) and
isinstance(v2, collections.Mapping) ):
merge_dict(v1, v2)
else:
d1[k] = v2 | agpl-3.0 | 6,427,362,584,844,358,000 | 33.433962 | 133 | 0.523201 | false | 3.594222 | false | false | false |
rspavel/spack | var/spack/repos/builtin/packages/ibm-databroker/package.py | 1 | 1845 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class IbmDatabroker(CMakePackage, PythonPackage):
"""The Data Broker (DBR) is a distributed, in-memory container of key-value
stores enabling applications in a workflow to exchange data through one or
more shared namespaces. Thanks to a small set of primitives, applications
in a workflow deployed in a (possibly) shared nothing distributed cluster,
can easily share and exchange data and messages with applications."""
homepage = "https://github.com/IBM/data-broker"
git = "https://github.com/IBM/data-broker"
url = 'https://github.com/IBM/data-broker/archive/0.6.1.tar.gz'
# IBM dev team should take over
maintainers = ['bhatiaharsh']
version('master', branch='master')
version('0.7.0', sha256='5460fa1c5c05ad25c759b2ee4cecee92980d4dde5bc7c5f6da9242806cf22bb8')
version('0.6.1', sha256='2c7d6c6a269d4ae97aad4d770533e742f367da84758130c283733f25df83e535')
version('0.6.0', sha256='5856209d965c923548ebb69119344f1fc596d4c0631121b230448cc91bac4290')
variant('python', default=False, description='Build Python bindings')
depends_on('cmake@2.8:', type='build')
depends_on('redis@5.0.2:', type='run')
depends_on('libevent@2.1.8', type=('build', 'run'))
extends('python@3.7:', when='+python')
depends_on('py-setuptools', when='+python')
patch('fixes_in_v0.6.1.patch', when='@0.6.1')
patch('fixes_in_v0.7.0.patch', when='@0.7.0')
def cmake_args(self):
args = []
args.append('-DDEFAULT_BE=redis')
if '+python' in self.spec:
args.append('-DPYDBR=1')
return args
| lgpl-2.1 | 8,924,453,438,641,355,000 | 39.108696 | 95 | 0.687805 | false | 3.116554 | false | false | false |
debayan/jatayu | botmodules/WeekendModel.py | 1 | 2878 | '''
Copyright 2016 Debayan Banerjee, Shreyank Gupta
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib2,json
from random import randint
class WeekendModel(object):
dc = {}
def __init__(self, logger):
self.logger = logger
self.restaurant_name = None
self.restaurant_address = None
self.movie_name = None
self.zomatokey = '39e91731af08d26261adf655948d9daa'
def ifintentclear(self, stt, text=None, reply=[]):
if 'movie' in text.lower() or 'restaurant' in text.lower():
return True
else:
return False
def ifintentmovie(self, stt, text=None, reply=[]):
if 'movie' in text.lower():
req = urllib2.Request("http://www.omdbapi.com/?s=bot")
contents = urllib2.urlopen(req).read()
d = json.loads(str(contents))
sub = randint(0,9)
self.movie_name = d['Search'][sub]['Title']
return True
else:
return False
def ifintentrestaurant(self, stt, text=None, reply=[]):
if 'restaurant' in text.lower():
return True
else:
return False
def ifvalidcity(self, stt, text=None, reply=[]):
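        # Hedged summary: queries the Zomato locations API for the given city and,
        # if a match exists, stores the top-rated restaurant's name and address on self.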
req = urllib2.Request("https://developers.zomato.com/api/v2.1/locations?query=%s"%text, headers={'user-key': '39e91731af08d26261adf655948d9daa','Accept':'application/json'})
contents = urllib2.urlopen(req).read()
d = json.loads(contents)
if len(d['location_suggestions']) == 0:
return False
else:
entity_id = d['location_suggestions'][0]['entity_id']
entity_type = d['location_suggestions'][0]['entity_type']
d = {}
req = urllib2.Request("https://developers.zomato.com/api/v2.1/location_details?entity_id=%s&entity_type=%s"%(entity_id, entity_type), headers={'user-key': '39e91731af08d26261adf655948d9daa','Accept':'application/json'})
contents = urllib2.urlopen(req).read()
d = json.loads(contents)
self.restaurant_name = d['best_rated_restaurant'][0]['restaurant']['name']
self.restaurant_address = d['best_rated_restaurant'][0]['restaurant']['location']['address']
return True
| gpl-3.0 | -2,802,824,964,411,021,000 | 40.710145 | 230 | 0.628909 | false | 3.878706 | false | false | false |
tdeboissiere/DeepLearningImplementations | InfoGAN/src/model/models.py | 1 | 9122 | from keras.models import Model
from keras.layers.core import Flatten, Dense, Dropout, Activation, Lambda, Reshape
from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D
from keras.layers import Input, merge
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D
import keras.backend as K
def generator_upsampling(cat_dim, cont_dim, noise_dim, img_dim, bn_mode, model_name="generator_upsampling", dset="mnist"):
"""
Generator model of the DCGAN
args : img_dim (tuple of int) num_chan, height, width
pretr_weights_file (str) file holding pre trained weights
returns : model (keras NN) the Neural Net model
"""
s = img_dim[1]
f = 128
if dset == "mnist":
start_dim = int(s / 4)
nb_upconv = 2
else:
start_dim = int(s / 16)
nb_upconv = 4
if K.image_dim_ordering() == "th":
bn_axis = 1
reshape_shape = (f, start_dim, start_dim)
output_channels = img_dim[0]
else:
reshape_shape = (start_dim, start_dim, f)
bn_axis = -1
output_channels = img_dim[-1]
cat_input = Input(shape=cat_dim, name="cat_input")
cont_input = Input(shape=cont_dim, name="cont_input")
noise_input = Input(shape=noise_dim, name="noise_input")
gen_input = merge([cat_input, cont_input, noise_input], mode="concat")
x = Dense(1024)(gen_input)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Dense(f * start_dim * start_dim)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Reshape(reshape_shape)(x)
# Upscaling blocks
for i in range(nb_upconv):
x = UpSampling2D(size=(2, 2))(x)
nb_filters = int(f / (2 ** (i + 1)))
x = Conv2D(nb_filters, (3, 3), padding="same")(x)
x = BatchNormalization(axis=bn_axis)(x)
x = Activation("relu")(x)
# x = Conv2D(nb_filters, (3, 3), padding="same")(x)
# x = BatchNormalization(axis=bn_axis)(x)
# x = Activation("relu")(x)
x = Conv2D(output_channels, (3, 3), name="gen_Conv2D_final", padding="same", activation='tanh')(x)
generator_model = Model(inputs=[cat_input, cont_input, noise_input], outputs=[x], name=model_name)
return generator_model
def generator_deconv(cat_dim, cont_dim, noise_dim, img_dim, bn_mode, batch_size, model_name="generator_deconv", dset="mnist"):
"""
Generator model of the DCGAN
args : nb_classes (int) number of classes
img_dim (tuple of int) num_chan, height, width
pretr_weights_file (str) file holding pre trained weights
returns : model (keras NN) the Neural Net model
"""
assert K.backend() == "tensorflow", "Deconv not implemented with theano"
s = img_dim[1]
f = 128
if dset == "mnist":
start_dim = int(s / 4)
nb_upconv = 2
else:
start_dim = int(s / 16)
nb_upconv = 4
reshape_shape = (start_dim, start_dim, f)
bn_axis = -1
output_channels = img_dim[-1]
cat_input = Input(shape=cat_dim, name="cat_input")
cont_input = Input(shape=cont_dim, name="cont_input")
noise_input = Input(shape=noise_dim, name="noise_input")
gen_input = merge([cat_input, cont_input, noise_input], mode="concat")
x = Dense(1024)(gen_input)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Dense(f * start_dim * start_dim)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Reshape(reshape_shape)(x)
# Transposed conv blocks
for i in range(nb_upconv - 1):
nb_filters = int(f / (2 ** (i + 1)))
s = start_dim * (2 ** (i + 1))
o_shape = (batch_size, s, s, nb_filters)
x = Deconv2D(nb_filters, (3, 3), output_shape=o_shape, strides=(2, 2), padding="same")(x)
x = BatchNormalization(mode=2, axis=bn_axis)(x)
x = Activation("relu")(x)
# Last block
s = start_dim * (2 ** (nb_upconv))
o_shape = (batch_size, s, s, output_channels)
x = Deconv2D(output_channels, (3, 3), output_shape=o_shape, strides=(2, 2), padding="same")(x)
x = Activation("tanh")(x)
generator_model = Model(inputs=[cat_input, cont_input, noise_input], outputs=[x], name=model_name)
return generator_model
def DCGAN_discriminator(cat_dim, cont_dim, img_dim, bn_mode, model_name="DCGAN_discriminator", dset="mnist", use_mbd=False):
"""
Discriminator model of the DCGAN
args : img_dim (tuple of int) num_chan, height, width
pretr_weights_file (str) file holding pre trained weights
returns : model (keras NN) the Neural Net model
"""
if K.image_dim_ordering() == "th":
bn_axis = 1
else:
bn_axis = -1
disc_input = Input(shape=img_dim, name="discriminator_input")
if dset == "mnist":
list_f = [128]
else:
list_f = [64, 128, 256]
# First conv
x = Conv2D(64, (3, 3), strides=(2, 2), name="disc_Conv2D_1", padding="same")(disc_input)
x = LeakyReLU(0.2)(x)
# Next convs
for i, f in enumerate(list_f):
name = "disc_Conv2D_%s" % (i + 2)
x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding="same")(x)
x = BatchNormalization(axis=bn_axis)(x)
x = LeakyReLU(0.2)(x)
x = Flatten()(x)
x = Dense(1024)(x)
x = BatchNormalization()(x)
x = LeakyReLU(0.2)(x)
def linmax(x):
return K.maximum(x, -16)
def linmax_shape(input_shape):
return input_shape
# More processing for auxiliary Q
x_Q = Dense(128)(x)
x_Q = BatchNormalization()(x_Q)
x_Q = LeakyReLU(0.2)(x_Q)
x_Q_Y = Dense(cat_dim[0], activation='softmax', name="Q_cat_out")(x_Q)
x_Q_C_mean = Dense(cont_dim[0], activation='linear', name="dense_Q_cont_mean")(x_Q)
x_Q_C_logstd = Dense(cont_dim[0], name="dense_Q_cont_logstd")(x_Q)
x_Q_C_logstd = Lambda(linmax, output_shape=linmax_shape)(x_Q_C_logstd)
# Reshape Q to nbatch, 1, cont_dim[0]
x_Q_C_mean = Reshape((1, cont_dim[0]))(x_Q_C_mean)
x_Q_C_logstd = Reshape((1, cont_dim[0]))(x_Q_C_logstd)
x_Q_C = merge([x_Q_C_mean, x_Q_C_logstd], mode="concat", name="Q_cont_out", concat_axis=1)
def minb_disc(z):
diffs = K.expand_dims(z, 3) - K.expand_dims(K.permute_dimensions(z, [1, 2, 0]), 0)
abs_diffs = K.sum(K.abs(diffs), 2)
z = K.sum(K.exp(-abs_diffs), 2)
return z
def lambda_output(input_shape):
return input_shape[:2]
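    # Minibatch discrimination (brief editorial note): M projects the flattened
    # features into (num_kernels, dim_per_kernel) tensors per sample; minb_disc
    # then sums exp(-L1 distance) to every other sample in the batch, yielding
    # per-kernel similarity statistics that are concatenated back when
    # use_mbd=True, helping the discriminator detect mode collapse.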
num_kernels = 300
dim_per_kernel = 5
M = Dense(num_kernels * dim_per_kernel, use_bias=False, activation=None)
MBD = Lambda(minb_disc, output_shape=lambda_output)
if use_mbd:
x_mbd = M(x)
x_mbd = Reshape((num_kernels, dim_per_kernel))(x_mbd)
x_mbd = MBD(x_mbd)
x = merge([x, x_mbd], mode='concat')
# Create discriminator model
x_disc = Dense(2, activation='softmax', name="disc_out")(x)
discriminator_model = Model(inputs=[disc_input], outputs=[x_disc, x_Q_Y, x_Q_C], name=model_name)
return discriminator_model
def DCGAN(generator, discriminator_model, cat_dim, cont_dim, noise_dim):
cat_input = Input(shape=cat_dim, name="cat_input")
cont_input = Input(shape=cont_dim, name="cont_input")
noise_input = Input(shape=noise_dim, name="noise_input")
generated_image = generator([cat_input, cont_input, noise_input])
x_disc, x_Q_Y, x_Q_C = discriminator_model(generated_image)
DCGAN = Model(inputs=[cat_input, cont_input, noise_input],
outputs=[x_disc, x_Q_Y, x_Q_C],
name="DCGAN")
return DCGAN
def load(model_name, cat_dim, cont_dim, noise_dim, img_dim, bn_mode, batch_size, dset="mnist", use_mbd=False):
if model_name == "generator_upsampling":
model = generator_upsampling(cat_dim, cont_dim, noise_dim, img_dim, bn_mode, model_name=model_name, dset=dset)
model.summary()
from keras.utils import plot_model
plot_model(model, to_file='../../figures/%s.png' % model_name, show_shapes=True, show_layer_names=True)
return model
if model_name == "generator_deconv":
model = generator_deconv(cat_dim, cont_dim, noise_dim, img_dim, bn_mode,
batch_size, model_name=model_name, dset=dset)
model.summary()
from keras.utils import plot_model
plot_model(model, to_file='../../figures/%s.png' % model_name, show_shapes=True, show_layer_names=True)
return model
if model_name == "DCGAN_discriminator":
model = DCGAN_discriminator(cat_dim, cont_dim, img_dim, bn_mode,
model_name=model_name, dset=dset, use_mbd=use_mbd)
model.summary()
from keras.utils import plot_model
plot_model(model, to_file='../../figures/%s.png' % model_name, show_shapes=True, show_layer_names=True)
return model
if __name__ == '__main__':
m = generator_deconv((10,), (2,), (64,), (28, 28, 1), 2, 1, model_name="generator_deconv", dset="mnist")
m.summary()
| mit | -1,602,562,786,555,748,600 | 33.164794 | 126 | 0.604363 | false | 3.038641 | false | false | false |
le717/ICU-ReDirect | ReDirect.py | 1 | 14189 | """
This file is part of ICU (LEGO Island Configuration Utility)
ICU - A collection of LEGO Island Configuration Tools
Created 2012-2013 Triangle717 <http://triangle717.wordpress.com>
ICU is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ICU is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ICU. If not, see <http://www.gnu.org/licenses/>.
"""
# ICU ReDirect V2
# Part of ICU (LEGO Island Configuration Utility)
# https://github.com/le717/ICU
# General use modules
import os
import sys
import time
# Main use modules
import winreg
import glob
import shutil
# Special purpose modules
import platform
import webbrowser
# Logging Code
import logging
import yourscorecube
# GUI elements
import tkinter
from tkinter import filedialog
# Global variables
app = "ICU ReDirect"
majver = "Version 2.0"
minver = "Stable"
creator = "Triangle717"
game = "LEGO Island"
# ------------ Begin ICU ReDirect Initialization ------------ #
def preload():
'''Python 3.3.0 and Windows Architecture check'''
logging.info("Begin logging to {0}".format(yourscorecube.logging_file))
logging.info('''
#############################################
{0} {1} {2}
Copyright 2013 {3}
YourScoreCube.log
If you run into a bug, open an issue at
https://github.com/le717/ICU/issues
and attach this file for an easier fix!
#############################################
'''.format(app, majver, minver, creator))
# You need to have at least Python 3.3.0 to run ICU ReDirect
if sys.version_info < (3, 3, 0):
logging.warning('''You are not running Python 3.3.0 or higher!
You need to get a newer version to run {0}'''.format(app))
sys.stdout.write('''\nYou need to download Python 3.3.0 or greater
to run {0} {1} {2}.'''.format(app, majver, minver))
# Don't open browser immediately
time.sleep(2)
logging.info("Open Python download page in a new tab in web browser.")
# New tab, raise browser window (if possible)
webbrowser.open_new_tab("http://python.org/download")
# Close ICU ReDirect
logging.info("Display error message for three seconds")
time.sleep(3)
logging.info("{0} is shutting down.".format(app))
raise SystemExit(0)
# If you are running Python 3.3.0
else:
logging.info('''You are running Python 3.3.0 or greater.
{0} will continue.'''.format(app))
# Declare osbit global variable
global osbit
# User is running 64-bit Windows
if platform.machine() == 'AMD64':
logging.info("User is running 64-bit Windows.")
osbit = "x64"
main()
# User is running 32-bit Windows
elif platform.machine() == 'x86':
logging.info("User is running 32-bit Windows.")
osbit = "x86"
main()
# The user is running an unsupported version of Windows!
else:
logging.warning("User is running an unsupported OS!")
print("\nYou are running an unsupported OS! {0} will now close."
.format(app))
time.sleep(3)
logging.info("{0} is shutting down".format(app))
raise SystemExit(0)
# ------------ End ICU ReDirect Initialization ------------ #
# ------------ Begin ICU ReDirect Menu Layout ------------ #
def main():
'''ICU ReDirect Menu Layout'''
print("\nWelcome to {0} {1} {2}\nCreated 2012-2013 {3}".format(
app, majver, minver, creator))
print('''\nPlease make a selection:\n
[r] ReDirect Save Games
[q] Quit''')
menuopt = input("\n> ")
while True:
if menuopt.lower() == "r":
logging.info("User pressed '[r] ReDirect Save Games'")
ReDirect()
elif menuopt.lower() == "q":
logging.info("User pressed '[q] Quit'")
print("\nGoodbye!")
time.sleep(3)
logging.info('''{0} is shutting down.
'''.format(app))
raise SystemExit(0)
# Undefined input
else:
logging.info("User pressed an undefined key")
main()
# ------------ End ICU ReDirect Menu Layout ------------ #
# ------------ Begin Save Game ReDirect Intro ------------ #
def ReDirect():
'''Save Game ReDirect Launcher'''
# Switch to 32-bit registry string code
if osbit == "x86":
logging.info("User is running 32-bit (x86) Windows, use x86 Registry Strings")
eightsixReDirect()
# Switch to 64-bit registry string code
elif osbit == 'x64':
logging.info("User is running 64-bit (x64) Windows, use x64 Registry Strings")
sixfourReDirect()
# ------------ End Save Game ReDirect Intro ------------ #
# ------------ Begin Save Game ReDirect code for Windows x86 ------------ #
def eightsixReDirect():
'''Redirects LEGO Island Save Games on Windows x86'''
logging.info("'Open HKEY_LOCAL_MACHINE\SOFTWARE\Mindscape\LEGO Island\savepath' for reading")
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\Mindscape\LEGO Island', 0, winreg.KEY_READ) as oldx86save:
oldx86path = winreg.QueryValueEx(oldx86save, 'savepath')
# Convert tuple to str(ing)
logging.info("Convert tuple returned by registry string to a string")
oldx86path = "".join(str(oldx86path))
# Clean up string to get a clean folder path
logging.info("Cleaning up folder path...")
oldx86path = oldx86path.strip("(''), 1")
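    # Note (hedged aside): winreg.QueryValueEx() returns a (value, type) tuple, so an
    # equivalent and arguably cleaner way to get just the path string would have been:
    #   oldx86path = winreg.QueryValueEx(oldx86save, 'savepath')[0]
    # The join/strip clean-up above is kept to preserve the original behaviour.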
# Tell where current save games are located
logging.info("Your {0} Save Games are currently located at {1}".format(game,
oldx86path))
print('\nYour {0} Save Games are currently located at\n"{1}"'.format(game,
oldx86path))
time.sleep(2)
# Draw (then withdraw) the root Tk window
logging.info("Drawing root Tk window")
root = tkinter.Tk()
logging.info("Withdrawing root Tk window")
root.withdraw()
# Select where you want your Save Games to be moved to
# TODO: Make dialog active window automatically and
# do the same to main window when closed.
logging.info("Display folder selection dialog for new Save Game Location.")
newsavepath = filedialog.askdirectory(
title="Please select the new location for your {0} Save Games:".format(
game))
# The user clicked cancel
if len(newsavepath) == 0:
logging.warning("User canceled the Save Game redirection!")
print("\nCanceling Save Game ReDirection...\n")
time.sleep(1)
main()
# The user selected a folder
else:
logging.info("User selected a new Save Game location at {0}".format(
newsavepath))
try:
# This checks for any *.GS files in savepath, and deletes them
# This has to be done because Windows does not allow
# a file to be overwritten. :|
for root, dir, files in os.walk(newsavepath):
for gsfile in files:
if gsfile.upper().endswith(".GS"):
os.unlink(os.path.join(newsavepath, gsfile))
# This checks for any *.gsi files in savepath, and deletes them
# This has to be done because Windows does not allow
# a file to be overwritten. :|
for root, dir, files in os.walk(newsavepath):
for gsifile in files:
if gsifile.lower().endswith(".gsi"):
os.unlink(os.path.join(newsavepath, gsifile))
# Move all *.GS files to the new path
for gsfile in glob.glob("{0}/*.GS".format(oldx86path)):
shutil.move(gsfile, newsavepath)
# Move all *.gsi files to the new path
for gsifile in glob.glob("{0}/*.gsi".format(oldx86path)):
shutil.move(gsifile, newsavepath)
            # So the final workflow is: if the file exists, delete it, then move;
            # if it does not exist, just move.
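            # (Hedged aside: on Python 3.3+, os.replace(src, dst) can overwrite an
            # existing file in one step for same-volume moves, which would avoid the
            # explicit delete pass above; the original two-step approach is kept here.)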
# Write Registry String with new path
logging.info("'Write HKEY_LOCAL_MACHINE\SOFTWARE\Mindscape\LEGO Island\savepath' with new save path")
with winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\Mindscape\LEGO Island') as newx86savekey:
winreg.SetValueEx(newx86savekey, "savepath",
0, winreg.REG_SZ, newsavepath)
                # Save games successfully redirected! :D
                print('\n{0} save games successfully redirected to "{1}".'.format(game,
newsavepath))
# The User does not have the rights to redirect the save games! D:
except PermissionError:
logging.warning('''{0} does not have the user rights to operate!
Please relaunch {0} as an Administrator.'''.format(app))
print('''\n{0} does not have the user rights to operate!
Please relaunch {0} as an Administrator.'''.format(app))
# Go back to main menu no matter the outcome
finally:
time.sleep(3)
main()
# ------------ End Save Game ReDirect code for Windows x86 ------------ #
# ------------ Begin Save Game ReDirect code for Windows x64 ------------ #
def sixfourReDirect():
'''Redirects LEGO Island Save Games on Windows x64'''
logging.info("'Open HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mindscape\LEGO Island\savepath' for reading")
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\Wow6432Node\Mindscape\LEGO Island', 0,
winreg.KEY_READ) as oldx64save:
oldx64path = winreg.QueryValueEx(oldx64save, 'savepath')
# Convert tuple to str(ing)
logging.info("Convert tuple returned by registry string to a string")
oldx64path = "".join(str(oldx64path))
# Clean up string to get a clean folder path
logging.info("Cleaning up folder path...")
oldx64path = oldx64path.strip("(''), 1")
# Tell where current save games are located
logging.info("Your {0} Save Games are currently located at {1}".format(game,
oldx64path))
print('\nYour {0} Save Games are currently located at\n"{1}"'.format(game,
oldx64path))
time.sleep(2)
# Draw (then withdraw) the root Tk window
logging.info("Drawing root Tk window")
root = tkinter.Tk()
logging.info("Withdrawing root Tk window")
root.withdraw()
# Select where you want your Save Games to be moved to
# TODO: Make dialog active window automatically
# and do the same to main window when closed.
logging.info("Display folder selection dialog for new Save Game Location.")
newsavepath = filedialog.askdirectory(
title="Please select the new location for your {0} Save Games:".format(
game))
# The user clicked cancel
if len(newsavepath) == 0:
logging.warning("User canceled the Save Game redirection!")
print("\nCanceling Save Game ReDirection...\n")
time.sleep(1)
main()
# The user selected a folder
else:
logging.info("User selected a new Save Game location at {0}".format(
newsavepath))
try:
# This checks for any *.GS files in savepath, and deletes them
# This has to be done because Windows does not allow
# a file to be overwritten. :|
for root, dir, files in os.walk(newsavepath):
for gsfile in files:
if gsfile.upper().endswith(".GS"):
os.unlink(os.path.join(newsavepath, gsfile))
# This checks for any *.gsi files in savepath, and deletes them
# This has to be done because Windows does not allow
# a file to be overwritten. :|
for root, dir, files in os.walk(newsavepath):
for gsifile in files:
if gsifile.lower().endswith(".gsi"):
os.unlink(os.path.join(newsavepath, gsifile))
# Move all *.GS files to the new path
for gsfile in glob.glob("{0}/*.GS".format(oldx64path)):
shutil.move(gsfile, newsavepath)
# Move all *.gsi files to the new path
for gsifile in glob.glob("{0}/*.gsi".format(oldx64path)):
shutil.move(gsifile, newsavepath)
            # So the final workflow is: if the file exists, delete it, then move;
            # if it does not exist, just move.
# Write Registry String with new path
logging.info("'Write HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Mindscape\LEGO Island\savepath' with new save path")
with winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\Wow6432Node\Mindscape\LEGO Island') as newx64savekey:
winreg.SetValueEx(newx64savekey, "savepath",
0, winreg.REG_SZ, newsavepath)
                # Save games successfully redirected! :D
                print('\n{0} save games successfully redirected to "{1}".'.format(game,
newsavepath))
# The User does not have the rights to redirect the save games! D:
except PermissionError:
logging.warning('''{0} does not have the user rights to operate!
Please relaunch {0} as an Administrator.'''.format(app))
print('''\n{0} does not have the user rights to operate!
Please relaunch {0} as an Administrator.'''.format(app))
# Go back to main menu no matter the outcome
finally:
time.sleep(3)
main()
# ------------ End Save Game ReDirect code for Windows x64 ------------ #
if __name__ == "__main__":
# Write window title (since there is no GUI)
os.system("title {0} {1} {2}".format(app, majver, minver))
# Run preload() to begin ICU ReDirect Initialization
preload() | gpl-3.0 | -2,704,006,476,380,678,700 | 34.475 | 121 | 0.608006 | false | 3.922864 | false | false | false |
zurgzurg/pytoken | setup.py | 1 | 3578 | import sys
import os
import pdb
import shutil
import os.path
from distutils.command.clean import clean as _clean
from distutils.core import setup, Extension
from distutils import sysconfig
##################################################################
def customize_compiler2(compiler):
(cc, cxx, opt, cflags, ccshared, ldshared, so_ext) = \
sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
if 0:
print "cc=", cc
print "cxx=", cxx
print "opt=", opt
print "cflags=", cflags
print "ccshared=", ccshared
cflags = cflags.replace("-DNDEBUG", "")
cflags = cflags.replace("-O2", "")
cpp = cc + " -E"
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc)
compiler.shared_lib_extension = so_ext
return
idx = None
for i, arg in enumerate(sys.argv):
if arg == "-debug":
idx = i
if idx:
sys.argv.pop(idx)
d = sysconfig.__dict__
d['customize_compiler'] = customize_compiler2
##################################################################
##
## the main module - escape
##
##################################################################
escape_module = Extension('escape',
sources = ['escapemodule.c'])
mlist = [escape_module]
##################################################################
##
## benchmark support - most folks won't need this
##
##################################################################
idx = None
for i, arg in enumerate(sys.argv):
if arg == "-bmark":
idx = i
if idx:
sys.argv.pop(idx)
slist = ["bmarkmodule.c", "bmark_scan.c"]
obj = Extension("bmark", sources=slist)
mlist.append(obj)
# does distutils have a way to run flex?
cmd = "flex -f -s -B -L -obmark_scan.c -Pbmark bmark_scan.lex"
print "Running flex command"
print cmd
c = os.system(cmd)
if c != 0:
print "Flex return non-zero status. Stopping."
sys.exit(-1)
##################################################################
##
## custom clean func
##
##################################################################
class clean(_clean):
"""Custom clean routine to clean pyc files"""
def run(self):
_clean.run(self)
if os.path.exists("build"):
print "Removing build dir"
shutil.rmtree("build")
for f in os.listdir("."):
if f.endswith(".pyc") \
or f.endswith("~") \
or f.endswith(".s") \
or f.endswith(".o") \
or f in ("a.out", "pytoken.tar.gz"):
os.unlink(f)
for f in ["parser.out", "parsetab.py"]:
try:
os.unlink(f)
except OSError:
pass
return
pass
##################################################################
##
## toplevel
##
##################################################################
## how can I force setup to turn off -O ??
##
setup(name = 'pytoken',
version = '1.01',
description = 'Generates scanners for python.',
author = 'Ram Bhamidipaty',
author_email = 'rambham@gmail.com',
url = 'http://code.google.com/p/pytoken/',
ext_modules = mlist,
py_modules = ['pytoken', 'pytoken_ply_lex'],
cmdclass = {"clean" : clean} )
| bsd-3-clause | 4,125,798,810,888,287,000 | 26.523077 | 66 | 0.454723 | false | 4.103211 | false | false | false |
andrewebdev/django-adzone | adzone/south_migrations/0006_add_all_sites.py | 6 | 8725 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
Site = orm['sites.Site']
all_sites = Site.objects.all()
for ad in orm.AdBase.objects.all():
ad.sites = all_sites
def backwards(self, orm):
"Write your backwards methods here."
models = {
'adzone.adbase': {
'Meta': {'object_name': 'AdBase'},
'advertiser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['adzone.Advertiser']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['adzone.AdCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'since': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'start_showing': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'stop_showing': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(9999, 12, 29, 0, 0)'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['adzone.AdZone']"})
},
'adzone.adcategory': {
'Meta': {'ordering': "('title',)", 'object_name': 'AdCategory'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'adzone.adclick': {
'Meta': {'object_name': 'AdClick'},
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['adzone.AdBase']"}),
'click_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'})
},
'adzone.adimpression': {
'Meta': {'object_name': 'AdImpression'},
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['adzone.AdBase']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impression_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'source_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'})
},
'adzone.advertiser': {
'Meta': {'ordering': "('company_name',)", 'object_name': 'Advertiser'},
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'adzone.adzone': {
'Meta': {'ordering': "('title',)", 'object_name': 'AdZone'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'adzone.bannerad': {
'Meta': {'object_name': 'BannerAd', '_ormbases': ['adzone.AdBase']},
'adbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['adzone.AdBase']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
},
'adzone.textad': {
'Meta': {'object_name': 'TextAd', '_ormbases': ['adzone.AdBase']},
'adbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['adzone.AdBase']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['adzone']
symmetrical = True
| bsd-3-clause | -2,767,434,474,907,375,000 | 67.700787 | 182 | 0.544871 | false | 3.672138 | false | false | false |
hyperknot/hunairspace | pgairspace/features.py | 2 | 1686 | import geojson
import json
import os
from geojson import Feature, FeatureCollection
from .config import json_dir, geojson_dir
from .utils import read_json, write_file_contents
from .geom import sort_geojson
def make_features(border, process_raw_geometry):
print '---\nMaking features'
features = list()
for filename in os.listdir(json_dir):
json_file = os.path.join(json_dir, filename)
data = read_json(json_file)
for d in data:
if d['geom_raw'] == 'Lateral limits as for Budapest FIR': # TODO
continue
if d['geom_raw'] == 'along border AUSTRIA_HUNGARY then a clokwise arc radius centered on 7.6 KM 474052N 0164600E': # TODO
continue
geom = process_raw_geometry(d['geom_raw'], border)
# making union when geom_raw_union is present
if 'geom_raw_union' in d:
geom_and = process_raw_geometry(d['geom_raw_union'], border)
geom = geom.union(geom_and)
properties = {k: v for k, v in d.iteritems() if not k.startswith('geom')}
feature = Feature(geometry=geom, id=1, properties=properties)
features.append(feature)
return features
def write_geojsons(features):
classes = {f['properties']['class'] for f in features}
for cl in classes:
fc = FeatureCollection([f for f in features if f['properties']['class'] == cl])
geojson_data = sort_geojson(json.loads(geojson.dumps(fc)))
body = json.dumps(geojson_data, ensure_ascii=False, indent=2, sort_keys=True)
write_file_contents(os.path.join(geojson_dir, '{}.geojson'.format(cl)), body)
| mit | -1,923,401,199,989,005,800 | 30.222222 | 136 | 0.626928 | false | 3.681223 | false | false | false |
M4rtinK/anaconda | tests/nosetests/pyanaconda_tests/signal_test.py | 5 | 5383 | #
# Martin Kolman <mkolman@redhat.com>
#
# Copyright 2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
# Test the Python-based signal and slot implementation.
#
import unittest
from pyanaconda.core.signal import Signal
class FooClass(object):
def __init__(self):
self._var = None
@property
def var(self):
return self._var
def set_var(self, value):
self._var = value
class SignalTestCase(unittest.TestCase):
def setUp(self):
self.var = None
def method_test(self):
"""Test if a method can be correctly connected to a signal."""
signal = Signal()
foo = FooClass()
self.assertIsNone(foo.var)
# connect the signal
signal.connect(foo.set_var)
# trigger the signal
signal.emit("bar")
# check if the callback triggered correctly
self.assertEqual(foo.var, "bar")
# try to trigger the signal again
signal.emit("baz")
self.assertEqual(foo.var, "baz")
# now try to disconnect the signal
signal.disconnect(foo.set_var)
# check that calling the signal again
# no longer triggers the callback
signal.emit("anaconda")
self.assertEqual(foo.var, "baz")
def function_test(self):
"""Test if a local function can be correctly connected to a signal."""
# create a local function
def set_var(value):
self.var = value
signal = Signal()
self.assertIsNone(self.var)
# connect the signal
signal.connect(set_var)
# trigger the signal
signal.emit("bar")
# check if the callback triggered correctly
self.assertEqual(self.var, "bar")
# try to trigger the signal again
signal.emit("baz")
self.assertEqual(self.var, "baz")
# now try to disconnect the signal
signal.disconnect(set_var)
# check that calling the signal again
# no longer triggers the callback
signal.emit("anaconda")
self.assertEqual(self.var, "baz")
def lambda_test(self):
"""Test if a lambda can be correctly connected to a signal."""
foo = FooClass()
signal = Signal()
self.assertIsNone(foo.var)
# connect the signal
# pylint: disable=unnecessary-lambda
lambda_instance = lambda x: foo.set_var(x)
signal.connect(lambda_instance)
# trigger the signal
signal.emit("bar")
# check if the callback triggered correctly
self.assertEqual(foo.var, "bar")
# try to trigger the signal again
signal.emit("baz")
self.assertEqual(foo.var, "baz")
# now try to disconnect the signal
signal.disconnect(lambda_instance)
# check that calling the signal again
# no longer triggers the callback
signal.emit("anaconda")
self.assertEqual(foo.var, "baz")
def clear_test(self):
"""Test if the clear() method correctly clears any connected callbacks."""
def set_var(value):
self.var = value
signal = Signal()
foo = FooClass()
lambda_foo = FooClass()
self.assertIsNone(foo.var)
self.assertIsNone(lambda_foo.var)
self.assertIsNone(self.var)
# connect the callbacks
signal.connect(set_var)
signal.connect(foo.set_var)
# pylint: disable=unnecessary-lambda
signal.connect(lambda x: lambda_foo.set_var(x))
# trigger the signal
signal.emit("bar")
# check that the callbacks were triggered
self.assertEqual(self.var, "bar")
self.assertEqual(foo.var, "bar")
self.assertEqual(lambda_foo.var, "bar")
# clear the callbacks
signal.clear()
# trigger the signal again
signal.emit("anaconda")
# check that the callbacks were not triggered
self.assertEqual(self.var, "bar")
self.assertEqual(foo.var, "bar")
self.assertEqual(lambda_foo.var, "bar")
def signal_chain_test(self):
"""Check if signals can be chained together."""
foo = FooClass()
self.assertIsNone(foo.var)
signal1 = Signal()
signal1.connect(foo.set_var)
signal2 = Signal()
signal2.connect(signal1.emit)
signal3 = Signal()
signal3.connect(signal2.emit)
# trigger the chain
signal3.emit("bar")
# check if the initial callback was triggered
self.assertEqual(foo.var, "bar")
| gpl-2.0 | 5,196,488,187,354,119,000 | 33.50641 | 82 | 0.631618 | false | 4.262074 | true | false | false |
funkandwagnalls/pythonpentest | multi_process.py | 2 | 8441 | #!/usr/bin/env python
'''
Author: Christopher Duffy
Date: July 2015
Name: multi_process.py
Purpose: To identify live web applications with a list of IP addresses, using parallel processes
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import multiprocessing, urllib2, argparse, sys, logging, datetime, time
def host_request(host):
print("[*] Testing %s") % (str(host))
target = "http://" + host
target_secure = "https://" + host
timenow = time.time()
record = datetime.datetime.fromtimestamp(timenow).strftime('%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(record)
try:
request = urllib2.Request(target)
request.get_method = lambda : 'HEAD'
response = urllib2.urlopen(request)
response_data = str(response.info())
logger.debug("[*] %s" % response_data)
response.close()
except:
response = None
response_data = None
try:
        request_secure = urllib2.Request(target_secure)
        request_secure.get_method = lambda : 'HEAD'
        response_secure = urllib2.urlopen(request_secure)
        response_secure_data = str(response_secure.info())
        logger.debug("[*] %s" % response_secure_data)
        response_secure.close()
except:
response_secure = None
response_secure_data = None
if response_data != None and response_secure_data != None:
r = "[+] Insecure webserver detected at %s reported by %s" % (target, str(multiprocessing.Process().name))
rs = "[+] Secure webserver detected at %s reported by %s" % (target_secure, str(multiprocessing.Process().name))
logger.debug("[+] Insecure web server detected at %s and reported by process %s" % (str(target), str(multiprocessing.Process().name)))
logger.debug("[+] Secure web server detected at %s and reported by process %s" % (str(target_secure), str(multiprocessing.Process().name)))
return(r, rs)
elif response_data == None and response_secure_data == None:
r = "[-] No insecure webserver at %s reported by %s" % (target, str(multiprocessing.Process().name))
rs = "[-] No secure webserver at %s reported by %s" % (target_secure, str(multiprocessing.Process().name))
logger.debug("[-] Insecure web server was not detected at %s and reported by process %s" % (str(target), str(multiprocessing.Process().name)))
logger.debug("[-] Secure web server was not detected at %s and reported by process %s" % (str(target_secure), str(multiprocessing.Process().name)))
return(r, rs)
elif response_data != None and response_secure_data == None:
r = "[+] Insecure webserver detected at %s reported by %s" % (target, str(multiprocessing.Process().name))
rs = "[-] No secure webserver at %s reported by %s" % (target_secure, str(multiprocessing.Process().name))
logger.debug("[+] Insecure web server detected at %s and reported by process %s" % (str(target), str(multiprocessing.Process().name)))
logger.debug("[-] Secure web server was not detected at %s and reported by process %s" % (str(target_secure), str(multiprocessing.Process().name)))
return(r, rs)
elif response_secure_data != None and response_data == None:
r = "[-] No insecure webserver at %s reported by %s" % (target, str(multiprocessing.Process().name))
rs = "[+] Secure webserver detected at %s reported by %s" % (target_secure, str(multiprocessing.Process().name))
logger.debug("[-] Insecure web server was not detected at %s and reported by process %s" % (str(target), str(multiprocessing.Process().name)))
logger.debug("[+] Secure web server detected at %s and reported by process %s" % (str(target_secure), str(multiprocessing.Process().name)))
return(r, rs)
else:
logger.debug("[-] No results were recorded for %s or %s" % (str(target), str(target_secure)))
def log_init(log):
level = logging.DEBUG # Logging level
format = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s") # Log format
logger_obj = logging.getLogger() # Getter for logging agent
file_handler = logging.FileHandler(log) # File Handler
#stderr_handler = logging.StreamHandler() # STDERR Handler
targets_list = []
# Configure logger formats for STDERR and output file
file_handler.setFormatter(format)
#stderr_handler.setFormatter(format)
# Configure logger object
logger_obj.addHandler(file_handler)
#logger_obj.addHandler(stderr_handler)
logger_obj.setLevel(level)
def main():
# If script is executed at the CLI
    usage = '''usage: %(prog)s [-t hostfile] [-l logfile.log] [-m 2] -q -v -vv -vvv'''
parser = argparse.ArgumentParser(usage=usage)
parser.add_argument("-t", action="store", dest="targets", default=None, help="Filename for hosts to test")
parser.add_argument("-m", "--multi", action="store", dest="multiprocess", default=1, type=int, help="Number of proceses, defaults to 1")
parser.add_argument("-l", "--logfile", action="store", dest="log", default="results.log", type=str, help="The log file to output the results")
parser.add_argument("-v", action="count", dest="verbose", default=1, help="Verbosity level, defaults to one, this outputs each command and result")
parser.add_argument("-q", action="store_const", dest="verbose", const=0, help="Sets the results to be quiet")
parser.add_argument('--version', action='version', version='%(prog)s 0.42b')
args = parser.parse_args()
# Argument Validator
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
if (args.targets == None):
parser.print_help()
sys.exit(1)
# Set Constructors
targets = args.targets # Targets to be parsed
verbose = args.verbose # Verbosity level
processes = args.multiprocess # Threads to be used
log = args.log # Configure the log output file
if ".log" not in log:
log = log + ".log"
# Load the targets into a list and remove trailing "\n"
with open(targets) as f:
targets_list = [line.rstrip() for line in f.readlines()]
# Establish thread list
pool = multiprocessing.Pool(processes=processes, initializer=log_init(log))
# Queue up the targets to assess
results = pool.map(host_request, targets_list)
for result in results:
for value in result:
print(value)
if __name__ == '__main__':
main()
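# Example invocation (illustrative only; targets.txt holds one hostname or IP per line):
#   python multi_process.py -t targets.txt -m 4 -l results.log
# Each pooled process issues HTTP and HTTPS HEAD requests against its host and the
# collected results are printed once every target has been tested.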
| bsd-3-clause | 1,954,952,929,908,768,800 | 56.421769 | 155 | 0.637721 | false | 4.166338 | false | false | false |
omf2097/pyomftools | omftools/pyshadowdive/tournament.py | 1 | 3755 | import typing
from .protos import Entrypoint
from .sprite import Sprite
from .palette import Palette
from .pilot import Pilot
class TournamentFile(Entrypoint):
MAX_ENEMIES = 256
MAX_LOCALES = 10
__slots__ = (
"bk_name",
"winnings_multiplier",
"unknown_a",
"registration_fee",
"assumed_initial_value",
"tournament_id",
"pic_filename",
"locale_logos",
"locale_descriptions",
"locale_titles",
"locale_end_texts",
"palette",
"pilots",
)
def __init__(self):
self.bk_name: str = ""
self.winnings_multiplier: float = 0.0
self.unknown_a: int = 0
self.registration_fee: int = 0
self.assumed_initial_value: int = 0
self.tournament_id: int = 0
self.pic_filename: str = ""
self.palette: Palette = Palette()
self.locale_logos: typing.List[Sprite] = []
self.locale_descriptions: typing.List[str] = []
self.locale_titles: typing.List[str] = []
self.locale_end_texts: typing.List[typing.List[typing.List[str]]] = []
self.pilots: typing.List[Pilot] = []
def serialize(self):
return {
"bk_name": self.bk_name,
"winnings_multiplier": self.winnings_multiplier,
"unknown_a": self.unknown_a,
"registration_fee": self.registration_fee,
"assumed_initial_value": self.assumed_initial_value,
"tournament_id": self.tournament_id,
"pic_filename": self.pic_filename,
"locale_logos": [logo.serialize() for logo in self.locale_logos],
"locale_descriptions": self.locale_descriptions,
"locale_titles": self.locale_titles,
"locale_end_texts": self.locale_end_texts,
"palette": self.palette.serialize(),
"pilots": [p.serialize() for p in self.pilots],
}
def read(self, parser):
enemy_count = parser.get_uint32()
victory_text_offset = parser.get_uint32()
self.bk_name = parser.get_null_padded_str(14)
self.winnings_multiplier = parser.get_float()
self.unknown_a = parser.get_uint32()
self.registration_fee = parser.get_uint32()
self.assumed_initial_value = parser.get_uint32()
self.tournament_id = parser.get_uint32()
# Enemy block offsets
parser.set_pos(300)
offsets = [parser.get_uint32() for _ in range(enemy_count + 1)]
# Enemy data
for m in range(enemy_count):
parser.set_pos(offsets[m])
self.pilots.append(Pilot().read(parser))
# Seek to locales
parser.set_pos(offsets[enemy_count])
# Load logo sprites
self.locale_logos: typing.List[Sprite] = [
Sprite().read(parser) for _ in range(self.MAX_LOCALES)
]
# Tournament palette
self.palette = Palette().read_range(parser, 128, 40)
# Tournament PIC file name
self.pic_filename = parser.get_var_str(size_includes_zero=True)
# Locale texts
for m in range(self.MAX_LOCALES):
self.locale_titles.append(parser.get_var_str(size_includes_zero=True))
self.locale_descriptions.append(parser.get_var_str(size_includes_zero=True))
# Seek to victory texts
parser.set_pos(victory_text_offset)
# Get all end text pages for all pilots for all locales
for t in range(self.MAX_LOCALES):
pilots = []
for h in range(11):
pilots.append(
[parser.get_var_str(size_includes_zero=True) for _ in range(10)]
)
self.locale_end_texts.append(pilots)
return self
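# Note: TournamentFile.read() assumes a binary parser object exposing the methods
# used above (get_uint32, get_null_padded_str, get_float, set_pos, get_var_str, ...);
# Pilot, Sprite and Palette objects are populated from the same parser instance.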
| mit | -2,213,516,994,580,909,800 | 32.526786 | 88 | 0.580826 | false | 3.617534 | false | false | false |
igatoolsProject/igatools | source/basis_functions/bernstein_extraction.serial.py | 1 | 1764 | #-+--------------------------------------------------------------------
# Igatools a general purpose Isogeometric analysis library.
# Copyright (C) 2012-2016 by the igatools authors (see authors.txt).
#
# This file is part of the igatools library.
#
# The igatools library is free software: you can use it, redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-+--------------------------------------------------------------------
# QA (pauletti, Jun 6, 2014):
from init_instantiation_data import *
data = Instantiation()
(f, inst) = (data.file_output, data.inst)
classes = ['BernsteinExtraction<%d,%d,%d>' %(x.dim, x.range, x.rank)
for x in inst.all_ref_sp_dims]
classes.append('BernsteinExtraction<0,0,1>')
#---------------------------------------------------
f.write('IGA_NAMESPACE_CLOSE\n')
archives = ['OArchive','IArchive']
id = 0
for c in unique(classes):
alias = 'BernsteinExtractionAlias%d' %(id)
f.write('using %s = iga::%s; \n' % (alias, c))
for ar in archives:
f.write('CEREAL_SPECIALIZE_FOR_ARCHIVE(%s,%s,cereal::specialization::member_serialize)\n' %(ar,alias))
id += 1
f.write('IGA_NAMESPACE_OPEN\n')
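# For illustration (assuming a hypothetical dim/range/rank combination of <1,1,1>),
# one loop iteration above emits roughly:
#   using BernsteinExtractionAlias0 = iga::BernsteinExtraction<1,1,1>;
#   CEREAL_SPECIALIZE_FOR_ARCHIVE(OArchive,BernsteinExtractionAlias0,cereal::specialization::member_serialize)
#   CEREAL_SPECIALIZE_FOR_ARCHIVE(IArchive,BernsteinExtractionAlias0,cereal::specialization::member_serialize)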
#---------------------------------------------------
| gpl-3.0 | -4,496,167,519,290,841,000 | 35 | 110 | 0.606576 | false | 3.614754 | false | false | false |
tvtsoft/odoo8 | addons/hr/res_config.py | 2 | 3066 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
class hr_config_settings(osv.osv_memory):
_name = 'hr.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_hr_timesheet_sheet': fields.boolean('Allow timesheets validation by managers',
help ="""This installs the module hr_timesheet_sheet."""),
'module_hr_attendance': fields.boolean('Install attendances feature',
help ="""This installs the module hr_attendance."""),
'module_hr_timesheet': fields.boolean('Manage timesheets',
help ="""This installs the module hr_timesheet."""),
'module_hr_holidays': fields.boolean('Manage holidays, leaves and allocation requests',
help ="""This installs the module hr_holidays."""),
'module_hr_expense': fields.boolean('Manage employees expenses',
help ="""This installs the module hr_expense."""),
'module_hr_recruitment': fields.boolean('Manage the recruitment process',
help ="""This installs the module hr_recruitment."""),
'module_hr_contract': fields.boolean('Record contracts per employee',
help ="""This installs the module hr_contract."""),
'module_hr_evaluation': fields.boolean('Organize employees periodic evaluation',
help ="""This installs the module hr_evaluation."""),
'module_hr_gamification': fields.boolean('Drive engagement with challenges and badges',
help ="""This installs the module hr_gamification."""),
'module_sale_contract': fields.boolean('Allow invoicing based on timesheets (the sale application will be installed)',
help ="""This installs the module sale_contract, which will install sales management too."""),
'module_hr_payroll': fields.boolean('Manage payroll',
help ="""This installs the module hr_payroll."""),
'module_website_hr_recruitment': fields.boolean('Publish jobs on your website',
help ="""This installs the module website_hr_recruitment"""),
'group_hr_attendance': fields.boolean('Track attendances for all employees',
implied_group='base.group_hr_attendance',
help="Allocates attendance group to all users."),
}
def onchange_hr_timesheet(self, cr, uid, ids, timesheet, context=None):
""" module_hr_timesheet implies module_hr_attendance """
if timesheet:
return {'value': {'module_hr_attendance': True}}
return {}
def onchange_hr_attendance(self, cr, uid, ids, attendance, context=None):
""" module_hr_timesheet implies module_hr_attendance """
if not attendance:
return {'value': {'module_hr_timesheet': False,'group_hr_attendance': False}}
return {}
def onchange_group_hr_attendance(self, cr, uid, ids, hr_attendance, context=None):
if hr_attendance:
return {'value': {'module_hr_attendance': True}}
return {}
| agpl-3.0 | 354,536,197,653,338,600 | 54.745455 | 126 | 0.644162 | false | 4.258333 | false | false | false |
Proteogenomics/trackhub-creator | pipelines/ensembl_data_collector.py | 1 | 4594 | #
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 26-07-2017 12:29
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
This pipeline collects data from Ensembl to avoid race conditions when running other pipelines that use this data
"""
import os
import time
# Application imports
import config_manager
import ensembl
from pipelines.template_pipeline import DirectorConfigurationManager, Director
__configuration_file = None
__pipeline_arguments = None
__pipeline_director = None
def set_configuration_file(config_file):
global __configuration_file
if __configuration_file is None:
__configuration_file = config_file
return __configuration_file
def set_pipeline_arguments(pipeline_arguments):
global __pipeline_arguments
if __pipeline_arguments is None:
__pipeline_arguments = pipeline_arguments
return __pipeline_arguments
def get_pipeline_director():
global __pipeline_director
if __pipeline_director is None:
__pipeline_director = EnsemblDataCollector(config_manager.read_config_from_file(__configuration_file),
__configuration_file,
__pipeline_arguments)
return __pipeline_director
class ConfigManager(DirectorConfigurationManager):
_CONFIG_OBJECT_KEY_NCBI_TAXONOMY_IDS = 'ncbi_taxonomy_ids'
def __init__(self, configuration_object, configuration_file, pipeline_arguments):
super(ConfigManager, self).__init__(configuration_object, configuration_file, pipeline_arguments)
self.__pipeline_arguments_object = None
def _process_pipeline_arguments(self):
# Pipeline arguments for this pipeline are like: "ncbi_taxonomy_ids=id,id,id"
id_list = []
if self._get_pipeline_arguments():
id_list = self._get_pipeline_arguments().split('=')[1].split(',')
return {
self._CONFIG_OBJECT_KEY_NCBI_TAXONOMY_IDS: id_list
}
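    # For example (hypothetical IDs), a pipeline argument string of
    #   "ncbi_taxonomy_ids=9606,10090"
    # is parsed by _process_pipeline_arguments() into
    #   {'ncbi_taxonomy_ids': ['9606', '10090']}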
def get_ncbi_taxonomy_ids(self):
return self._get_pipeline_arguments_object()[self._CONFIG_OBJECT_KEY_NCBI_TAXONOMY_IDS]
class EnsemblDataCollector(Director):
"""
This pipeline collects data from the latest Ensembl release for the given taxonomies
"""
def __init__(self, configuration_object, configuration_file, pipeline_arguments):
runner_id = "{}-{}".format(__name__, time.time())
super(EnsemblDataCollector, self).__init__(runner_id)
self.__config_manager = ConfigManager(configuration_object, configuration_file, pipeline_arguments)
def _get_configuration_manager(self):
return self.__config_manager
def __check_downloaded_files(self, files_names_and_paths):
result = True
for file_name, file_path in files_names_and_paths:
if not os.path.exists(file_path):
result = False
self._get_logger().error("MISSING ENSEMBL file '{}' at '{}'".format(file_name, file_path))
return result
def _run_pipeline(self):
# TODO - I can easily parallelize this using the parallel module
# Main pipeline algorithm
self._get_logger().info("[START]---> Pipeline run")
self._get_logger().info("Collecting Ensembl data for NCBI Taxonomies: {}"
.format(",".join(self._get_configuration_manager().get_ncbi_taxonomy_ids())))
ensembl_downloader_service = ensembl.data_downloader.get_data_download_service()
for ncbi_taxonomy_id in self._get_configuration_manager().get_ncbi_taxonomy_ids():
downloaded_protein_sequences = ensembl_downloader_service \
.get_protein_sequences_for_species(ncbi_taxonomy_id)
downloaded_gtf_files = ensembl_downloader_service \
.get_genome_reference_for_species(ncbi_taxonomy_id)
if not downloaded_protein_sequences:
self._get_logger().error("MISSING protein sequence data for taxonomy ID #{}".format(ncbi_taxonomy_id))
else:
self.__check_downloaded_files(downloaded_protein_sequences)
if not downloaded_gtf_files:
self._get_logger().error("MISSING genome reference data for taxonomy ID #{}".format(ncbi_taxonomy_id))
else:
self.__check_downloaded_files(downloaded_gtf_files)
return True
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
| apache-2.0 | -5,812,039,569,916,996,000 | 39.584071 | 118 | 0.655255 | false | 4.022807 | true | false | false |
zhs007/movieSpider | moviespider/moviespider/spiders/doubanmovie_spider.py | 1 | 1869 | # -*- coding: utf-8 -*-
from scrapy.spider import Spider
from scrapy.selector import Selector
from moviespider.items import Cili006Item
from moviespider.moviedb import MovieDB
import scrapy
class DoubanSearch2Spider(Spider):
moviedb = MovieDB()
name = "doubanmovie"
allowed_domains = ["douban.com"]
def start_requests(self):
lst = self.moviedb.getDoubanID_douban()
lstreq = []
for cur in lst:
req = scrapy.FormRequest("http://movie.douban.com/subject/%d/" % (cur[0]), callback=self.search_parse)
req.__setattr__('doubanid', cur[0])
lstreq.append(req)
#break
return lstreq
def search_parse(self, response):
sel = Selector(response)
print 'myparam is %d' % (response.request.doubanid)
title = sel.css('title')[0].xpath('./text()')[0].extract().strip()
print 'title is ' + title
photo = sel.css('a.nbgnbg')[0]
imgurl = photo.xpath('./img/@src')[0].extract()
arr1 = imgurl.split('/')
print 'img is ' + arr1[len(arr1) - 1]
self.moviedb.updMovie_doubanmovie(response.request.doubanid, title, arr1[len(arr1) - 1])
arrinfo = sel.css('div#info')
for curinfo in arrinfo:
print 'info is ' + curinfo.extract()
            bi = curinfo.extract().find(u'>又名:</span>')
if bi > 0:
tstr = curinfo.extract()[bi + len(u'>又名:</span>'):]
ei = tstr.find('<br>')
tsrt1 = tstr[0:ei].strip()
print 'other name is ' + tsrt1
tarr1 = tsrt1.split('/')
for t1 in tarr1:
t1 = t1.strip()
print 't1 is ' + t1
self.moviedb.addMovieName_doubanmovie(response.request.doubanid, t1)
break
return [] | mit | 486,270,143,788,835,460 | 31.666667 | 114 | 0.547555 | false | 3.3898 | false | false | false |
k323r/flugkatze | cockpit/python/livePyQTPlot.py | 1 | 1663 |
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import collections
import random
import time
import math
import numpy as np
class DynamicPlotter():
def __init__(self, sampleinterval=0.1, timewindow=10., size=(600,350)):
# Data stuff
self._interval = int(sampleinterval*1000)
print(self._interval)
self._bufsize = int(timewindow/sampleinterval)
print(self._bufsize)
self.databuffer = collections.deque([0.0]*self._bufsize, self._bufsize)
self.x = np.linspace(-timewindow, 0.0, self._bufsize)
self.y = np.zeros(self._bufsize, dtype=np.float)
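        # e.g. with the constructor defaults (timewindow=10 s, sampleinterval=0.1 s)
        # the ring buffer above holds 100 samples and the QTimer below fires every 100 ms.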
# PyQtGraph stuff
self.app = QtGui.QApplication([])
self.plt = pg.plot(title='Dynamic Plotting with PyQtGraph')
self.plt.resize(*size)
self.plt.showGrid(x=True, y=True)
self.plt.setLabel('left', 'amplitude', 'V')
self.plt.setLabel('bottom', 'time', 's')
self.curve = self.plt.plot(self.x, self.y, pen=(255,0,0))
# QTimer
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.updateplot)
self.timer.start(self._interval)
def getdata(self):
frequency = 0.5
noise = random.normalvariate(0., 1.)
new = 10.*math.sin(time.time()*frequency*2*math.pi) + noise
return new
def updateplot(self):
self.databuffer.append( self.getdata() )
self.y[:] = self.databuffer
self.curve.setData(self.x, self.y)
self.app.processEvents()
def run(self):
self.app.exec_()
if __name__ == '__main__':
m = DynamicPlotter(sampleinterval=0.05, timewindow=10.)
m.run()
| mit | 2,714,380,203,178,526,000 | 30.377358 | 79 | 0.616356 | false | 3.443064 | false | false | false |
zzzzrrr/openmelee | render/sdl.py | 1 | 3905 | #
# Copyright (c) 2009 Mason Green & Tom Novelli
#
# This file is part of OpenMelee.
#
# OpenMelee is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# OpenMelee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenMelee. If not, see <http://www.gnu.org/licenses/>.
#
import os
import pygame
import players.kbd_sdl as kbd
from utils import transform
BLACK = 0, 0, 0, 0
WHITE = 255,255,255
class Window:
"A Pygame/SDL interface in the style of Pyglet"
backend = 'sdl'
def __init__(self):
# Initialize Pygame/SDL
os.environ['SDL_VIDEO_WINDOW_POS'] = self.WINDOW_POSITION
pygame.init()
self.screen = pygame.display.set_mode((self.sizeX, self.sizeY))
'''
if sys.hexversion >= 0x2060000:
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Next statement gives this warning... no big deal...
# sysfont.py:139: DeprecationWarning: os.popen3 is deprecated. Use the subprocess module.
self.font = pygame.font.SysFont("", 24)
'''
self.font = pygame.font.SysFont("", 24)
print "Pygame (SDL) backend"
print " Timer resolution: %dms" % pygame.TIMER_RESOLUTION
try:
print " Using %s smoothscale backend." % pygame.transform.get_smoothscale_backend()
except AttributeError:
pass
transform.set_screen(self.screen)
self.clock = pygame.time.Clock()
def set_caption(self, caption):
pygame.display.set_caption(caption)
def get_time_ms(self):
return pygame.time.get_ticks()
def on_draw(self):
self.screen.fill(BLACK)
view = self.calculate_view()
zoom, view_center = view
transform.set_view(view)
# Display debug info
if self.net:
s = "player=%d rtt=%s tdiff=%s" % (
self.local_player,
self.net.rtt_avg,
self.net.tdiff_avg,
)
surf = self.font.render(s, False, WHITE)
self.screen.blit(surf, (0,20))
s = "fps=%3d zoom=%3.3f center=%5d,%5d" % (
self.clock.get_fps(),
zoom,
view_center[0], view_center[1],
)
surf = self.font.render(s, False, WHITE)
self.screen.blit(surf, (0,0))
# Common to SDL and GL renderers:
self.planet.draw(self.screen, view)
for s in self.actors:
if s:
s.draw(self.screen, view)
# Draw lines between objects (DEBUG)
'''
a = transform.to_sdl(self.planet.body.position)
for ship in self.actors:
b = transform.to_sdl(ship.body.position)
pygame.draw.line(self.screen, (0,0,255), a, b)
'''
# Draw world bounding box
c = 90, 230, 230
ub = self.aabb.upper_bound
lb = self.aabb.lower_bound
x1,y1 = transform.to_sdl((lb.x, lb.y))
x2,y2 = transform.to_sdl((ub.x, ub.y))
pygame.draw.rect(self.screen, c, pygame.Rect(x1, y1, x2-x1, y2-y1), 2)
# End of frame
pygame.display.update()
self.clock.tick(self.frame_rate)
def mainloop(self):
while 1:
if kbd.process_events(self):
return
if self.net:
self.net.process_events(self)
self.update()
self.on_draw()
| gpl-3.0 | 6,273,487,270,064,252,000 | 30.491935 | 106 | 0.579513 | false | 3.697917 | false | false | false |
dariusbakunas/rawdisk | rawdisk/plugins/filesystems/ntfs/bootsector.py | 1 | 3080 | # -*- coding: utf-8 -*-
from rawdisk.util.rawstruct import RawStruct
from .headers import BIOS_PARAMETER_BLOCK, EXTENDED_BIOS_PARAMETER_BLOCK
class BootSector(RawStruct):
"""Represents NTFS Bootsector
Attributes:
oem_id (8 byte string): NTFS filesystem signature 'NTFS '
bpb (Bpb): Initialized :class:`~.bpb.Bpb` object.
mft_offset (int): Offset to MFT table from the start of \
NTFS volume in bytes
See More:
http://ntfs.com/ntfs-partition-boot-sector.htm
"""
def __init__(self, data=None, offset=None, length=None, filename=None):
RawStruct.__init__(
self,
data=data,
offset=offset,
length=length,
filename=filename
)
self.oem_id = self.get_string(3, 8)
self.bpb = BIOS_PARAMETER_BLOCK(
self.get_ushort_le(0x0B), # bytes_per_sector
self.get_ubyte(0x0D), # sectors_per_cluster
self.get_ushort_le(0x0E), # reserved_sectors
self.get_ubyte(0x15), # media_type
self.get_ushort_le(0x18), # sectors_per_track
self.get_ushort_le(0x1A), # heads
self.get_uint_le(0x1C), # hidden_sectors
self.get_ulonglong_le(0x28), # total sectors
)
self.extended_bpb = EXTENDED_BIOS_PARAMETER_BLOCK(
self.get_ulonglong_le(0x30), # mft_cluster
self.get_ulonglong_le(0x38), # mft_mirror_cluster
self.get_byte(0x40), # clusters_per_mft
self.get_ubyte(0x44), # clusters_per_index
self.get_ulonglong_le(0x48), # volume_serial
)
@property
def mft_record_size(self):
"""
Returns:
int: MFT record size in bytes
"""
if self.extended_bpb.clusters_per_mft < 0:
return 2 ** abs(self.extended_bpb.clusters_per_mft)
else:
            return self.extended_bpb.clusters_per_mft * \
                self.bpb.sectors_per_cluster * self.bpb.bytes_per_sector
@property
def mft_offset(self):
"""
Returns:
int: MFT Table offset from the beginning of the partition in bytes
"""
return self.bpb.bytes_per_sector * \
self.bpb.sectors_per_cluster * self.extended_bpb.mft_cluster
@property
def mft_mirror_offset(self):
"""
Returns:
int: Mirror MFT Table offset from the beginning of the partition \
in bytes
"""
return self.bpb.bytes_per_sector * \
self.bpb.sectors_per_cluster * self.extended_bpb.mft_mirror_cluster
@property
def total_clusters(self):
return int(self.bpb.total_sectors / self.bpb.sectors_per_cluster)
@property
def bytes_per_cluster(self):
return self.bpb.sectors_per_cluster * self.bpb.bytes_per_sector
@property
def volume_size(self):
"""Returns volume size in bytes"""
return self.bpb.bytes_per_sector * self.bpb.total_sectors
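# Minimal usage sketch (hypothetical image file; RawStruct accepts data/offset/length/filename):
#   bs = BootSector(filename='ntfs_volume.img', offset=0, length=512)
#   print(bs.oem_id, bs.mft_offset, bs.mft_record_size)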
| bsd-3-clause | -8,052,300,448,519,116,000 | 32.478261 | 79 | 0.570779 | false | 3.52 | false | false | false |
nbeck90/django-imager | imager/imager_images/migrations/0002_auto_20150309_1616.py | 1 | 1460 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='imageralbum',
name='published',
field=models.CharField(default=b'public', max_length=7, choices=[(b'public', b'Public'), (b'private', b'Private'), (b'shared', b'Shared')]),
preserve_default=True,
),
migrations.AlterField(
model_name='imageralbum',
name='title',
field=models.CharField(default=b'MyAlbum', max_length=63),
preserve_default=True,
),
migrations.AlterField(
model_name='imagerphoto',
name='description',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='imagerphoto',
name='published',
field=models.CharField(default=b'public', max_length=31, choices=[(b'public', b'Public'), (b'private', b'Private'), (b'shared', b'Shared')]),
preserve_default=True,
),
migrations.AlterField(
model_name='imagerphoto',
name='title',
field=models.CharField(default=b'MyPhoto', max_length=31),
preserve_default=True,
),
]
| mit | -3,027,618,557,306,669,600 | 32.181818 | 153 | 0.560959 | false | 4.219653 | false | false | false |
codefordc/housing-insights | back_end/ETL/project.py | 1 | 6472 | '''
make_projects_table.py
----------------------
This file creates the projects table in the database, which is sent to
the front-end via /api/projects/. It depends on the following data sources:
- Projects.csv, From the Preservation Catalog Folder in the s3
- 'Affordable Housing Data': Updated regularly from open data DC
 - Master Address Repository
Projects that are not from the preservation catalog have an nlihc_id
beginning with "AH" for affordable housing.
'''
from . import utils
from . import wmata
import requests
import numpy as np
import pandas as pd
import geopandas as gp
preservation_catalog_columns = [
'nlihc_id',
'latitude',
'longitude',
'census_tract',
'neighborhood_cluster',
'ward',
'neighborhood_cluster_desc',
# Basic Project Information',
'proj_name',
'proj_addre',
'proj_units_tot',
'proj_address_id',
'proj_units_assist_max',
'proj_owner_type',
'most_recent_reac_score_num',
'most_recent_reac_score_date',
]
def load_preservation_catalog_projects():
'''
Loads the raw data from the preservation catalog.
It is located in 'preservation_catalog' on the S3.
'''
df = pd.read_csv(utils.S3+'preservation_catalog/Project.csv')
df.columns = df.columns.str.lower()
df = utils.get_census_tract_for_data(df, 'proj_lon', 'proj_lat')
df['neighborhood_cluster'] = utils.just_digits(df.cluster_tr2000)
df['ward'] = utils.just_digits(df.ward2012)
df = df.merge(load_reac_data(), how='left')
return df.rename(columns={'proj_lat': 'latitude',
'proj_lon': 'longitude',
'tract': 'census_tract',
'date': 'most_recent_reac_score_date',
'reac_score_num': 'most_recent_reac_score_num',
'cluster_tr2000_name': 'neighborhood_cluster_desc',
})[preservation_catalog_columns]
def load_affordable_housing_projects():
'''Loads and transforms the "Affordabe Housing" raw data from opendata.dc'''
columns = {
'ADDRESS_ID': 'proj_address_id',
'FULLADDRESS': 'proj_addre',
'MAR_WARD': 'ward',
'PROJECT_NAME': 'proj_name',
'TOTAL_AFFORDABLE_UNITS': 'proj_units_tot',
'LATITUDE': 'latitude',
'LONGITUDE': 'longitude',
'tract': 'census_tract',
}
url = utils.get_paths_for_data('affordable_housing', years=utils.get_years())[0]
df = pd.read_csv(url)
df['MAR_WARD'] = utils.just_digits(df['MAR_WARD'])
df = utils.get_census_tract_for_data(df, 'LONGITUDE','LATITUDE')
df = df.rename(columns=columns)[columns.values()]
df = utils.get_cluster_for_data(df, 'longitude', 'latitude')
df['nlihc_id'] = pd.Series(df.index).astype(str).apply(lambda s: 'AH' + s.zfill(6))
return df[['nlihc_id', 'neighborhood_cluster']+ list(columns.values())]
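# Note: the synthetic IDs generated above follow the 'AH' convention described in the
# module docstring, e.g. index 0 becomes 'AH000000' and index 123 becomes 'AH000123'.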
def load_mar_projects():
    '''Loads and transforms the "Address Points" raw data from opendata.dc'''
url = utils.get_paths_for_data('mar', years=utils.get_years())[0]
df = pd.read_csv(url)
df = df[['ADDRESS_ID', 'ACTIVE_RES_UNIT_COUNT', 'SSL', 'CLUSTER_']]
df.columns = ['proj_address_id', 'active_res_unit_count', 'ssl', 'neighborhood_cluster']
return df
def load_tax():
'''Adds the Project Taxable Value attribute to the data.'''
# Tax Data. Seems to update every year.
r = requests.get(
'https://maps2.dcgis.dc.gov/dcgis/rest/services/DCGIS_DATA/Property_and_Land_WebMercator/MapServer/53/query?where=1%3D1&outFields=SSL,ASSESSMENT&returnGeometry=false&outSR=4326&f=json'
)
data = r.json()['features']
return {r['attributes']['SSL']: r['attributes']['ASSESSMENT'] for r in data}
def load_topa():
'''
This function loads the raw TOPA data, grabs the most recent date for
each address id, and counts the number of TOPA notices for each address id.
    It returns a dataframe where the observations are an address id, the most
    recent TOPA notice as a date, and the number of TOPA notices.
'''
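    # The result has one row per address id with columns:
    # proj_address_id, most_recent_topa_date and topa_count.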
df = pd.read_csv(utils.S3+'topa/Rcasd_current.csv')
df.columns = df.columns.str.lower()
df['most_recent_topa_date'] = pd.to_datetime(df['notice_date'])
return pd.concat([
# The most recent topa data.
(df.sort_values('most_recent_topa_date', ascending=False)
.groupby('address_id').first()['most_recent_topa_date']),
# Number of observations per address id.
df.address_id.value_counts()
], axis=1).reset_index().rename(columns={
# Fixing column names
'address_id': 'topa_count', 'index': 'proj_address_id'})
def load_reac_data():
'''Gets REAC information from the s3.'''
df = pd.read_csv(utils.S3+'preservation_catalog/Reac_score.csv')
df.columns = df.columns.str.lower()
df['date'] = pd.to_datetime(df['reac_date'])
df = df.sort_values('date', ascending=False).groupby('nlihc_id').first()
return df[['date', 'reac_score_num']].reset_index()
def load_project_data(engine):
'''With the addition of MAR - this takes a long time (a few minutes).'''
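    # Rough pipeline: stack preservation-catalog and open-data projects, dedupe on
    # proj_address_id, join MAR/tax attributes, neighborhood clusters, TOPA notices
    # and bus stops, then write everything to the 'new_project' table.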
print("Starting load")
df = pd.concat([load_preservation_catalog_projects(),
load_affordable_housing_projects()], sort=True)
df = df.sort_values('nlihc_id').drop_duplicates('proj_address_id')
df = add_mar_and_tax(df)
df = add_neighborhoods(df)
df = df.merge(load_topa(), on='proj_address_id', how='left')
bus = wmata.add_bus_stops(df[['nlihc_id', 'longitude', 'latitude']],
'longitude', 'latitude')
df = df.merge(bus, how='left')
return utils.write_table(df, 'new_project', engine)
def add_mar_and_tax(df):
print("Adding mar and tax")
df = df.merge(load_mar_projects(), on='proj_address_id', how='left')
df['sum_appraised_value_current_total'] = df['ssl'].map(load_tax())
return df
def add_neighborhoods(df):
print("Adding neighborhoods")
# Fix neighborhood Cluster Info
df['neighborhood_cluster_x'] = utils.just_digits(df.neighborhood_cluster_x)
df['neighborhood_cluster_y'] = utils.just_digits(df.neighborhood_cluster_y)
df['neighborhood_cluster'] = df.apply(lambda row: max(
row.neighborhood_cluster_x, row.neighborhood_cluster_y), axis=1)
return df.drop(columns=['neighborhood_cluster_x', 'neighborhood_cluster_y'])
| mit | -7,621,508,386,215,367,000 | 40.487179 | 192 | 0.631335 | false | 3.313876 | false | false | false |
akosiaris/p2pool | p2pool/work.py | 1 | 20038 | from __future__ import division
import base64
import random
import sys
import time
from twisted.internet import defer
from twisted.python import log
import bitcoin.getwork as bitcoin_getwork, bitcoin.data as bitcoin_data
from bitcoin import helper, script, worker_interface
from util import forest, jsonrpc, variable, deferral, math, pack
import p2pool, p2pool.data as p2pool_data
class WorkerBridge(worker_interface.WorkerBridge):
def __init__(self, node, my_pubkey_hash, donation_percentage, merged_urls, worker_fee):
worker_interface.WorkerBridge.__init__(self)
self.recent_shares_ts_work = []
self.node = node
self.my_pubkey_hash = my_pubkey_hash
self.donation_percentage = donation_percentage
self.worker_fee = worker_fee
self.running = True
self.pseudoshare_received = variable.Event()
self.share_received = variable.Event()
self.local_rate_monitor = math.RateMonitor(10*60)
self.removed_unstales_var = variable.Variable((0, 0, 0))
self.removed_doa_unstales_var = variable.Variable(0)
self.my_share_hashes = set()
self.my_doa_share_hashes = set()
self.tracker_view = forest.TrackerView(self.node.tracker, forest.get_attributedelta_type(dict(forest.AttributeDelta.attrs,
my_count=lambda share: 1 if share.hash in self.my_share_hashes else 0,
my_doa_count=lambda share: 1 if share.hash in self.my_doa_share_hashes else 0,
my_orphan_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'orphan' else 0,
my_dead_announce_count=lambda share: 1 if share.hash in self.my_share_hashes and share.share_data['stale_info'] == 'doa' else 0,
)))
@self.node.tracker.verified.removed.watch
def _(share):
if share.hash in self.my_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
assert share.share_data['stale_info'] in [None, 'orphan', 'doa'] # we made these shares in this instance
self.removed_unstales_var.set((
self.removed_unstales_var.value[0] + 1,
self.removed_unstales_var.value[1] + (1 if share.share_data['stale_info'] == 'orphan' else 0),
self.removed_unstales_var.value[2] + (1 if share.share_data['stale_info'] == 'doa' else 0),
))
if share.hash in self.my_doa_share_hashes and self.node.tracker.is_child_of(share.hash, self.node.best_share_var.value):
self.removed_doa_unstales_var.set(self.removed_doa_unstales_var.value + 1)
# MERGED WORK
self.merged_work = variable.Variable({})
@defer.inlineCallbacks
def set_merged_work(merged_url, merged_userpass):
merged_proxy = jsonrpc.Proxy(merged_url, dict(Authorization='Basic ' + base64.b64encode(merged_userpass)))
while self.running:
auxblock = yield deferral.retry('Error while calling merged getauxblock:', 30)(merged_proxy.rpc_getauxblock)()
self.merged_work.set(dict(self.merged_work.value, **{auxblock['chainid']: dict(
hash=int(auxblock['hash'], 16),
target='p2pool' if auxblock['target'] == 'p2pool' else pack.IntType(256).unpack(auxblock['target'].decode('hex')),
merged_proxy=merged_proxy,
)}))
yield deferral.sleep(1)
for merged_url, merged_userpass in merged_urls:
set_merged_work(merged_url, merged_userpass)
@self.merged_work.changed.watch
def _(new_merged_work):
print 'Got new merged mining work!'
# COMBINE WORK
self.current_work = variable.Variable(None)
def compute_work():
t = self.node.bitcoind_work.value
bb = self.node.best_block_header.value
if bb is not None and bb['previous_block'] == t['previous_block'] and self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(bb)) <= t['bits'].target:
print 'Skipping from block %x to block %x!' % (bb['previous_block'],
bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)))
t = dict(
version=bb['version'],
previous_block=bitcoin_data.hash256(bitcoin_data.block_header_type.pack(bb)),
bits=bb['bits'], # not always true
coinbaseflags='',
height=t['height'] + 1,
time=bb['timestamp'] + 600, # better way?
transactions=[],
transaction_fees=[],
merkle_link=bitcoin_data.calculate_merkle_link([None], 0),
subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.node.bitcoind_work.value['height']),
last_update=self.node.bitcoind_work.value['last_update'],
)
self.current_work.set(t)
self.node.bitcoind_work.changed.watch(lambda _: compute_work())
self.node.best_block_header.changed.watch(lambda _: compute_work())
compute_work()
self.new_work_event = variable.Event()
@self.current_work.transitioned.watch
def _(before, after):
# trigger LP if version/previous_block/bits changed or transactions changed from nothing
if any(before[x] != after[x] for x in ['version', 'previous_block', 'bits']) or (not before['transactions'] and after['transactions']):
self.new_work_event.happened()
self.merged_work.changed.watch(lambda _: self.new_work_event.happened())
self.node.best_share_var.changed.watch(lambda _: self.new_work_event.happened())
def stop(self):
self.running = False
def get_stale_counts(self):
'''Returns (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain)'''
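        # Rough meaning: 'orphan' shares lost out to a competing share at the same
        # point in the chain, while 'doa' shares were dead on arrival (stale when
        # submitted); both are compared against what the share chain has recorded.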
my_shares = len(self.my_share_hashes)
my_doa_shares = len(self.my_doa_share_hashes)
delta = self.tracker_view.get_delta_to_last(self.node.best_share_var.value)
my_shares_in_chain = delta.my_count + self.removed_unstales_var.value[0]
my_doa_shares_in_chain = delta.my_doa_count + self.removed_doa_unstales_var.value
orphans_recorded_in_chain = delta.my_orphan_announce_count + self.removed_unstales_var.value[1]
doas_recorded_in_chain = delta.my_dead_announce_count + self.removed_unstales_var.value[2]
my_shares_not_in_chain = my_shares - my_shares_in_chain
my_doa_shares_not_in_chain = my_doa_shares - my_doa_shares_in_chain
return (my_shares_not_in_chain - my_doa_shares_not_in_chain, my_doa_shares_not_in_chain), my_shares, (orphans_recorded_in_chain, doas_recorded_in_chain)
def get_user_details(self, request):
user = request.getUser() if request.getUser() is not None else ''
desired_pseudoshare_target = None
if '+' in user:
user, desired_pseudoshare_difficulty_str = user.rsplit('+', 1)
try:
desired_pseudoshare_target = bitcoin_data.difficulty_to_target(float(desired_pseudoshare_difficulty_str))
except:
pass
desired_share_target = 2**256 - 1
if '/' in user:
user, min_diff_str = user.rsplit('/', 1)
try:
desired_share_target = bitcoin_data.difficulty_to_target(float(min_diff_str))
except:
pass
if random.uniform(0, 100) < self.worker_fee:
pubkey_hash = self.my_pubkey_hash
else:
try:
pubkey_hash = bitcoin_data.address_to_pubkey_hash(user, self.node.net.PARENT)
except: # XXX blah
pubkey_hash = self.my_pubkey_hash
return user, pubkey_hash, desired_share_target, desired_pseudoshare_target
def preprocess_request(self, request):
user, pubkey_hash, desired_share_target, desired_pseudoshare_target = self.get_user_details(request)
return pubkey_hash, desired_share_target, desired_pseudoshare_target
def get_work(self, pubkey_hash, desired_share_target, desired_pseudoshare_target):
if (self.node.p2p_node is None or len(self.node.p2p_node.peers) == 0) and self.node.net.PERSIST:
raise jsonrpc.Error_for_code(-12345)(u'p2pool is not connected to any peers')
if self.node.best_share_var.value is None and self.node.net.PERSIST:
raise jsonrpc.Error_for_code(-12345)(u'p2pool is downloading shares')
if time.time() > self.current_work.value['last_update'] + 60:
raise jsonrpc.Error_for_code(-12345)(u'lost contact with bitcoind')
if self.merged_work.value:
tree, size = bitcoin_data.make_auxpow_tree(self.merged_work.value)
mm_hashes = [self.merged_work.value.get(tree.get(i), dict(hash=0))['hash'] for i in xrange(size)]
mm_data = '\xfa\xbemm' + bitcoin_data.aux_pow_coinbase_type.pack(dict(
merkle_root=bitcoin_data.merkle_hash(mm_hashes),
size=size,
nonce=0,
))
mm_later = [(aux_work, mm_hashes.index(aux_work['hash']), mm_hashes) for chain_id, aux_work in self.merged_work.value.iteritems()]
else:
mm_data = ''
mm_later = []
tx_hashes = [bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx)) for tx in self.current_work.value['transactions']]
tx_map = dict(zip(tx_hashes, self.current_work.value['transactions']))
if self.node.best_share_var.value is None:
share_type = p2pool_data.Share
else:
previous_share = self.node.tracker.items[self.node.best_share_var.value]
previous_share_type = type(previous_share)
if previous_share_type.SUCCESSOR is None or self.node.tracker.get_height(previous_share.hash) < self.node.net.CHAIN_LENGTH:
share_type = previous_share_type
else:
successor_type = previous_share_type.SUCCESSOR
counts = p2pool_data.get_desired_version_counts(self.node.tracker,
self.node.tracker.get_nth_parent_hash(previous_share.hash, self.node.net.CHAIN_LENGTH*9//10), self.node.net.CHAIN_LENGTH//10)
                # Share -> NewShare only valid if 95% of hashes in [net.CHAIN_LENGTH*9//10, net.CHAIN_LENGTH] are for the new version
if counts.get(successor_type.VERSION, 0) > sum(counts.itervalues())*95//100:
share_type = successor_type
else:
share_type = previous_share_type
if True:
share_info, gentx, other_transaction_hashes, get_share = share_type.generate_transaction(
tracker=self.node.tracker,
share_data=dict(
previous_share_hash=self.node.best_share_var.value,
coinbase=(script.create_push_script([
self.current_work.value['height'],
] + ([mm_data] if mm_data else []) + [
]) + self.current_work.value['coinbaseflags'])[:100],
nonce=random.randrange(2**32),
pubkey_hash=pubkey_hash,
subsidy=self.current_work.value['subsidy'],
donation=math.perfect_round(65535*self.donation_percentage/100),
stale_info=(lambda (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain):
'orphan' if orphans > orphans_recorded_in_chain else
'doa' if doas > doas_recorded_in_chain else
None
)(*self.get_stale_counts()),
desired_version=share_type.SUCCESSOR.VERSION if share_type.SUCCESSOR is not None else share_type.VERSION,
),
block_target=self.current_work.value['bits'].target,
desired_timestamp=int(time.time() + 0.5),
desired_target=desired_share_target,
ref_merkle_link=dict(branch=[], index=0),
desired_other_transaction_hashes_and_fees=zip(tx_hashes, self.current_work.value['transaction_fees']),
net=self.node.net,
known_txs=tx_map,
base_subsidy=self.node.net.PARENT.SUBSIDY_FUNC(self.current_work.value['height']),
)
transactions = [gentx] + [tx_map[tx_hash] for tx_hash in other_transaction_hashes]
mm_later = [(dict(aux_work, target=aux_work['target'] if aux_work['target'] != 'p2pool' else share_info['bits'].target), index, hashes) for aux_work, index, hashes in mm_later]
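        # Choose a pseudoshare target: if the miner did not request a difficulty,
        # estimate the local hash rate from the last 50 pseudoshares and aim for
        # roughly one pseudoshare per second; the target is then kept no harder than
        # the share target or any merged-mining target and clipped to a sane range.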
if desired_pseudoshare_target is None:
target = 2**256-1
if len(self.recent_shares_ts_work) == 50:
hash_rate = sum(work for ts, work in self.recent_shares_ts_work[1:])//(self.recent_shares_ts_work[-1][0] - self.recent_shares_ts_work[0][0])
if hash_rate:
target = min(target, int(2**256/hash_rate))
else:
target = desired_pseudoshare_target
target = max(target, share_info['bits'].target)
for aux_work, index, hashes in mm_later:
target = max(target, aux_work['target'])
target = math.clip(target, self.node.net.PARENT.SANE_TARGET_RANGE)
getwork_time = time.time()
lp_count = self.new_work_event.times
merkle_link = bitcoin_data.calculate_merkle_link([bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx)) for tx in transactions], 0)
print 'New work for worker! Difficulty: %.06f Share difficulty: %.06f Total block value: %.6f %s including %i transactions' % (
bitcoin_data.target_to_difficulty(target),
bitcoin_data.target_to_difficulty(share_info['bits'].target),
self.current_work.value['subsidy']*1e-8, self.node.net.PARENT.SYMBOL,
len(self.current_work.value['transactions']),
)
ba = bitcoin_getwork.BlockAttempt(
version=min(self.current_work.value['version'], 2),
previous_block=self.current_work.value['previous_block'],
merkle_root=bitcoin_data.check_merkle_link(bitcoin_data.hash256(bitcoin_data.tx_type.pack(transactions[0])), merkle_link),
timestamp=self.current_work.value['time'],
bits=self.current_work.value['bits'],
share_target=target,
)
received_header_hashes = set()
def got_response(header, request):
header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header))
pow_hash = self.node.net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(header))
try:
if pow_hash <= header['bits'].target or p2pool.DEBUG:
helper.submit_block(dict(header=header, txs=transactions), False, self.node.factory, self.node.bitcoind, self.node.bitcoind_work, self.node.net)
if pow_hash <= header['bits'].target:
print
print 'GOT BLOCK FROM MINER! Passing to bitcoind! %s%064x' % (self.node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX, header_hash)
print
except:
log.err(None, 'Error while processing potential block:')
user, _, _, _ = self.get_user_details(request)
assert header['previous_block'] == ba.previous_block
assert header['merkle_root'] == ba.merkle_root
assert header['bits'] == ba.bits
on_time = self.new_work_event.times == lp_count
for aux_work, index, hashes in mm_later:
try:
if pow_hash <= aux_work['target'] or p2pool.DEBUG:
df = deferral.retry('Error submitting merged block: (will retry)', 10, 10)(aux_work['merged_proxy'].rpc_getauxblock)(
pack.IntType(256, 'big').pack(aux_work['hash']).encode('hex'),
bitcoin_data.aux_pow_type.pack(dict(
merkle_tx=dict(
tx=transactions[0],
block_hash=header_hash,
merkle_link=merkle_link,
),
merkle_link=bitcoin_data.calculate_merkle_link(hashes, index),
parent_block_header=header,
)).encode('hex'),
)
@df.addCallback
def _(result, aux_work=aux_work):
if result != (pow_hash <= aux_work['target']):
print >>sys.stderr, 'Merged block submittal result: %s Expected: %s' % (result, pow_hash <= aux_work['target'])
else:
print 'Merged block submittal result: %s' % (result,)
@df.addErrback
def _(err):
log.err(err, 'Error submitting merged block:')
except:
log.err(None, 'Error while processing merged mining POW:')
if pow_hash <= share_info['bits'].target and header_hash not in received_header_hashes:
share = get_share(header, transactions)
print 'GOT SHARE! %s %s prev %s age %.2fs%s' % (
request.getUser(),
p2pool_data.format_hash(share.hash),
p2pool_data.format_hash(share.previous_hash),
time.time() - getwork_time,
' DEAD ON ARRIVAL' if not on_time else '',
)
self.my_share_hashes.add(share.hash)
if not on_time:
self.my_doa_share_hashes.add(share.hash)
self.node.tracker.add(share)
self.node.set_best_share()
try:
if (pow_hash <= header['bits'].target or p2pool.DEBUG) and self.node.p2p_node is not None:
self.node.p2p_node.broadcast_share(share.hash)
except:
log.err(None, 'Error forwarding block solution:')
self.share_received.happened(bitcoin_data.target_to_average_attempts(share.target), not on_time)
if pow_hash > target:
print 'Worker %s submitted share with hash > target:' % (request.getUser(),)
print ' Hash: %56x' % (pow_hash,)
print ' Target: %56x' % (target,)
elif header_hash in received_header_hashes:
print >>sys.stderr, 'Worker %s @ %s submitted share more than once!' % (request.getUser(), request.getClientIP())
else:
received_header_hashes.add(header_hash)
self.pseudoshare_received.happened(bitcoin_data.target_to_average_attempts(target), not on_time, user)
self.recent_shares_ts_work.append((time.time(), bitcoin_data.target_to_average_attempts(target)))
while len(self.recent_shares_ts_work) > 50:
self.recent_shares_ts_work.pop(0)
self.local_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), dead=not on_time, user=user))
return on_time
return ba, got_response
| gpl-3.0 | 8,832,913,668,593,025,000 | 53.451087 | 184 | 0.568819 | false | 3.857913 | false | false | false |
jrichte43/ProjectEuler | Problem-0416/solutions.py | 1 | 1378 |
__problem_title__ = "A frog's trip"
__problem_url___ = "https://projecteuler.net/problem=416"
__problem_description__ = "A row of squares contains a frog in the leftmost square. By " \
"successive jumps the frog goes to the rightmost square and then back " \
"to the leftmost square. On the outward trip he jumps one, two or " \
"three squares to the right, and on the homeward trip he jumps to the " \
"left in a similar manner. He cannot jump outside the squares. He " \
"repeats the round-trip travel times. Let F( , ) be the number of the " \
"ways the frog can travel so that at most one square remains " \
"unvisited. For example, F(1, 3) = 4, F(1, 4) = 15, F(1, 5) = 46, F(2, " \
"3) = 16 and F(2, 100) mod 10 = 429619151. Find the last 9 digits of " \
"F(10, 10 )."
import timeit
class Solution():
@staticmethod
def solution1():
pass
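        # Placeholder: no solution has been implemented for this problem yet,
        # so solution1() currently returns None.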
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
| gpl-3.0 | 4,883,869,645,783,429,000 | 39.529412 | 100 | 0.535559 | false | 3.870787 | false | false | false |
yossitamarov/calc_local_library | polls/excelcompiler.py | 1 | 40551 |
# We will choose our wrapper with os compatibility
# ExcelComWrapper : Must be run on Windows as it requires a COM link to an Excel instance.
# ExcelOpxWrapper : Can be run anywhere but only with post 2010 Excel formats
try:
import win32com.client
import pythoncom
from pycel.excelwrapper import ExcelComWrapper as ExcelWrapperImpl
except:
print "Can\'t import win32com -> switch from Com to Openpyxl wrapping implementation"
from pycel.excelwrapper import ExcelOpxWrapper as ExcelWrapperImpl
import pycel.excellib
from pycel.excellib import *
from pycel.excelutil import *
from math import *
from networkx.classes.digraph import DiGraph
from networkx.drawing.nx_pydot import write_dot
from networkx.drawing.nx_pylab import draw, draw_circular
from networkx.readwrite.gexf import write_gexf
from pycel.tokenizer import ExcelParser, f_token, shunting_yard
import cPickle
import logging
import networkx as nx
__version__ = filter(str.isdigit, "$Revision: 2524 $")
__date__ = filter(str.isdigit, "$Date: 2011-09-06 17:05:00 +0100 (Tue, 06 Sep 2011) $")
__author__ = filter(str.isdigit, "$Author: dg2d09 $")
class Spreadsheet(object):
def __init__(self, G, cellmap):
super(Spreadsheet, self).__init__()
self.G = G
self.cellmap = cellmap
self.params = None
@staticmethod
def load_from_file(fname):
f = open(fname, 'rb')
obj = cPickle.load(f)
# obj = load(f)
return obj
def save_to_file(self, fname):
f = open(fname, 'wb')
cPickle.dump(self, f, protocol=2)
f.close()
def export_to_dot(self, fname):
write_dot(self.G, fname)
def export_to_gexf(self, fname):
write_gexf(self.G, fname)
def plot_graph(self):
import matplotlib.pyplot as plt
pos = nx.spring_layout(self.G, iterations=2000)
# pos=nx.spectral_layout(G)
# pos = nx.random_layout(G)
nx.draw_networkx_nodes(self.G, pos)
nx.draw_networkx_edges(self.G, pos, arrows=True)
nx.draw_networkx_labels(self.G, pos)
plt.show()
def set_value(self, cell, val, is_addr=True):
if is_addr:
cell = self.cellmap[cell]
if cell.value != val:
# reset the node + its dependencies
self.reset(cell)
# set the value
cell.value = val
def reset(self, cell):
if cell.value is None: return
# print "resetting", cell.address()
cell.value = None
map(self.reset, self.G.successors_iter(cell))
def print_value_tree(self, addr, indent):
cell = self.cellmap[addr]
print "%s %s = %s" % (" " * indent, addr, cell.value)
for c in self.G.predecessors_iter(cell):
self.print_value_tree(c.address(), indent + 1)
def recalculate(self):
for c in self.cellmap.values():
if isinstance(c, CellRange):
self.evaluate_range(c, is_addr=False)
else:
self.evaluate(c, is_addr=False)
def evaluate_range(self, rng, is_addr=True):
if is_addr:
rng = self.cellmap[rng]
        # it's important that [] gets treated as false here
if rng.value:
return rng.value
cells, nrows, ncols = rng.celladdrs, rng.nrows, rng.ncols
if nrows == 1 or ncols == 1:
data = [self.evaluate(c) for c in cells]
else:
data = [[self.evaluate(c) for c in cells[i]] for i in range(len(cells))]
rng.value = data
return data
def evaluate(self, cell, is_addr=True):
if is_addr:
cell = self.cellmap[cell]
# no formula, fixed value
if not cell.formula or cell.value != None:
# print " returning constant or cached value for ", cell.address()
return cell.value
# recalculate formula
# the compiled expression calls this function
def eval_cell(address):
return self.evaluate(address)
def eval_range(rng):
return self.evaluate_range(rng)
try:
print "Evalling: %s, %s" % (cell.address(), cell.python_expression)
vv = eval(cell.compiled_expression)
# print "Cell %s evalled to %s" % (cell.address(),vv)
if vv is None:
print "WARNING %s is None" % (cell.address())
cell.value = vv
except Exception as e:
if e.message.startswith("Problem evalling"):
raise e
else:
raise Exception("Problem evalling: %s for %s, %s" % (e, cell.address(), cell.python_expression))
return cell.value
class ASTNode(object):
"""A generic node in the AST"""
def __init__(self, token):
super(ASTNode, self).__init__()
self.token = token
def __str__(self):
return self.token.tvalue
def __getattr__(self, name):
return getattr(self.token, name)
def children(self, ast):
args = ast.predecessors(self)
args = sorted(args, key=lambda x: ast.node[x]['pos'])
# args.reverse()
return args
def parent(self, ast):
args = ast.successors(self)
return args[0] if args else None
def emit(self, ast, context=None):
"""Emit code"""
self.token.tvalue
class OperatorNode(ASTNode):
def __init__(self, *args):
super(OperatorNode, self).__init__(*args)
# convert the operator to python equivalents
self.opmap = {
"^": "**",
"=": "==",
"&": "+",
"": "+" # union
}
def emit(self, ast, context=None):
xop = self.tvalue
# Get the arguments
args = self.children(ast)
op = self.opmap.get(xop, xop)
if self.ttype == "operator-prefix":
return "-" + args[0].emit(ast, context=context)
parent = self.parent(ast)
# dont render the ^{1,2,..} part in a linest formula
# TODO: bit of a hack
if op == "**":
if parent and parent.tvalue.lower() == "linest":
return args[0].emit(ast, context=context)
# TODO silly hack to work around the fact that None < 0 is True (happens on blank cells)
if op == "<" or op == "<=":
aa = args[0].emit(ast, context=context)
ss = "(" + aa + " if " + aa + " is not None else float('inf'))" + op + args[1].emit(ast, context=context)
elif op == ">" or op == ">=":
aa = args[1].emit(ast, context=context)
ss = args[0].emit(ast, context=context) + op + "(" + aa + " if " + aa + " is not None else float('inf'))"
else:
ss = args[0].emit(ast, context=context) + op + args[1].emit(ast, context=context)
# avoid needless parentheses
if parent and not isinstance(parent, FunctionNode):
ss = "(" + ss + ")"
return ss
class OperandNode(ASTNode):
def __init__(self, *args):
super(OperandNode, self).__init__(*args)
def emit(self, ast, context=None):
t = self.tsubtype
if t == "logical":
return str(self.tvalue.lower() == "true")
elif t == "text" or t == "error":
# if the string contains quotes, escape them
val = self.tvalue.replace('"', '\\"')
return '"' + val + '"'
else:
return str(self.tvalue)
class RangeNode(OperandNode):
"""Represents a spreadsheet cell or range, e.g., A5 or B3:C20"""
def __init__(self, *args):
super(RangeNode, self).__init__(*args)
def get_cells(self):
return resolve_range(self.tvalue)[0]
def emit(self, ast, context=None):
# resolve the range into cells
rng = self.tvalue.replace('$', '')
sheet = context.curcell.sheet + "!" if context else ""
if is_range(rng):
sh, start, end = split_range(rng)
if sh:
str = 'eval_range("' + rng + '")'
else:
str = 'eval_range("' + sheet + rng + '")'
else:
sh, col, row = split_address(rng)
if sh:
str = 'eval_cell("' + rng + '")'
else:
str = 'eval_cell("' + sheet + rng + '")'
return str
class FunctionNode(ASTNode):
"""AST node representing a function call"""
def __init__(self, *args):
super(FunctionNode, self).__init__(*args)
self.numargs = 0
# map excel functions onto their python equivalents
self.funmap = pycel.excellib.FUNCTION_MAP
def emit(self, ast, context=None):
fun = self.tvalue.lower()
str = ''
# Get the arguments
args = self.children(ast)
if fun == "atan2":
# swap arguments
str = "atan2(%s,%s)" % (args[1].emit(ast, context=context), args[0].emit(ast, context=context))
elif fun == "pi":
# constant, no parens
str = "pi"
elif fun == "if":
# inline the if
if len(args) == 2:
str = "%s if %s else 0" % (args[1].emit(ast, context=context), args[0].emit(ast, context=context))
elif len(args) == 3:
str = "(%s if %s else %s)" % (args[1].emit(ast, context=context), args[0].emit(ast, context=context),
args[2].emit(ast, context=context))
else:
raise Exception("if with %s arguments not supported" % len(args))
elif fun == "array":
str += '['
if len(args) == 1:
# only one row
str += args[0].emit(ast, context=context)
else:
# multiple rows
str += ",".join(['[' + n.emit(ast, context=context) + ']' for n in args])
str += ']'
elif fun == "arrayrow":
# simply create a list
str += ",".join([n.emit(ast, context=context) for n in args])
elif fun == "linest" or fun == "linestmario":
str = fun + "(" + ",".join([n.emit(ast, context=context) for n in args])
if not context:
degree, coef = -1, -1
else:
# linests are often used as part of an array formula spanning multiple cells,
# one cell for each coefficient. We have to figure out where we currently are
# in that range
degree, coef = get_linest_degree(context.excel, context.curcell)
# if we are the only linest (degree is one) and linest is nested -> return vector
# else return the coef.
if degree == 1 and self.parent(ast):
if fun == "linest":
str += ",degree=%s)" % degree
else:
str += ")"
else:
if fun == "linest":
str += ",degree=%s)[%s]" % (degree, coef - 1)
else:
str += ")[%s]" % (coef - 1)
elif fun == "and":
str = "all([" + ",".join([n.emit(ast, context=context) for n in args]) + "])"
elif fun == "or":
str = "any([" + ",".join([n.emit(ast, context=context) for n in args]) + "])"
else:
# map to the correct name
f = self.funmap.get(fun, fun)
str = f + "(" + ",".join([n.emit(ast, context=context) for n in args]) + ")"
return str
def create_node(t):
"""Simple factory function"""
if t.ttype == "operand":
if t.tsubtype == "range":
return RangeNode(t)
else:
return OperandNode(t)
elif t.ttype == "function":
return FunctionNode(t)
elif t.ttype.startswith("operator"):
return OperatorNode(t)
else:
return ASTNode(t)
class Operator:
"""Small wrapper class to manage operators during shunting yard"""
def __init__(self, value, precedence, associativity):
self.value = value
self.precedence = precedence
self.associativity = associativity
def shunting_yard(expression):
"""
Tokenize an excel formula expression into reverse polish notation
Core algorithm taken from wikipedia with varargs extensions from
http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-algorithm-to-allow-variable-numbers-of-arguments-to-functions/
"""
# remove leading =
if expression.startswith('='):
expression = expression[1:]
p = ExcelParser();
p.parse(expression)
# insert tokens for '(' and ')', to make things clearer below
tokens = []
for t in p.tokens.items:
if t.ttype == "function" and t.tsubtype == "start":
t.tsubtype = ""
tokens.append(t)
tokens.append(f_token('(', 'arglist', 'start'))
elif t.ttype == "function" and t.tsubtype == "stop":
tokens.append(f_token(')', 'arglist', 'stop'))
elif t.ttype == "subexpression" and t.tsubtype == "start":
t.tvalue = '('
tokens.append(t)
elif t.ttype == "subexpression" and t.tsubtype == "stop":
t.tvalue = ')'
tokens.append(t)
else:
tokens.append(t)
# print "tokens: ", "|".join([x.tvalue for x in tokens])
# http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx
operators = {}
operators[':'] = Operator(':', 8, 'left')
operators[''] = Operator(' ', 8, 'left')
operators[','] = Operator(',', 8, 'left')
operators['u-'] = Operator('u-', 7, 'left') # unary negation
operators['%'] = Operator('%', 6, 'left')
operators['^'] = Operator('^', 5, 'left')
operators['*'] = Operator('*', 4, 'left')
operators['/'] = Operator('/', 4, 'left')
operators['+'] = Operator('+', 3, 'left')
operators['-'] = Operator('-', 3, 'left')
operators['&'] = Operator('&', 2, 'left')
operators['='] = Operator('=', 1, 'left')
operators['<'] = Operator('<', 1, 'left')
operators['>'] = Operator('>', 1, 'left')
operators['<='] = Operator('<=', 1, 'left')
operators['>='] = Operator('>=', 1, 'left')
operators['<>'] = Operator('<>', 1, 'left')
output = collections.deque()
stack = []
were_values = []
arg_count = []
for t in tokens:
if t.ttype == "operand":
output.append(create_node(t))
if were_values:
were_values.pop()
were_values.append(True)
elif t.ttype == "function":
stack.append(t)
arg_count.append(0)
if were_values:
were_values.pop()
were_values.append(True)
were_values.append(False)
elif t.ttype == "argument":
while stack and (stack[-1].tsubtype != "start"):
output.append(create_node(stack.pop()))
if were_values.pop(): arg_count[-1] += 1
were_values.append(False)
if not len(stack):
raise Exception("Mismatched or misplaced parentheses")
elif t.ttype.startswith('operator'):
if t.ttype.endswith('-prefix') and t.tvalue == "-":
o1 = operators['u-']
else:
o1 = operators[t.tvalue]
while stack and stack[-1].ttype.startswith('operator'):
if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue == "-":
o2 = operators['u-']
else:
o2 = operators[stack[-1].tvalue]
if ((o1.associativity == "left" and o1.precedence <= o2.precedence)
or
(o1.associativity == "right" and o1.precedence < o2.precedence)):
output.append(create_node(stack.pop()))
else:
break
stack.append(t)
elif t.tsubtype == "start":
stack.append(t)
elif t.tsubtype == "stop":
while stack and stack[-1].tsubtype != "start":
output.append(create_node(stack.pop()))
if not stack:
raise Exception("Mismatched or misplaced parentheses")
stack.pop()
if stack and stack[-1].ttype == "function":
f = create_node(stack.pop())
a = arg_count.pop()
w = were_values.pop()
if w: a += 1
f.num_args = a
# print f, "has ",a," args"
output.append(f)
while stack:
if stack[-1].tsubtype == "start" or stack[-1].tsubtype == "stop":
raise Exception("Mismatched or misplaced parentheses")
output.append(create_node(stack.pop()))
# print "Stack is: ", "|".join(stack)
# print "Ouput is: ", "|".join([x.tvalue for x in output])
# convert to list
result = [x for x in output]
return result
def build_ast(expression):
"""build an AST from an Excel formula expression in reverse polish notation"""
# use a directed graph to store the tree
G = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n, OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
G.add_node(arg1, {'pos': 1})
G.add_node(arg2, {'pos': 2})
G.add_edge(arg1, n)
G.add_edge(arg2, n)
else:
arg1 = stack.pop()
G.add_node(arg1, {'pos': 1})
G.add_edge(arg1, n)
elif isinstance(n, FunctionNode):
args = [stack.pop() for _ in range(n.num_args)]
args.reverse()
for i, a in enumerate(args):
G.add_node(a, {'pos': i})
G.add_edge(a, n)
# for i in range(n.num_args):
# G.add_edge(stack.pop(),n)
else:
G.add_node(n, {'pos': 0})
stack.append(n)
return G, stack.pop()
class Context(object):
"""A small context object that nodes in the AST can use to emit code"""
def __init__(self, curcell, excel):
# the current cell for which we are generating code
self.curcell = curcell
# a handle to an excel instance
self.excel = excel
class ExcelCompiler(object):
"""Class responsible for taking an Excel spreadsheet and compiling it to a Spreadsheet instance
that can be serialized to disk, and executed independently of excel.
"""
def __init__(self, filename=None, excel=None, *args, **kwargs):
super(ExcelCompiler, self).__init__()
self.filename = filename
if excel:
# if we are running as an excel addin, this gets passed to us
self.excel = excel
else:
            # TODO: use a proper interface so we can (eventually) support loading from file (much faster). Still need to find a good lib though.
self.excel = ExcelWrapperImpl(filename=filename)
self.excel.connect()
self.log = logging.getLogger("decode.{0}".format(self.__class__.__name__))
def cell2code(self, cell):
"""Generate python code for the given cell"""
if cell.formula:
e = shunting_yard(cell.formula or str(cell.value))
print e
ast, root = build_ast(e)
code = root.emit(ast, context=Context(cell, self.excel))
else:
ast = None
code = str('"' + cell.value + '"' if isinstance(cell.value, unicode) else cell.value)
return code, ast
def add_node_to_graph(self, G, n):
G.add_node(n)
G.node[n]['sheet'] = n.sheet
if isinstance(n, Cell):
G.node[n]['label'] = n.col + str(n.row)
else:
# strip the sheet
G.node[n]['label'] = n.address()[n.address().find('!') + 1:]
def gen_graph(self, seed, sheet=None):
"""Given a starting point (e.g., A6, or A3:B7) on a particular sheet, generate
a Spreadsheet instance that captures the logic and control flow of the equations."""
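        # Worklist construction: translate each formula cell to Python, then follow
        # every cell/range it references (adding edges from precedent to dependent)
        # until no formula cells are left to visit.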
# starting points
cursheet = sheet if sheet else self.excel.get_active_sheet()
self.excel.set_sheet(cursheet)
seeds, nr, nc = Cell.make_cells(self.excel, seed,
sheet=cursheet) # no need to output nr and nc here, since seed can be a list of unlinked cells
seeds = list(flatten(seeds))
print "Seed %s expanded into %s cells" % (seed, len(seeds))
# only keep seeds with formulas or numbers
seeds = [s for s in seeds if s.formula or isinstance(s.value, (int, float))]
print "%s filtered seeds " % len(seeds)
# cells to analyze: only formulas
todo = [s for s in seeds if s.formula]
print "%s cells on the todo list" % len(todo)
# map of all cells
cellmap = dict([(x.address(), x) for x in seeds])
# directed graph
G = nx.DiGraph()
# match the info in cellmap
for c in cellmap.itervalues(): self.add_node_to_graph(G, c)
while todo:
c1 = todo.pop()
print "Handling ", c1.address()
# set the current sheet so relative addresses resolve properly
if c1.sheet != cursheet:
cursheet = c1.sheet
self.excel.set_sheet(cursheet)
# parse the formula into code
pystr, ast = self.cell2code(c1)
# set the code & compile it (will flag problems sooner rather than later)
c1.python_expression = pystr
c1.compile()
# get all the cells/ranges this formula refers to
deps = [x.tvalue.replace('$', '') for x in ast.nodes() if isinstance(x, RangeNode)]
# remove dupes
deps = uniqueify(deps)
for dep in deps:
# if the dependency is a multi-cell range, create a range object
if is_range(dep):
# this will make sure we always have an absolute address
rng = CellRange(dep, sheet=cursheet)
if rng.address() in cellmap:
# already dealt with this range
# add an edge from the range to the parent
G.add_edge(cellmap[rng.address()], cellmap[c1.address()])
continue
else:
# turn into cell objects
cells, nrows, ncols = Cell.make_cells(self.excel, dep, sheet=cursheet)
# get the values so we can set the range value
if nrows == 1 or ncols == 1:
rng.value = [c.value for c in cells]
else:
rng.value = [[c.value for c in cells[i]] for i in range(len(cells))]
# save the range
cellmap[rng.address()] = rng
# add an edge from the range to the parent
self.add_node_to_graph(G, rng)
G.add_edge(rng, cellmap[c1.address()])
# cells in the range should point to the range as their parent
target = rng
else:
# not a range, create the cell object
cells = [Cell.resolve_cell(self.excel, dep, sheet=cursheet)]
target = cellmap[c1.address()]
# process each cell
for c2 in flatten(cells):
                    # if we haven't treated this cell already
if c2.address() not in cellmap:
if c2.formula:
# cell with a formula, needs to be added to the todo list
todo.append(c2)
# print "appended ", c2.address()
else:
# constant cell, no need for further processing, just remember to set the code
pystr, ast = self.cell2code(c2)
c2.python_expression = pystr
c2.compile()
# print "skipped ", c2.address()
# save in the cellmap
cellmap[c2.address()] = c2
# add to the graph
self.add_node_to_graph(G, c2)
# add an edge from the cell to the parent (range or cell)
G.add_edge(cellmap[c2.address()], target)
print "Graph construction done, %s nodes, %s edges, %s cellmap entries" % (
len(G.nodes()), len(G.edges()), len(cellmap))
sp = Spreadsheet(G, cellmap)
return sp
if __name__ == '__main__':
import re
def encode_conditions(conditions):
for i in range(len(conditions)):
conditions[i] = conditions[i].replace("(s)", '("s")')
conditions[i] = conditions[i].replace("(r)", '("r")')
exists = re.findall(r'(exists\(.*?\))', conditions[i], re.M | re.I)
for j in range(len(exists)):
conditions[i] = conditions[i].replace(exists[j], '\"' + exists[j] + '\"')
for_each = re.findall(r'(foreach\(.*?\))', conditions[i], re.M | re.I)
for j in range(len(for_each)):
conditions[i] = conditions[i].replace(for_each[j], '\"' + for_each[j] + '\"')
return conditions
def generate_quantifier_vector(quantifier, type='exists'):
        '''Receive a quantifier condition and generate a boolean vector expression based on its condition.
        Type can be either exists or for_each.'''
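        # Informal example: by this point indices are expected in s[i] form, so an
        # input like exists(i,s[i]==3) expands to roughly:
        #   1 in [1 if s[i]==3 else 0 for i in range(len(s)) ]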
exp_in_paranth = re.findall(r'' + type + '\((.*?)\)', quantifier, re.M | re.I)
exp_in_paranth = exp_in_paranth[0].split(",")
vecs = re.findall(r'(.)\[.\]', exp_in_paranth[-1], re.M | re.I)
condition_vec = "1 " if type == 'exists' else "0 "
condition_vec += "in [1 if " + exp_in_paranth[-1] + " else 0 "
for i in range(len(exp_in_paranth) - 1):
condition_vec += "for " + exp_in_paranth[i] + " in range(len(" + vecs[i] + ")) "
condition_vec += "]"
return condition_vec
def decode_conditions(conditions):
for i in range(len(conditions)):
conditions[i] = conditions[i].replace('("s")', '(s)')
conditions[i] = conditions[i].replace('("r")', '(r)')
for quantifier in ['exists', 'foreach']:
exists = re.findall(r'\"(' + quantifier + '\(.*?\))\"', conditions[i], re.M | re.I)
for j in range(len(exists)):
exists_with_indices = list(exists)
entries = re.findall(r'(._.)', exists[j], re.M | re.I)
for k in range(len(entries)):
exists_with_indices[j] = exists_with_indices[j].replace(entries[k],
(entries[k].replace("_", "[") + "]"))
if not (">" in exists_with_indices[j]) and not ("<" in exists_with_indices[j]):
exists_with_indices[j] = exists_with_indices[j].replace("=", "==")
exists_with_indices[j] = generate_quantifier_vector(exists_with_indices[j], quantifier)
conditions[i] = conditions[i].replace('\"' + exists[j] + '\"', exists_with_indices[j])
return conditions
def parse_conditions(conds):
conds = encode_conditions(conds)
python_inputs = []
for i in conds:
print "**************************************************"
print "Formula: ", i
e = shunting_yard(i);
# print "RPN: ", "|".join([str(x) for x in e])
G, root = build_ast(e)
python_inputs += [root.emit(G, context=None)]
print "Python code: ", root.emit(G, context=None)
print "**************************************************"
return decode_conditions(python_inputs)
def classify_strategies_to_dimensions(strategies, dimensions_matrix, dimensions_rows_conds,
dimensions_columns_conds):
row = ""
col = ""
for t in strategies:
s = tuple(t)
exec "row =" + dimensions_rows_conds[0]
exec "col =" + dimensions_columns_conds[0]
dimensions_matrix[row][col][s] = dict()
return dimensions_matrix
def create_dimensions_matrix(dimensions_rows_categories_names, dimensions_columns_categories_names):
dimensions_matrix = {row_name: dict() for row_name in dimensions_rows_categories_names}
for row_name in dimensions_matrix:
for col_name in dimensions_columns_categories_names:
dimensions_matrix[row_name][col_name] = dict()
return dimensions_matrix
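    # calc_payments evaluates the exec'd payment expression for every
    # (row, col, strategy) against every opponent cell, then averages over the
    # opponent cell's strategies to get that cell's 'uniform_payment'.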
def calc_payments(dimensions_matrix,payment_conds):
for row in dimensions_matrix:
for col in dimensions_matrix[row]:
for strategy in dimensions_matrix[row][col]:
# print "first level= "+str(row)+","+str(col)+":"+str(strategy)
for row2 in dimensions_matrix:
dimensions_matrix[row][col][strategy][row2] = dict()
for col2 in dimensions_matrix[row2]:
dimensions_matrix[row][col][strategy][row2][col2] = dict()
for strategy2 in dimensions_matrix[row2][col2]:
dimensions_matrix[row][col][strategy][row2][col2][strategy2] = dict()
# print "second level= "+str(row)+","+str(col)+":"+str(strategy)+str(row2)+","+str(col2)+":"+str(strategy2)
s = strategy
r = strategy2
payment = 0
exec "payment=" + payment_conds[0]
dimensions_matrix[row][col][strategy][row2][col2][strategy2]["val"] = payment
# print "third level= " + str(row) + "," + str(col) + ":" + str(strategy) + str(
# row2) + "," + str(col2) + ":" + str(strategy2)+"="+str(payment)
for row in dimensions_matrix:
for col in dimensions_matrix[row]:
for strategy in dimensions_matrix[row][col]:
for row2 in dimensions_matrix[row][col][strategy]:
for col2 in dimensions_matrix[row][col][strategy][row2]:
cell_size = len(dimensions_matrix[row][col][strategy][row2][col2])
                            payments_in_cell = [
                                eval(str(dimensions_matrix[row][col][strategy][row2][col2][strategy2]["val"])) for
                                strategy2
                                in dimensions_matrix[row][col][strategy][row2][col2]]
                            uni_payment = sum([(1 / float(cell_size)) * payment for payment in payments_in_cell])
dimensions_matrix[row][col][strategy][row2][col2]["uniform_payment"] = uni_payment
# print "second level= " + str(row) + "," + str(col) + ":" + str(strategy) + str(
# row2) + "," + str(col2) + ":" + str(len(dimensions_matrix[row][col][strategy][row2][col2]))+",uni="+str(uni_payment)
# dimensions_matrix_copy = dict(dimensions_matrix)
# for row in dimensions_matrix:
# for col in dimensions_matrix[row]:
# strategy = dimensions_matrix[row][col].keys()[0]
# for row2 in dimensions_matrix[row][col][strategy]:
# for col2 in dimensions_matrix[row][col][strategy][row2]:
# if row==row2 and col==col2:
# # a=1
# dimensions_matrix_copy[row][col]["uniform_payment"]= dimensions_matrix[row][col][strategy][row2][col2]["uniform_payment"]
# dimensions_matrix = dict(dimensions_matrix_copy)
return dimensions_matrix
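    # calc_MD_eq marks a strategy as an equilibrium when its own cell's uniform
    # payment is not beaten by the adjacent row/column categories; calc_Global_eq
    # additionally requires (for strategies that pass the MD check) that no cell
    # in the whole matrix beats it.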
def calc_MD_eq(dimensions_matrix, dimensions_ordered_row, dimensions_ordered_col):
for row in dimensions_matrix:
for col in dimensions_matrix[row]:
for strategy in dimensions_matrix[row][col]:
is_MD_eq = True
row_index = dimensions_ordered_row.index(row)
if row_index != 0:
if dimensions_matrix[row][col][strategy][row][col]["uniform_payment"] < \
dimensions_matrix[row][col][strategy][dimensions_ordered_row[row_index - 1]][col][
"uniform_payment"]:
is_MD_eq = False
if row_index != len(dimensions_ordered_row) - 1:
if dimensions_matrix[row][col][strategy][row][col]["uniform_payment"] < \
dimensions_matrix[row][col][strategy][dimensions_ordered_row[row_index + 1]][col][
"uniform_payment"]:
is_MD_eq = False
col_index = dimensions_ordered_col.index(col)
if col_index != 0:
if dimensions_matrix[row][col][strategy][row][col]["uniform_payment"] < \
dimensions_matrix[row][col][strategy][row][dimensions_ordered_col[col_index - 1]][
"uniform_payment"]:
is_MD_eq = False
if col_index != len(dimensions_ordered_col) - 1:
if dimensions_matrix[row][col][strategy][row][col]["uniform_payment"] < \
dimensions_matrix[row][col][strategy][row][dimensions_ordered_col[col_index + 1]][
"uniform_payment"]:
is_MD_eq = False
if is_MD_eq:
dimensions_matrix[row][col][strategy]["is_MD_eq"] = True
else:
dimensions_matrix[row][col][strategy]["is_MD_eq"] = False
return dimensions_matrix
def calc_Global_eq(dimensions_matrix):
for row in dimensions_matrix:
for col in dimensions_matrix[row]:
for strategy in dimensions_matrix[row][col]:
if dimensions_matrix[row][col][strategy]["is_MD_eq"]:
is_Global_eq = True
for row2 in dimensions_matrix[row][col][strategy]:
if type(dimensions_matrix[row][col][strategy][row2]) != bool:
for col2 in dimensions_matrix[row][col][strategy][row2]:
if dimensions_matrix[row][col][strategy][row][col]["uniform_payment"] < \
dimensions_matrix[row][col][strategy][row2][col2]["uniform_payment"]:
is_Global_eq = False
if is_Global_eq:
dimensions_matrix[row][col][strategy]["is_Global_eq"] = True
else:
dimensions_matrix[row][col][strategy]["is_Global_eq"] = False
return dimensions_matrix
# some test formulas
payment_conds1 = [
# '=5*log(sin(6)+2)',
# '=5*log(sin(3,7,9)+2)',
# '=3 + 4 * 2 / ( 1 - 5 ) ^ 2 ^ 3',
'=IF(exists(i,j,s_i=r_j),0,IF(LEN(s)=2,3,2))',
# '=IF(foreach(i,j,s_i=r_j),0,IF(LEN(s)=2,3,2))',
# '=IF(foreach(i,s_i=r_i),0,IF(LEN(s)=2,3,2))',
]
# some test formulas
dimensions_conds1 = [
# '=5*log(sin(6)+2)',
# '=5*log(sin(3,7,9)+2)',
# '=3 + 4 * 2 / ( 1 - 5 ) ^ 2 ^ 3',
'=IF(exists(i,s_i=3),"center","not center")',
'=IF(len(s)>1,"two","one")',
# '=IF(foreach(i,j,s_i=r_j),0,IF(LEN(s)=2,3,2))',
# '=IF(foreach(i,s_i=r_i),0,IF(LEN(s)=2,3,2))',
]
#
# payment_conds = encode_conditions(payment_conds)
# print payment_conds
# python_inputs = []
# for i in payment_conds:
# print "**************************************************"
# print "Formula: ", i
# e = shunting_yard(i);
# # print "RPN: ", "|".join([str(x) for x in e])
# G, root = build_ast(e)
# python_inputs += [root.emit(G, context=None)]
# print "Python code: ", root.emit(G, context=None)
# print "**************************************************"
#
#
# print python_inputs
# payment_conds = parse_conditions(payment_conds1)
# print payment_conds
# print decode_conditions(python_inputs)
# dimensions_conds = parse_conditions(dimensions_conds)
# print dimensions_conds
# s = [2, 3]
# exec "print " + dimensions_conds[0]
strategies_vector1 = [[1], [2], [3], [4], [5], [1, 2], [2, 3], [3, 4], [4, 5]]
dimensions_rows_categories_names1 = {"dimensions_row_category_name_1": "center",
"dimensions_row_category_name_2": "not center"}
dimensions_columns_categories_names1 = {"dimensions_column_category_name_1": "one",
"dimensions_column_category_name_2": "two"}
dimensions_ordered_row1 = ["center", "not center"]
dimensions_ordered_col1 = ["one", "two"]
dimensions_rows_conds1 = [dimensions_conds1[0]]
dimensions_columns_conds1 = [dimensions_conds1[1]]
def full_calc(strategies_vector, dimensions_rows_conds, dimensions_columns_conds, dimensions_rows_categories_names,
dimensions_columns_categories_names, dimensions_ordered_row, dimensions_ordered_col,payment_conds):
dimensions_rows_conds = parse_conditions(dimensions_rows_conds)
dimensions_columns_conds = parse_conditions(dimensions_columns_conds)
payment_conds = parse_conditions(payment_conds)
dimensions_matrix = create_dimensions_matrix(dimensions_rows_categories_names,
dimensions_columns_categories_names)
print str(dimensions_matrix)
dimensions_matrix = classify_strategies_to_dimensions(strategies_vector, dimensions_matrix,
dimensions_rows_conds,
dimensions_columns_conds)
print dimensions_matrix
dimensions_matrix = calc_payments(dimensions_matrix,payment_conds)
print "\n calc global eq"
print "*************************************"
dimensions_matrix = calc_MD_eq(dimensions_matrix, dimensions_ordered_row, dimensions_ordered_col)
dimensions_matrix = calc_Global_eq(dimensions_matrix)
for row in dimensions_matrix:
for col in dimensions_matrix[row]:
for strategy in dimensions_matrix[row][col]:
print str(row) + "," + str(col) + ":" + str(dimensions_matrix[row][col][strategy]["is_Global_eq"])
print "\n calc MD eq"
print "*************************************"
for row in dimensions_matrix:
for col in dimensions_matrix[row]:
for strategy in dimensions_matrix[row][col]:
print str(row) + "," + str(col) + ":" + str(dimensions_matrix[row][col][strategy]["is_MD_eq"])
full_calc(strategies_vector1, dimensions_rows_conds1, dimensions_columns_conds1,
dimensions_ordered_row1, dimensions_ordered_col1, dimensions_ordered_row1,
dimensions_ordered_col1,payment_conds1)
# for row in dimensions_matrix:
# for col in dimensions_matrix[row]:
# for strategy in dimensions_matrix[row][col]:
# print str(row) + "," + str(col) + ":" +str(dimensions_matrix[row][col][strategy]["is_Global_eq"])
# def calc_payments(dimensionized_conditions):
# for strategy in dimensionized_conditions:
| gpl-3.0 | -7,260,504,299,018,833,000 | 38.833988 | 151 | 0.518113 | false | 3.997141 | false | false | false |
TilCreator/Tapastic-Comic-Downloader | tapas-dl.py | 1 | 11896 | #!/bin/env python3
from pyquery import PyQuery as pq
from pathlib import Path
import os
import argparse
import re
import requests
def lead0(num, max):
return str(num).zfill(len(str(max)))
def terminal_size():
try:
import fcntl
import termios
import struct
th, tw, hp, wp = struct.unpack('HHHH', fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))
except (IOError, ModuleNotFoundError):
th, tw = 80, 200
return tw, th
def printLine(msg='', noNewLine=False):
terminalWidth = terminal_size()[0]
spaces = terminalWidth - len(msg)
if noNewLine:
if args.verbose:
print(' ' + msg + (' ' * (spaces - 1)))
else:
print(msg + (' ' * spaces), end='\r')
else:
print(msg + (' ' * spaces))
def check_path(path, slash=True, fat=False):
evil_chars = []
if slash:
evil_chars.append('/')
if fat:
evil_chars += ['?', '<', '>', '\\', ':', '*', '|', '"', '^']
return ''.join([char for char in path if char not in evil_chars])
# parse input and settup help
parser = argparse.ArgumentParser(description='Downloads comics from \'https://tapas.io\'.\nIf the folder of a downloaded comic is found, it will only be updated (can be disabled with -f/--force).', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('url', metavar='URL/name', type=str, nargs='+',
                    help='URL or URL name of the comic\nGo to the comic you want to download (any page)\nRight-click on the comic name in the upper left corner and select "Copy link address" (or similar), or just use the name after "series" in the URL\nExamples: https://tapas.io/series/Erma, RavenWolf, ...')
parser.add_argument('-f', '--force', action="store_true", help='Disables updater.')
parser.add_argument('-v', '--verbose', action="store_true", help='Enables verbose mode.')
parser.add_argument('-c', '--restrict-characters', action="store_true", help='Removes \'? < > \\ : * | " ^\' from file names')
parser.add_argument('-o', '--output-dir', type=str, nargs='?', default="", dest='baseDir', metavar='C:\\',
help='Output directory where comics should be placed.\nIf left blank, the script folder will be used.')
args = parser.parse_args()
basePath = ""
if (args.baseDir):
basePath = Path(args.baseDir)
for urlCount, url in enumerate(args.url):
# check url/name
if re.match(r'^https://tapas\.io/series/.+$', url):
urlName = url[url.rindex('/') + 1:]
else:
urlName = url
printLine('Loading ' + urlName + '...', True)
# Get comic start page and test if comic exsists
pageReqest = requests.get('https://tapas.io/series/' + urlName, headers={'user-agent': 'tapas-dl'})
if pageReqest.status_code != 200:
printLine('Error: Comic "{}" not found\n'.format(urlName))
break
page = pq(pageReqest.text)
name = page('.center-info__title.center-info__title--small').text()
author = page('div.viewer-section.viewer-section--episode a.name').text()
seriesId = page('.subscribe-btn').attr('data-id')
if len(page('.row-item--info > img')) > 0:
headerSrc = page('.row-item--info > img').attr('src')
else:
headerSrc = None
data = []
page = pq(requests.get(f'https://tapas.io/series/{seriesId}/episodes?page=1&sort=OLDEST&max_limit=99999999', # It's over 9000! But I love that they forgot to limit the max_limit, because that means I don't have to bother with pagination ^^
headers={'user-agent': 'tapas-dl'}).json()['data']['body'])
for episode in page('[data-permalink*="/episode/"]'):
data.append({'id': int(episode.attrib['data-permalink'][episode.attrib['data-permalink'].rfind('/') + 1:])})
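    # 'data' now holds one entry per episode, oldest first, keyed by the numeric
    # episode id taken from the permalink; the updater below relies on this order.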
printLine('{} [{}] ({} pages):'.format(name, urlName, len(data)))
# Check if folder exsists, if not create it
printLine('Checking folder...', True)
# If the user specified a base output directory, prepend that on our folder
savePath = check_path('{} [{}]'.format(name, urlName), fat=args.restrict_characters)
if (basePath != ""):
savePath = os.path.join(basePath, savePath)
printLine('Full path is: ' + str(savePath))
if os.path.isdir(savePath) and not args.force:
printLine('Found directory, only updating (use -f/--force to disable)')
filesInDir = list(os.scandir(savePath))
fileNames = []
for fileInDir in filesInDir:
fileNames.append(fileInDir.name)
fileNames.sort()
imgOffset = len(fileNames)
if imgOffset > 1:
lastFile = fileNames[-1]
lastPageId = int(lastFile[lastFile.rindex('#') + 1:lastFile.rindex('.')])
pageOffset = next(i for i, page in enumerate(data) if page['id'] == lastPageId) + 1
data = data[pageOffset:]
else:
pageOffset = 0
else:
if not os.path.isdir(savePath):
os.mkdir(savePath)
printLine('Creating folder...', True)
pageOffset = 0
imgOffset = 0
# Download header
if True not in [file.name.startswith('-1 - header.') for file in os.scandir(savePath)]:
printLine('Downloading header...', True)
if headerSrc is not None:
with open(os.path.join(savePath, '-1 - header.{}'.format(headerSrc[headerSrc.rindex('.') + 1:])), 'wb') as f:
f.write(requests.get(headerSrc).content)
printLine('Downloaded header')
else:
printLine('Header not found')
if len(data) <= 0:
print('Nothing todo: No pages found or all already downloaded\n')
continue
# Check if series is comic or novel
if len(pq(f'https://tapas.io/episode/{data[0]["id"]}', headers={'user-agent': 'tapas-dl'})('.content__img.js-lazy')) > 0:
printLine('Detected comic')
# Get images from page from JS api
allImgCount = 0
for pageCount, pageData in enumerate(data):
# Test whether the page we have in mind is reachable
pageReqest = requests.get(f'https://tapas.io/episode/{pageData["id"]}', headers={'user-agent': 'tapas-dl'})
if pageReqest.status_code != 200:
# This page was unavailable. Let the user know and add a single dummy image entry.
# (We will check for this when we go to download images.)
printLine('Error: "{}" page {}/{} not found. Page Request yielded: {} (Early Access page?)'.format(urlName, pageCount + pageOffset, len(data) + pageOffset, str(pageReqest.status_code)), True)
pageData['title'] = "PageUnavailable"
pageData['imgs'] = []
pageData['imgs'].append("PageUnavailable")
else:
                # If the page did not yield an access error, go ahead and scrape for image entries.
pageHtml = pq(f'https://tapas.io/episode/{pageData["id"]}', headers={'user-agent': 'tapas-dl'})
printLine('Downloaded image data from {} images (pages {}/{})...'.format(allImgCount, pageCount + pageOffset, len(data) + pageOffset), True)
pageData['title'] = pageHtml('.info__title').text()
pageData['imgs'] = []
for img in pageHtml('.content__img'):
pageData['imgs'].append(pq(img).attr('data-src'))
allImgCount += 1
# Download images
imgCount = 0
for pageCount, pageData in enumerate(data):
for imgOfPageCount, img in enumerate(pageData['imgs']):
                # Check if the first image entry is the dummy text that indicates the page was unavailable when we tried to scrape it.
if pageData['imgs'][0] != "PageUnavailable":
# If the entry isn't a dummy entry, go ahead and download the images it contains.
                with open(os.path.join(savePath, check_path('{} - {} - {} - {} - #{}.{}'.format(lead0(imgCount + imgOffset, allImgCount + imgOffset), lead0(pageCount + pageOffset, len(data) + pageOffset),
lead0(imgOfPageCount, len(pageData['imgs'])), pageData['title'], pageData['id'], img[img.rindex('.') + 1:]),
fat=args.restrict_characters)), 'wb') as f:
f.write(requests.get(img).content)
imgCount += 1
printLine('Downloaded image {}/{} from page {}/{} ({}/{} images)...'.format(imgOfPageCount + 1, len(pageData['imgs']), pageCount + pageOffset, len(data) + pageOffset, imgCount + imgOffset, allImgCount + imgOffset), True)
else:
# If the entry was a dummy entry, skip it and let the user know.
printLine('Error: No images downloaded from page {}/{}.'.format(pageCount + pageOffset, len(data) + pageOffset), True)
if data != []:
printLine('Downloaded {} of {} images'.format(imgCount, allImgCount))
else:
printLine('Nothing to do')
if urlCount + 1 != len(args.url):
printLine()
else:
printLine('Detected novel')
from ebooklib import epub
# download/create epub
book = epub.EpubBook()
customCss = None
# Add meta data
book.set_identifier(urlName)
book.set_title(name)
book.set_language('en')
book.add_author(author)
header_name = os.path.join(savePath, list(filter(re.compile(r'.+header\..+').match, os.listdir(savePath)))[0])
book.set_cover("cover.jpg", open(header_name, 'rb').read())
book.toc = []
book.spine = ['cover']
# create about page
chapter = epub.EpubHtml(title='about', file_name='about.xhtml')
chapter.content = f'<h1>About</h1><p>Title: {name}</p><p>Author: {book.metadata["http://purl.org/dc/elements/1.1/"]["creator"][0][0]}</p><p>Source: <a href="{"https://tapas.io/series/" + urlName}">{"https://tapas.io/series/" + urlName}</a></p>'
book.add_item(chapter)
book.spine.append(chapter)
# Append nav page
book.spine.append('nav')
# create chapters
for pageCount, pageData in enumerate(data):
printLine('Downloaded page {}/{}...'.format(pageCount + 1, len(data)), True)
pagePq = pq(url='https://tapas.io/episode/' + str(pageData['id']), headers={'user-agent': 'tapas-dl'})
pageTitle = pagePq('.viewer__header > .title').text()
pageHtml = f'<h1>{pageTitle}</h1>'
for p in pagePq('article.viewer__body > div.ep-epub-content > div.body > p'):
p = pq(p)
if p.text() is not None:
pageHtml += '<p>' + p.text() + '</p>'
chapter = epub.EpubHtml(title=pageTitle, file_name=str(pageData['id']) + '.xhtml')
chapter.content = pageHtml
book.add_item(chapter)
# define Table Of Contents
book.toc.append(epub.Link(str(pageData['id']) + '.xhtml', pageTitle, str(pageData['id'])))
# basic spine
book.spine.append(chapter)
# add default NCX and Nav file
book.add_item(epub.EpubNcx())
book.add_item(epub.EpubNav())
# add CSS
style = ''
nav_css = epub.EpubItem(uid="style_nav", file_name="style/nav.css", media_type="text/css", content=style)
book.add_item(nav_css)
# write to the file
epub.write_epub(os.path.join('/'.join(savePath.split('/')[:-1]), check_path(f'{name}.epub', fat=args.restrict_characters)), book)
# remove tmp folder
for file in os.listdir(savePath):
os.remove(os.path.join(savePath, file))
os.removedirs(savePath)
| mit | -8,863,121,167,300,171,000 | 40.740351 | 301 | 0.580531 | false | 3.716339 | false | false | false |
treefrogframework/FrameworkBenchmarks | frameworks/Python/pyramid/frameworkbenchmarks/models.py | 15 | 1305 | """
Benchmark models.
"""
import json
import os
import psycopg2
from collections import Iterable
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import QueuePool
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
def get_conn():
return psycopg2.connect(
user='benchmarkdbuser',
password='benchmarkdbpass',
host='tfb-database',
port='5432',
database='hello_world')
conn_pool = QueuePool(get_conn, pool_size=100, max_overflow=25, echo=False)
pg = create_engine('postgresql://', pool=conn_pool)
DBSession = sessionmaker(bind=pg)()
metadata = MetaData()
DatabaseBase = declarative_base()
class World(DatabaseBase):
__tablename__ = 'world'
id = Column('id', Integer, primary_key=True)
randomNumber = Column(
'randomnumber', Integer, nullable=False, server_default='0')
def __json__(self, request=None):
return {'id': self.id, 'randomNumber': self.randomNumber}
class Fortune(DatabaseBase):
__tablename__ = 'fortune'
id = Column('id', Integer, primary_key=True)
message = Column('message', String, nullable=False)
def __json__(self):
return {'id': self.id, 'message': self.message}
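# A minimal usage sketch (editorial addition, not part of the benchmark code;
# the id 1234 is only an illustration):
#   world = DBSession.query(World).get(1234)
#   world.__json__()   # -> {'id': 1234, 'randomNumber': <int>}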
| bsd-3-clause | 5,405,198,107,441,629,000 | 24.588235 | 78 | 0.687356 | false | 3.760807 | false | false | false |
Shu-Ji/ebook-chinese-ocr | ocr.py | 1 | 7062 | # coding: u8
from hashlib import md5 as _m5
from itertools import izip
import cPickle as pickle
import os
import time
from PIL import Image
import Levenshtein
md5 = lambda s: _m5(s).hexdigest()
class Otsu(object):
    FAST_VAL = 255  # any non-zero value will do
    BINARY_THRESHOLD = 190  # binarization threshold
def __init__(self, path=None, im=None):
if im is None:
self.open_image(path)
else:
self.im = im
self.w, self.h = self.im.size
def get_vertical_projection(self, fast=True, rotate=False):
        u'''Get the vertical projection of a binary image.
        Returns a list of projection data, where each number is the count of
        pixels whose value is 0 in that column.
        If fast is True, a column is skipped as soon as a character pixel is
        found in it.
        Note: in that case the values in the returned graph are 0 or FAST_VAL,
        not the actual counts.
        '''
im = self.im.transpose(Image.ROTATE_90) if rotate else self.im
pixels = im.load()
w, h = im.size
graph = [0] * w
for x in range(w):
for y in range(h):
pixel = pixels[x, y]
                if pixel == 0:  # this column contains character pixels
                    if fast:  # skip the rest of this column
graph[x] = self.FAST_VAL
break
else:
graph[x] += 1
return graph
def show_vertical_projection(self, graph):
w = len(graph)
h = max(graph)
img = Image.new('1', (w, h))
for x in range(w):
for y in range(h):
if y <= graph[x]:
img.putpixel((x, y), 255)
else:
break
        # The graph is drawn from the top-left corner; flip it vertically for
        # easier viewing.
img.transpose(Image.FLIP_TOP_BOTTOM).show()
return self
def open_image(self, path):
im = Image.open(path)
        self.im = im.convert('L')  # grayscale
        self.im = self.im.point(
            lambda p: p > self.BINARY_THRESHOLD and 255)  # binarize
return self
def cut_to_lines(self, rotate=True, show=False):
u"""将二值图片按行切割。
原理:按照图片旋转90度后的垂直投影图切割。
"""
graph = self.get_vertical_projection(fast=True, rotate=rotate)
if show:
self.show_vertical_projection(graph)
        if len(list(set(graph))) == 1:  # all values are 0, i.e. there is no text at all
return
        starts = []  # index where each run of FAST_VAL elements starts
        ends = []  # index where each run of FAST_VAL elements ends
        # e.g. if graph == [0, 0, 255, 255, 255, 0, 0, 0, 255, 255, 0, 255, 0, 0]
        # then starts == [2, 8, 11] and ends == [4, 9, 11]
        char = self.FAST_VAL  # looking for FAST_VAL
        for i, v in enumerate(graph):
            if v == char:
                # swap what we are looking for
                if char == self.FAST_VAL:  # found the first FAST_VAL of the current run
                    char = 0
                    starts.append(i)
                else:  # found the last FAST_VAL of the current run
                    char = self.FAST_VAL
                    ends.append(i - 1)  # i is where the 0 is, so i - 1 is the FAST_VAL position
if 0 and not rotate:
            # Merge characters with a left-right structure that were split apart.
            # The gap inside such a character is small; gaps narrower than N px
            # are treated as belonging to the same character.
N = 2
new_starts = []
new_ends = []
last_s = last_e = 0
def push(start, end):
new_starts.append(start)
new_ends.append(end)
for start, end in izip(starts, ends):
if last_s == 0:
push(start, end)
elif start - last_e < N:
new_ends[-1] = end
else:
push(start, end)
last_s, last_e = start, end
starts, ends = new_starts, new_ends
i = 1
for start, end in izip(starts, ends):
            # The graph was computed on the 90-degree-rotated image, so the
            # stored values are y coordinates in the original image.
if rotate:
box = (0, start, self.w, end)
else:
box = (start, 0, end, self.h)
yield self.im.crop(box), i
i += 1
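# A minimal usage sketch (editorial addition, not from the original script):
# cut a scanned page into line images and each line into candidate character
# cells. The file name 'page.png' is only an illustration.
#   otsu = Otsu('page.png')
#   for line_img, line_num in otsu.cut_to_lines():
#       for cell_img, col_num in Otsu(im=line_img).cut_to_lines(rotate=False):
#           cell_img.save('cell_%d_%d.png' % (line_num, col_num))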
if __name__ == '__main__':
import glob
glob.glob('./imgs/*.jpg')
otsu = Otsu('/home/finn/rubbish/ocr/test-10001.bmp')
#otsu.im.show()
i = 1000
def update(m5, char):
a = samples[m5]
a[-1] = char
samples[m5] = a
pickle_file = 'data.pickle'
samples = pickle.load(open(pickle_file, 'rb'))
bak_pickle_file = '%s._%d_%s' % (pickle_file, time.time(), '.bak')
open(bak_pickle_file, 'wb').write(open(pickle_file, 'rb').read())
"""
for fn in glob.glob('./cls/*.png'):
m5, char = fn.split('.')[1].split('/')[-1].split('_')
samples[m5] = [char, char, char]
"""
update('0538fd2620d99c82ea1627987d7c4e96', '偕-l')
for line, line_num in otsu.cut_to_lines():
#line.show()
line.save('/tmp/cut/0000000_cur_line.png')
otsu = Otsu(im=line)
for word, col_num in otsu.cut_to_lines(rotate=False, show=0):
_word = word
word = word.resize((48, 48), Image.BICUBIC).convert('1')
data = ''.join(str(p) for p in word.getdata()).replace('255', '1')
m5 = md5(data)
if m5 not in samples:
                # Keep the /tmp/cut directory open to make manual input easier
path = '/tmp/cut/%s.%s_%s.png' % (line_num, col_num, m5)
word.save(path)
min_distance = len(data)
maybe = None
for key, value in samples.items():
binary_string = value[-2]
try:
distance = Levenshtein.hamming(binary_string, data)
                    except:
                        del samples[key]
                        continue
if min_distance > distance:
maybe = value
min_distance = distance
maychar = maybe[-1]
print 'maybe:', maychar, min_distance
char = raw_input('input(press RETURN to accept %s):' % maychar)
if char == '':
char = maychar
os.remove(path)
os.system('clear')
samples[m5] = [word.tostring(), data, char]
pickle.dump(samples, open(pickle_file, 'wb'))
path = 'cls/%s_%s.png' % (m5, char)
_word.save(path)
else:
char = samples[m5][-1]
#samples[m5] = [word.tostring(), data, char]
print m5, char
path = 'cut/%s.%s_%s_%s.png' % (line_num, col_num, m5, char)
_word.save(path)
i += 1
| mit | -8,641,748,544,319,254,000 | 29.975845 | 79 | 0.46413 | false | 2.980939 | false | false | false |
mattstruble/crusty | crusty/game.py | 1 | 1610 | #!/usr/bin/env python
# Copyright (c) 2016 Matt Struble. All Rights Reserved.
#
# Use is subject to license terms.
#
# Author: Matt Struble
# Date: Aug. 31 2016
import time, os
from graphics.graphicsdevice import GraphicsDevice
from inputs import Keyboard, Keys
class Game: #{
""" Handles initialization and core game loop. """
FPS = 30.0
SEC_PER_FRAME = 1.0/FPS
def __init__(self, w='1000', h='300', title='Game'):
self.gd = GraphicsDevice(w, h, title)
self.run()
def run(self):
self._initialize()
self._loadContent()
self._loop()
def _initialize(self):
Keyboard.initialize()
self.running = True
def _terminate(self):
Keyboard.terminate()
self.running = False
def _loadContent(self):
pass
def _loop(self):
previous = time.time()
while self.running:
current = time.time()
dt = current - previous
previous = current
self._processInput()
self._update(dt)
self._render()
sleepTime = self.SEC_PER_FRAME - ( time.time() - current )
if sleepTime > 0:
time.sleep( sleepTime )
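        # Editorial note: with FPS = 30 each iteration targets SEC_PER_FRAME
        # (~0.033 s); e.g. if _update + _render take 0.01 s, sleepTime is
        # roughly 0.023 s before the next frame starts.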
def _processInput(self):
if Keyboard.released(Keys.ESCAPE):
self._terminate()
def _update(self, dt):
Keyboard._update()
def _render(self):
# Clear terminal buffer
#os.system('cls' if os.name == 'nt' else 'clear')
print Keyboard.getKeyboardString()
| mit | 1,304,281,080,459,204,000 | 22.333333 | 70 | 0.543478 | false | 4.065657 | false | false | false |
samjo-nyang/samground | samground/trash/models.py | 1 | 1635 | from django.conf import settings
from django.db import models
import os
class Tfile(models.Model):
PERM_PUBLIC = 1
PERM_CONFIDENTIAL = 2
PERM_CLASSIFIED = 3
PERM_TYPES = (
(PERM_PUBLIC, 'Public'),
(PERM_CONFIDENTIAL, 'Confidential'),
(PERM_CLASSIFIED, 'Classified'),
)
LIMIT_NONE = 'N'
LIMIT_TIME = 'T'
LIMIT_ACCESS = 'A'
LIMIT_TYPES = (
(LIMIT_NONE, 'None'),
(LIMIT_TIME, 'Time'),
(LIMIT_ACCESS, 'Access'),
)
uid = models.CharField(primary_key=True, max_length=32)
name = models.CharField(max_length=255)
size = models.BigIntegerField()
perm = models.PositiveSmallIntegerField(choices=PERM_TYPES)
qid = models.CharField(max_length=6, null=True, blank=True)
access_count = models.IntegerField(default=0)
limit = models.CharField(max_length=1, choices=LIMIT_TYPES)
time_limit = models.DateTimeField(null=True, blank=True)
access_limit = models.IntegerField(default=0)
upload_time = models.DateTimeField(auto_now_add=True)
last_access_time = models.DateTimeField(auto_now=True)
def __str__(self):
return '{self.name}'.format(self=self)
def abs_path(self):
return os.path.join(settings.TRASH_PATH, self.uid)
class Meta:
verbose_name_plural = "tfiles"
class Penalty(models.Model):
ip = models.GenericIPAddressField(unique=True)
amount = models.IntegerField()
last_time = models.DateTimeField(auto_now=True)
def __str__(self):
return '{self.ip}: {self.amount}'.format(self=self)
class Meta:
verbose_name_plural = "penalties"
| mit | -3,573,165,053,851,866,600 | 28.196429 | 63 | 0.650153 | false | 3.478723 | false | false | false |
umich-brcf-bioinf/Connor | setup.py | 1 | 2313 | from __future__ import print_function
import os
import platform
import sys
from setuptools import find_packages, setup
import connor
_REQUIRED_PYTHON_VERSION = (2, 7)
def check_python_version():
if sys.version_info < _REQUIRED_PYTHON_VERSION:
msg_format = '''
Problem: Python v{0}.{1} or above is required but you are using v{2}.
Please install a supported version of Python and try again.\
'''
message = msg_format.format(_REQUIRED_PYTHON_VERSION[0],
_REQUIRED_PYTHON_VERSION[1],
platform.python_version())
print(message, file=sys.stderr)
sys.exit(1)
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as filename:
return filename.read()
check_python_version()
setup(name='Connor',
version = connor.__version__,
description=('Command-line tool to deduplicate reads in bam files based '
'on custom inline barcoding.'),
long_description=(read('README.rst') + '\n\n' +
read('doc/CHANGELOG.rst') + '\n\n' +
read('doc/AUTHORS.rst')),
url='https://github.com/umich-brcf-bioinf/Connor',
author='University of Michigan Bioinformatics Core',
author_email='bfx-connor@umich.edu',
license='Apache',
packages=find_packages(exclude=['test*']),
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Bio-Informatics'],
keywords='bioinformatic exome-seq DNA-seq BAM',
setup_requires=['cython'],
install_requires=['pysam>=0.8.4', 'sortedcontainers>=1.5.3'],
entry_points={'console_scripts': ['connor=connor.connor:main']},
test_suite='nose.collector',
tests_require=['nose', 'pysam', 'testfixtures'],
zip_safe=False)
| apache-2.0 | 9,214,188,016,857,975,000 | 38.87931 | 79 | 0.587549 | false | 4.036649 | false | false | false |
tfgraph/tfgraph | tfgraph/algorithms/pagerank/transition/transition_reset_matrix.py | 1 | 4800 | import numpy as np
import tensorflow as tf
from tfgraph.algorithms.pagerank.transition.transition import Transition
from tfgraph.graph.graph import Graph
class TransitionResetMatrix(Transition):
""" Transition Matrix Class
This class implements the functionality of a 2-D matrix that represents the
  probability distribution of a random walk between the vertices of the graph.
Attributes:
sess (:obj:`tf.Session`): This attribute represents the session that runs
the TensorFlow operations.
name (str): This attribute represents the name of the object in TensorFlow's
op Graph.
writer (:obj:`tf.summary.FileWriter`): This attribute represents a
TensorFlow's Writer, that is used to obtain stats.
is_sparse (bool): Use sparse Tensors if it's set to True. Not
implemented yet. Show the Todo.
G (:obj:`tfgraph.Graph`): The graph on which the transition is referred.
transition (:obj:`tf.Variable`): The 2-D `tf.Tensor` with the same shape as
adjacency matrix of the graph, that represents the probabilities to
move from one vertex to another.
    beta (float): The reset probability of the random walks, i.e. the
      probability that a user who surfs the graph decides to jump to another
      vertex not connected to the current one.
"""
def __init__(self, sess: tf.Session, name: str, graph: Graph,
beta: float,
writer: tf.summary.FileWriter = None,
is_sparse: bool = False) -> None:
""" Constructor of the class.
This method is called to create a new instance of Transition class.
Args:
sess (:obj:`tf.Session`): This attribute represents the session that runs
the TensorFlow operations.
name (str): This attribute represents the name of the object in
TensorFlow's op Graph.
graph (:obj:`tfgraph.Graph`): The graph on which the transition is referred.
      beta (float): The reset probability of the random walks, i.e. the
        probability that a user who surfs the graph decides to jump to
        another vertex not connected to the current one.
writer (:obj:`tf.summary.FileWriter`): This attribute represents a
TensorFlow's Writer, that is used to obtain stats.
is_sparse (bool): Use sparse Tensors if it's set to True. Not implemented
yet. Show the Todo.
"""
Transition.__init__(self, sess=sess, name=name, graph=graph, writer=writer,
is_sparse=is_sparse)
self.beta = beta
self.transition = tf.Variable(tf.add(
tf.scalar_mul(beta, tf.div(self.G.A_tf,
self.G.out_degrees_tf)),
(1 - beta) / self.G.n_tf),
name=self.name)
self.run_tf(tf.variables_initializer([self.transition]))
def get_tf(self, *args, **kwargs):
""" The method that returns the transition Tensor.
This method will return the transition matrix of the graph.
Args:
*args: The args of the `get_tf()` method.
**kwargs: The kwargs of the `get_tf()` method.
Returns:
(:obj:`tf.Tensor`): A `tf.Tensor` that contains the distribution of
transitions over vertices of the graph.
"""
return self.transition
def update_edge(self, edge: np.ndarray, change: float) -> None:
""" The callback to receive notifications about edge changes in the graph.
This method is called from the Graph when an addition or deletion is
produced on the edge set. So probably is necessary to recompute the
transition matrix.
Args:
edge (:obj:`np.ndarray`): A 1-D `np.ndarray` that represents the edge that
changes in the graph, where `edge[0]` is the source vertex, and
`edge[1]` the destination vertex.
change (float): The variation of the edge weight. If the final value is
0.0 then the edge is removed.
Returns:
This method returns nothing.
"""
if change > 0.0:
self.run_tf(tf.scatter_nd_update(
self.transition, [[edge[0]]],
tf.add(
tf.scalar_mul(
self.beta,
tf.div(
self.G.A_tf_vertex(edge[0]),
self.G.out_degrees_tf_vertex(edge[0]))),
(1 - self.beta) / self.G.n_tf)))
else:
self.run_tf(tf.scatter_nd_update(
self.transition, [[edge[0]]],
tf.where(self.G.is_not_sink_tf_vertex(edge[0]),
tf.add(
tf.scalar_mul(
self.beta,
tf.div(
self.G.A_tf_vertex(edge[0]),
self.G.out_degrees_tf_vertex(edge[0]))),
(
1 - self.beta) / self.G.n_tf),
tf.fill([1, self.G.n], tf.pow(self.G.n_tf, -1)))))
self._notify(edge, change)
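# A minimal usage sketch (editorial addition; `sess` and `g` are placeholders,
# not defined in this module, and Graph(...) stands for some tfgraph.Graph):
#   sess = tf.Session()
#   g = Graph(...)
#   t = TransitionResetMatrix(sess, 'transition', g, beta=0.85)
#   probs = sess.run(t.get_tf())  # row i: beta * A[i] / out_deg(i) + (1 - beta) / n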
| apache-2.0 | -4,060,738,975,726,950,400 | 37.709677 | 83 | 0.624167 | false | 4.067797 | false | false | false |
bhargavz/py-twitter-sentiment-analysis | data/db/base/userObj.py | 1 | 2713 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: userObj.py
#
# An object that mirrors the user data table in the database
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
#
from datetime import datetime
import copy
class userObj(object):
def __init__(self):
self.rid = None
self.user_name = ""
self.screen_name = ""
self.user_id = 0
self.join_dt = datetime(2000,1,1,0,0,0,0)
self.verified = False
self.geo_enabled = False
self.location = ""
self.lang = ""
self.time_zone = ""
self.url = ""
self.description = ""
def to_dict(self):
rec = {}
if( self.rid > 0 ):
rec['rid'] = self.rid
rec['user_name'] = self.user_name
rec['screen_name'] = self.screen_name
rec['user_id'] = self.user_id
rec['join_dt'] = self.join_dt
rec['verified'] = self.verified
rec['geo_enabled'] = self.geo_enabled
rec['location'] = self.location
rec['lang'] = self.lang
rec['time_zone'] = self.time_zone
rec['url'] = self.url
rec['description'] = self.description
return rec
def from_dict(self, rec):
nobj = userObj()
if( rec ):
nobj.user_name = rec['user_name']
nobj.screen_name = rec['screen_name']
nobj.user_id = rec['user_id']
nobj.join_dt = rec['join_dt']
nobj.verified = rec['verified']
nobj.geo_enabled = rec['geo_enabled']
nobj.location = rec['location']
nobj.lang = rec['lang']
nobj.time_zone = rec['time_zone']
nobj.url = rec['url']
nobj.description = rec['description']
return nobj
def clone(self):
nobj = userObj()
if( self.rid > 0 ):
nobj.rid = self.rid
nobj.user_name = self.user_name
nobj.screen_name = self.screen_name
nobj.user_id = self.user_id
nobj.join_dt = self.join_dt
nobj.verified = self.verified
nobj.geo_enabled = self.geo_enabled
nobj.location = self.location
nobj.lang = self.lang
nobj.time_zone = self.time_zone
nobj.url = self.url
nobj.description = self.description
return nobj
def __repr__(self):
return "<userObj('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')>"%(str(self.rid),str(self.user_name),str(self.screen_name),str(self.user_id),str(self.join_dt),str(self.verified),str(self.geo_enabled),str(self.location),str(self.lang),str(self.time_zone),str(self.url),str(self.description))
| mit | -5,726,932,227,834,109,000 | 31.686747 | 312 | 0.542573 | false | 3.37858 | false | false | false |
mauropalumbo75/pyqha | pyqha/plotutils.py | 1 | 6080 | #encoding: UTF-8
# Copyright (C) 2016 Mauro Palumbo
# This file is distributed under the terms of the # MIT License.
# See the file `License' in the root directory of the present distribution.
"""
A collection of wrappers for the *matplotlib* functions.
.. Note::
All functions return a *matplotlib* which can be modified by the user.
"""
try:
    import wx  # only switch to the WX backend if wxPython is available
    from matplotlib import use
    use('WXAgg')
except ImportError:
    pass
import matplotlib.pyplot as plt
import numpy as np
from .eos import calculate_fitted_points
def simple_plot_xy(x,y,xlabel="",ylabel=""):
"""
This function generates a simple xy plot with matplotlib.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
ax.plot(x, y, 'r')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
return fig
def multiple_plot_xy(x,y,xlabel="",ylabel="",labels=""):
"""
    This function generates a simple xy plot with matplotlib, overlapping several
    lines given as the columns of the matrix y: the second index of y selects a
    line in the plot, the first index runs over the points of that line.
"""
if (len(y[0,:])>7):
print ("Too many data on y axis!")
return
colors = ['k','r','b','g','c','m','y']
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
if (labels==""):
try: # try if there are multiple data on x axis
for i in range(0,len(y[0,:])):
ax.plot(x[:,i], y[:,i], colors[i])
except: # if not use a single x axis
for i in range(0,len(y[0,:])):
ax.plot(x, y[:,i], colors[i])
else:
try: # try if there are multiple data on x axis
for i in range(0,len(y[0,:])):
ax.plot(x[:,i], y[:,i], colors[i],label=labels[i])
except: # if not use a single x axis
for i in range(0,len(y[0,:])):
ax.plot(x, y[:,i], colors[i],label=labels[i])
ax.legend()
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
return fig
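# Illustrative call (editorial sketch, not from the original module): plot two
# curves that share a single x axis.
#   x = np.linspace(0.0, 1.0, 50)
#   y = np.column_stack((x**2, x**3))
#   fig = multiple_plot_xy(x, y, xlabel="x", ylabel="y", labels=["x^2", "x^3"])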
def plot_EV(V,E,a=None,labely="Etot"):
"""
    This function plots E(V) data with matplotlib and, if a is given, also plots
    the fitted results.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
ax.plot(V, E, 'o', label=labely+" data", markersize=10)
    if a is not None:
Vdense, Edensefitted = calculate_fitted_points(V,a)
ax.plot(Vdense, Edensefitted, 'r', label='Fitted EOS')
ax.legend()
ax.set_xlabel('V (a.u.^3)')
ax.set_ylabel('E (Ry)')
plt.show()
return fig
def plot_Etot(celldmsx,Ex,n,nmesh=(50,50,50),fittype="quadratic",ibrav=4,a=None):
"""
    This function makes a 3D plot with matplotlib of the Ex(celldmsx) data and,
    if a is given, also plots the fitted results. The plot type depends on ibrav.
"""
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
from .minutils import calculate_fitted_points_anis
    if Ex is None and a is None:
return
fig = plt.figure()
if (ibrav==4): # hex case
ax = fig.gca(projection='3d')
        if Ex is not None:
na=n[0]
nc=n[2]
X = np.zeros((na,nc))
Y = np.zeros((na,nc))
Z = np.zeros((na,nc))
for j in range(0,nc):
for i in range(0,na):
index = j*na+i
X[i,j] = celldmsx[index,0]
Y[i,j] = celldmsx[index,2]
Z[i,j] = Ex[index]
#print (index,X[i,j],Y[i,j],Z[i,j])
ax.set_xlim(X.min(),X.max())
ax.set_ylim(Y.min(),Y.max())
ax.set_zlim(Z.min(),Z.max())
ax.scatter(X,Y,Z,c='r',marker='o')
        if a is not None:
celldmsxdense, Edensefitted = calculate_fitted_points_anis(celldmsx,nmesh,fittype,ibrav,a)
Xd = np.zeros((nmesh[0],nmesh[2]))
Yd = np.zeros((nmesh[0],nmesh[2]))
Zd = np.zeros((nmesh[0],nmesh[2]))
for i in range(0,nmesh[0]):
for j in range(0,nmesh[2]):
index = i*nmesh[0]+j
Xd[i,j] = celldmsxdense[index,0]
Yd[i,j] = celldmsxdense[index,2]
Zd[i,j] = Edensefitted[index]
ax.set_xlim(Xd.min(),Xd.max())
ax.set_ylim(Yd.min(),Yd.max())
ax.set_zlim(Zd.min(),Zd.max())
ax.plot_surface(Xd, Yd, Zd, rstride=1, cstride=1, alpha=0.3)
cset = ax.contour(Xd, Yd, Zd, zdir='z', offset=Zd.min(), cmap=cm.coolwarm)
ax.set_xlabel("a (a.u.)")
ax.set_ylabel("c (a.u.)")
ax.set_zlabel("Etot (Ry)")
plt.show()
return fig
def plot_Etot_contour(celldmsx,nmesh=(50,50,50),fittype="quadratic",ibrav=4,a=None):
"""
    This function makes a contour plot with matplotlib of the Ex(celldmsx) fitted results.
The plot type depends on ibrav.
"""
from .minutils import calculate_fitted_points_anis
    if a is None:
return
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
if (ibrav==4):
celldmsxdense, Edensefitted = calculate_fitted_points_anis(celldmsx,nmesh,fittype,ibrav,a)
Xd = np.zeros((nmesh[0],nmesh[2]))
Yd = np.zeros((nmesh[0],nmesh[2]))
Zd = np.zeros((nmesh[0],nmesh[2]))
for i in range(0,nmesh[0]):
for j in range(0,nmesh[2]):
index = i*nmesh[0]+j
Xd[i,j] = celldmsxdense[index,0]
Yd[i,j] = celldmsxdense[index,2]
Zd[i,j] = Edensefitted[index]
CS = ax.contour(Xd, Yd, Zd)
plt.clabel(CS, inline=1, fontsize=10)
CS.ax.set_xlabel("a (a.u.)")
CS.ax.set_ylabel("c (a.u.)")
plt.show()
return fig
| mit | 8,260,005,133,392,983,000 | 29.552764 | 102 | 0.530592 | false | 3.119548 | false | false | false |
redhat-cip/dci-agent | dci_agent/ansible/library/dci_upload.py | 1 | 4320 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016-2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import * # noqa
from dciclient.v1.api import context as dci_context
from dciclient.v1.api import file as dci_file
import mimetypes
import os
DOCUMENTATION = '''
---
module: dci_upload
short_description: upload logs from local directory to the dci control server
'''
EXAMPLES = '''
- name: upload local logs
dci_upload:
src_dir: <path-logs-directory>
dci_login: <dci login>
dci_password: <dci password>
dci_cs_url: <dci cs url>
dci_status: <dci status>
job_id: <job id>
'''
def upload(context, dci_status, job_id, src_dir=None, file_path=None):
"""This function upload logs to dci control server from local logs
directory.
"""
def upload_directory():
status_dir = '%s/%s' % (src_dir, dci_status)
jobstate_id = open('%s/jobstate_id' % status_dir).read()
logs_files = os.listdir(status_dir)
# sort the file by date in order to upload the files in order
logs_files_paths = [os.path.join(status_dir, f) for f in logs_files]
logs_files_paths.sort(key=lambda x: os.path.getmtime(x))
for log_file_path in logs_files_paths:
name = os.path.basename(log_file_path)
if not log_file_path.endswith('jobstate_id'):
                # junit files are attached directly to the job
if log_file_path.endswith('.junit'):
dci_file.create_with_stream(context, name=name,
file_path=log_file_path,
mime='application/junit',
job_id=job_id)
else:
dci_file.create_with_stream(context, name=name,
file_path=log_file_path,
jobstate_id=jobstate_id)
os.remove(log_file_path)
return {'uploaded': logs_files}
def upload_file():
if not os.path.exists(file_path):
return {'failed': True,
'msg': 'file %s does not exist' % file_path}
mimetype, _ = mimetypes.guess_type(file_path)
mimetype = mimetype or 'text/plain'
name = os.path.basename(file_path)
dci_file.create_with_stream(context, name=name,
file_path=file_path,
mime=mimetype,
job_id=job_id)
return {'uploaded': file_path}
if src_dir is not None:
return upload_directory()
else:
return upload_file()
def main():
fields = {
"src_dir": {"required": False, "type": "str"},
"file": {"required": False, "type": "str"},
"dci_status": {"required": True, "type": "str"},
"dci_login": {"required": True, "type": "str"},
"dci_password": {"required": True, "type": "str"},
"dci_cs_url": {"required": True, "type": "str"},
"job_id": {"required": True, "type": "str"}
}
module = AnsibleModule(argument_spec=fields)
src_dir = module.params.get('src_dir')
file = module.params.get('file')
dci_status = module.params['dci_status']
dci_login = module.params['dci_login']
dci_password = module.params['dci_password']
dci_cs_url = module.params['dci_cs_url']
job_id = module.params['job_id']
_dci_context = dci_context.build_dci_context(
dci_cs_url,
dci_login,
dci_password
)
response = upload(_dci_context, dci_status, job_id, src_dir, file)
module.exit_json(changed=True, meta=response)
if __name__ == '__main__':
main()
| apache-2.0 | -196,556,626,459,693,900 | 33.83871 | 77 | 0.574769 | false | 3.642496 | false | false | false |
daenney/err-githubhook | providers.py | 1 | 8711 | import hashlib
import hmac
from errbot.templating import tenv
GITHUB_EVENTS = ['commit_comment', 'create', 'delete', 'deployment',
'deployment_status', 'fork', 'gollum', 'issue_comment',
'issues', 'member', 'page_build', 'public',
'pull_request_review_comment', 'pull_request', 'push',
'release', 'status', 'team_add', 'watch', '*']
GITLAB_EVENTS = ['push_hook', 'tag_push_hook', 'issue_hook', 'note_hook', 'merge_request_hook']
SUPPORTED_EVENTS = GITHUB_EVENTS + GITLAB_EVENTS
DEFAULT_EVENTS = ['commit_comment', 'issue_comment', 'issues', 'pull_request_review_comment',
'pull_request', 'push', 'push_hook', 'tag_push_hook', 'issue_hook',
'note_hook', 'merge_request_hook']
class CommonGitWebProvider(object):
def create_message(self, body, event_type, repo):
"""
        Dispatch the message. Check explicitly with hasattr first: if we
        used try/except AttributeError around the call instead, an
        AttributeError raised inside the message function itself would
        silently fall back to msg_generic, which is not what we want.
"""
message_function = 'msg_{0}'.format(event_type)
if hasattr(self, message_function):
message = getattr(self, message_function)(body, repo)
else:
message = self.msg_generic(body, repo, event_type)
return message
def render_template(self, template='generic', **kwargs):
kwargs['repo_name'] = kwargs.get('repo_name') or self.name
return tenv().get_template('{0}.html'.format(template)).render(**kwargs)
def msg_generic(self, body, repo, event_type):
return self.render_template(
template='generic', body=body, repo=repo, event_type=event_type)
class GithubHandlers(CommonGitWebProvider):
name = 'Github'
@staticmethod
def valid_message(request, token):
"""Validate the signature of the incoming payload.
The header received from Github is in the form of algorithm=hash.
"""
# TODO: Fix GitLab token validation:
# https://docs.gitlab.com/ce/web_hooks/web_hooks.html#secret-token
signature = request.get_header('X-Hub-Signature')
if signature is None:
return False
try:
alg, sig = signature.split('=')
except ValueError:
return False
if alg != 'sha1':
return False
message = request.body.read()
mac = hmac.new(token.encode(), msg=message, digestmod=hashlib.sha1).hexdigest()
return hmac.compare_digest(mac, sig)
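    # Editorial note: GitHub computes the X-Hub-Signature header as
    #   sha1=<hex HMAC-SHA1 of the raw request body, keyed with the webhook secret>
    # which is exactly what the hmac.new(...) comparison above recomputes.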
def get_repo(self, body):
return body['repository']['full_name']
def msg_issues(self, body, repo):
return self.render_template(
template='issues', body=body, repo=repo,
action=body['action'],
number=body['issue']['number'],
title=body['issue']['title'],
user=body['issue']['user']['login'],
url=body['issue']['url'],
is_assigned=body['issue']['assignee'],
assignee=body['issue']['assignee']['login'] if body['issue']['assignee'] else None
)
def msg_pull_request(self, body, repo):
action = body['action']
user = body['pull_request']['user']['login']
if action == 'closed' and body['pull_request']['merged']:
user = body['pull_request']['merged_by']['login']
action = 'merged'
if action == 'synchronize':
action = 'updated'
return self.render_template(
template='pull_request', body=body, repo=repo,
action=action, user=user,
number=body['pull_request']['number'],
url=body['pull_request']['html_url'],
merged=body['pull_request']['merged'],
)
def msg_pull_request_review_comment(self, body, repo):
return self.render_template(
template='pull_request_review_comment', body=body, repo=repo,
action='commented' if body['action'] == 'created' else body['action'],
user=body['comment']['user']['login'],
line=body['comment']['position'],
l_url=body['comment']['html_url'],
pr=body['pull_request']['number'],
url=body['pull_request']['html_url'],
)
def msg_push(self, body, repo):
return self.render_template(
template='push', body=body, repo=repo,
user=body['pusher']['name'],
commits=len(body['commits']),
branch=body['ref'].split('/')[-1],
url=body['compare'],
)
def msg_status(*args):
"""Status events are crazy and free form. There's no sane, consistent
or logical way to deal with them."""
return None
def msg_issue_comment(self, body, repo):
return self.render_template(
template='issue_comment', body=body, repo=repo,
action='commented' if body['action'] == 'created' else body['action'],
user=body['comment']['user']['login'],
number=body['issue']['number'],
title=body['issue']['title'],
url=body['issue']['html_url'],
)
def msg_commit_comment(self, body, repo):
return self.render_template(
template='commit_comment', body=body, repo=repo,
user=body['comment']['user']['login'],
url=body['comment']['html_url'],
line=body['comment']['line'],
sha=body['comment']['commit_id'],
)
class GitLabHandlers(CommonGitWebProvider):
name = 'GitLab'
@staticmethod
def valid_message(request, token):
"""Validate the signature of the incoming payload.
The header received from GitLab is in the form of algorithm=hash.
# TODO: Fix GitLab token validation:
# https://docs.gitlab.com/ce/web_hooks/web_hooks.html#secret-token
"""
signature = request.get_header('X-Gitlab-Token')
return True
def get_repo(self, body):
return body['project']['name']
def map_event_type(self, event_type):
return {
'push_hook': 'push',
'issue_hook': 'issue',
'note_hook': 'comment',
}.get(event_type)
def create_message(self, body, event_type, repo):
mapped_event_type = self.map_event_type(event_type)
return super(GitLabHandlers, self).create_message(body, mapped_event_type, repo)
def msg_push(self, body, repo):
if body['commits']:
last_commit_url = body['commits'][-1]['url']
commit_messages = [
dict(msg=c['message'][:80].split('\n')[0], hash=c['id'][:8],
url=c['url']) for c in body['commits']
]
else:
last_commit_url = body['project']['web_url']
commit_messages = []
return self.render_template(
template='push', body=body, repo=repo,
user=body['user_name'],
commits=len(body['commits']),
branch='/'.join(body['ref'].split('/')[2:]),
url=last_commit_url,
commit_messages=commit_messages,
)
def msg_issue(self, body, repo):
action = {'reopen': 'reopened', 'close': 'closed', 'open': 'opened'}.get(body['object_attributes']['action'])
if action:
return self.render_template(
template='issues', body=body, repo=repo,
action=action,
title=body['object_attributes']['title'],
user=body['user']['name'],
url=body['object_attributes']['url']
)
def msg_comment(self, body, repo):
noteable = body['object_attributes']['noteable_type'].lower()
if noteable == "issue":
return self.render_template(
template='issue_comment', body=body, repo=repo,
user=body['user']['name'],
url=body['object_attributes']['url'],
action='commented',
title=body['issue']['title']
)
elif noteable == "commit":
return self.render_template(
template='commit_comment', body=body, repo=repo,
user=body['user']['name'],
url=body['object_attributes']['url'],
line=None,
)
elif noteable == "mergerequest":
return self.render_template(
template='merge_request_comment', body=body, repo=repo,
user=body['user']['name'],
url=body['object_attributes']['url'],
)
| gpl-3.0 | -17,808,484,313,198,146 | 37.039301 | 117 | 0.555505 | false | 4.118676 | false | false | false |
sbkolate/sap_frappe_v6 | frappe/desk/doctype/feed/feed.py | 3 | 3188 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
import frappe.permissions
from frappe.model.document import Document
from frappe.utils import get_fullname
from frappe import _
exclude_from_linked_with = True
class Feed(Document):
no_feed_on_delete = True
def validate(self):
if not (self.reference_doctype and self.reference_name):
# reset both if even one is missing
self.reference_doctype = self.reference_name = None
def on_doctype_update():
if not frappe.db.sql("""show index from `tabFeed`
where Key_name="feed_doctype_docname_index" """):
frappe.db.commit()
frappe.db.sql("""alter table `tabFeed`
add index feed_doctype_docname_index(doc_type, doc_name)""")
def get_permission_query_conditions(user):
if not user: user = frappe.session.user
use_user_permissions = frappe.permissions.apply_user_permissions("Feed", "read", user)
if not use_user_permissions:
return ""
conditions = ['`tabFeed`.owner="{user}" or `tabFeed`.doc_owner="{user}"'.format(user=frappe.db.escape(user))]
user_permissions = frappe.defaults.get_user_permissions(user)
can_read = frappe.get_user().get_can_read()
can_read_doctypes = ['"{}"'.format(doctype) for doctype in
list(set(can_read) - set(user_permissions.keys()))]
if not can_read_doctypes:
conditions += ["tabFeed.doc_type in ({})".format(", ".join(can_read_doctypes))]
if user_permissions:
can_read_docs = []
for doctype, names in user_permissions.items():
for n in names:
can_read_docs.append('"{}|{}"'.format(doctype, n))
if can_read_docs:
conditions.append("concat_ws('|', tabFeed.doc_type, tabFeed.doc_name) in ({})".format(
", ".join(can_read_docs)))
return "(" + " or ".join(conditions) + ")"
def has_permission(doc, user):
return frappe.has_permission(doc.doc_type, "read", doc.doc_name, user=user)
def update_feed(doc, method=None):
"adds a new feed"
if frappe.flags.in_patch or frappe.flags.in_install or frappe.flags.in_import:
return
if doc.doctype == "Feed" or doc.meta.issingle:
return
if hasattr(doc, "get_feed"):
feed = doc.get_feed()
if feed:
if isinstance(feed, basestring):
feed = {"subject": feed}
feed = frappe._dict(feed)
doctype = feed.doctype or doc.doctype
name = feed.name or doc.name
# delete earlier feed
frappe.db.sql("""delete from tabFeed
where doc_type=%s and doc_name=%s
and ifnull(feed_type,'')=''""", (doctype, name))
frappe.get_doc({
"doctype": "Feed",
"feed_type": feed.feed_type or "",
"doc_type": doctype,
"doc_name": name,
"subject": feed.subject,
"full_name": get_fullname(doc.owner),
"doc_owner": frappe.db.get_value(doctype, name, "owner"),
"reference_doctype": feed.reference_doctype,
"reference_name": feed.reference_name
}).insert(ignore_permissions=True)
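# Illustrative only (an assumption about a typical controller, not code from
# this app): a doctype opts in by defining get_feed, e.g.
#   def get_feed(self):
#       return {"subject": "Submitted {0}".format(self.name), "feed_type": "submit"}
# update_feed() above then records the result as a Feed document.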
def login_feed(login_manager):
frappe.get_doc({
"doctype": "Feed",
"feed_type": "Login",
"subject": _("{0} logged in").format(get_fullname(login_manager.user)),
"full_name": get_fullname(login_manager.user)
}).insert(ignore_permissions=True)
| mit | 5,286,188,534,838,085,000 | 29.361905 | 110 | 0.683814 | false | 3.092144 | false | false | false |
Shouqun/node-gn | tools/depot_tools/gerrit_util.py | 1 | 30246 | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for requesting information for a gerrit server via https.
https://gerrit-review.googlesource.com/Documentation/rest-api.html
"""
import base64
import contextlib
import cookielib
import httplib # Still used for its constants.
import json
import logging
import netrc
import os
import re
import socket
import stat
import sys
import tempfile
import time
import urllib
import urlparse
from cStringIO import StringIO
import gclient_utils
import subprocess2
from third_party import httplib2
LOGGER = logging.getLogger()
# With a starting sleep time of 1 second, 2^n exponential backoff, and six
# total tries, the sleep time between the first and last tries will be 31s.
TRY_LIMIT = 6
# Controls the transport protocol used to communicate with gerrit.
# This is parameterized primarily to enable GerritTestCase.
GERRIT_PROTOCOL = 'https'
class GerritError(Exception):
"""Exception class for errors commuicating with the gerrit-on-borg service."""
def __init__(self, http_status, *args, **kwargs):
super(GerritError, self).__init__(*args, **kwargs)
self.http_status = http_status
self.message = '(%d) %s' % (self.http_status, self.message)
class GerritAuthenticationError(GerritError):
"""Exception class for authentication errors during Gerrit communication."""
def _QueryString(params, first_param=None):
"""Encodes query parameters in the key:val[+key:val...] format specified here:
https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
"""
q = [urllib.quote(first_param)] if first_param else []
q.extend(['%s:%s' % (key, val) for key, val in params])
return '+'.join(q)
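# For illustration (editorial note, not original code):
#   _QueryString([('is', 'open'), ('owner', 'self')], 'mybug')
# returns 'mybug+is:open+owner:self', the format expected by /changes/?q=...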
def GetConnectionObject(protocol=None):
if protocol is None:
protocol = GERRIT_PROTOCOL
if protocol in ('http', 'https'):
return httplib2.Http()
else:
raise RuntimeError(
"Don't know how to work with protocol '%s'" % protocol)
class Authenticator(object):
"""Base authenticator class for authenticator implementations to subclass."""
def get_auth_header(self, host):
raise NotImplementedError()
@staticmethod
def get():
"""Returns: (Authenticator) The identified Authenticator to use.
Probes the local system and its environment and identifies the
Authenticator instance to use.
"""
if GceAuthenticator.is_gce():
return GceAuthenticator()
return CookiesAuthenticator()
class CookiesAuthenticator(Authenticator):
"""Authenticator implementation that uses ".netrc" or ".gitcookies" for token.
Expected case for developer workstations.
"""
def __init__(self):
self.netrc = self._get_netrc()
self.gitcookies = self._get_gitcookies()
@classmethod
def get_new_password_url(cls, host):
assert not host.startswith('http')
# Assume *.googlesource.com pattern.
parts = host.split('.')
if not parts[0].endswith('-review'):
parts[0] += '-review'
return 'https://%s/new-password' % ('.'.join(parts))
@classmethod
def get_new_password_message(cls, host):
assert not host.startswith('http')
# Assume *.googlesource.com pattern.
parts = host.split('.')
if not parts[0].endswith('-review'):
parts[0] += '-review'
url = 'https://%s/new-password' % ('.'.join(parts))
return 'You can (re)generate your credentials by visiting %s' % url
@classmethod
def get_netrc_path(cls):
path = '_netrc' if sys.platform.startswith('win') else '.netrc'
return os.path.expanduser(os.path.join('~', path))
@classmethod
def _get_netrc(cls):
# Buffer the '.netrc' path. Use an empty file if it doesn't exist.
path = cls.get_netrc_path()
content = ''
if os.path.exists(path):
st = os.stat(path)
if st.st_mode & (stat.S_IRWXG | stat.S_IRWXO):
print >> sys.stderr, (
'WARNING: netrc file %s cannot be used because its file '
'permissions are insecure. netrc file permissions should be '
'600.' % path)
with open(path) as fd:
content = fd.read()
# Load the '.netrc' file. We strip comments from it because processing them
# can trigger a bug in Windows. See crbug.com/664664.
content = '\n'.join(l for l in content.splitlines()
if l.strip() and not l.strip().startswith('#'))
with tempdir() as tdir:
netrc_path = os.path.join(tdir, 'netrc')
with open(netrc_path, 'w') as fd:
fd.write(content)
os.chmod(netrc_path, (stat.S_IRUSR | stat.S_IWUSR))
return cls._get_netrc_from_path(netrc_path)
@classmethod
def _get_netrc_from_path(cls, path):
try:
return netrc.netrc(path)
except IOError:
print >> sys.stderr, 'WARNING: Could not read netrc file %s' % path
return netrc.netrc(os.devnull)
except netrc.NetrcParseError as e:
print >> sys.stderr, ('ERROR: Cannot use netrc file %s due to a '
'parsing error: %s' % (path, e))
return netrc.netrc(os.devnull)
@classmethod
def get_gitcookies_path(cls):
if os.getenv('GIT_COOKIES_PATH'):
return os.getenv('GIT_COOKIES_PATH')
try:
return subprocess2.check_output(
['git', 'config', '--path', 'http.cookiefile']).strip()
except subprocess2.CalledProcessError:
return os.path.join(os.environ['HOME'], '.gitcookies')
@classmethod
def _get_gitcookies(cls):
gitcookies = {}
path = cls.get_gitcookies_path()
if not os.path.exists(path):
return gitcookies
try:
f = open(path, 'rb')
except IOError:
return gitcookies
with f:
for line in f:
try:
fields = line.strip().split('\t')
if line.strip().startswith('#') or len(fields) != 7:
continue
domain, xpath, key, value = fields[0], fields[2], fields[5], fields[6]
if xpath == '/' and key == 'o':
login, secret_token = value.split('=', 1)
gitcookies[domain] = (login, secret_token)
except (IndexError, ValueError, TypeError) as exc:
LOGGER.warning(exc)
return gitcookies
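  # Illustrative .gitcookies line (7 tab-separated fields, Netscape cookie
  # format; the host and token below are made up):
  #   .example.googlesource.com <TAB> TRUE <TAB> / <TAB> TRUE <TAB> 2147483647 <TAB> o <TAB> git-user.example.com=1//0secret
  # which the loop above turns into
  #   gitcookies['.example.googlesource.com'] = ('git-user.example.com', '1//0secret')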
def _get_auth_for_host(self, host):
for domain, creds in self.gitcookies.iteritems():
if cookielib.domain_match(host, domain):
return (creds[0], None, creds[1])
return self.netrc.authenticators(host)
def get_auth_header(self, host):
auth = self._get_auth_for_host(host)
if auth:
return 'Basic %s' % (base64.b64encode('%s:%s' % (auth[0], auth[2])))
return None
def get_auth_email(self, host):
"""Best effort parsing of email to be used for auth for the given host."""
auth = self._get_auth_for_host(host)
if not auth:
return None
login = auth[0]
# login typically looks like 'git-xxx.example.com'
if not login.startswith('git-') or '.' not in login:
return None
username, domain = login[len('git-'):].split('.', 1)
return '%s@%s' % (username, domain)
# Backwards compatibility just in case somebody imports this outside of
# depot_tools.
NetrcAuthenticator = CookiesAuthenticator
class GceAuthenticator(Authenticator):
"""Authenticator implementation that uses GCE metadata service for token.
"""
_INFO_URL = 'http://metadata.google.internal'
_ACQUIRE_URL = ('%s/computeMetadata/v1/instance/'
'service-accounts/default/token' % _INFO_URL)
_ACQUIRE_HEADERS = {"Metadata-Flavor": "Google"}
_cache_is_gce = None
_token_cache = None
_token_expiration = None
@classmethod
def is_gce(cls):
if os.getenv('SKIP_GCE_AUTH_FOR_GIT'):
return False
if cls._cache_is_gce is None:
cls._cache_is_gce = cls._test_is_gce()
return cls._cache_is_gce
@classmethod
def _test_is_gce(cls):
# Based on https://cloud.google.com/compute/docs/metadata#runninggce
try:
resp, _ = cls._get(cls._INFO_URL)
except (socket.error, httplib2.ServerNotFoundError):
# Could not resolve URL.
return False
return resp.get('metadata-flavor') == 'Google'
@staticmethod
def _get(url, **kwargs):
next_delay_sec = 1
for i in xrange(TRY_LIMIT):
p = urlparse.urlparse(url)
c = GetConnectionObject(protocol=p.scheme)
resp, contents = c.request(url, 'GET', **kwargs)
LOGGER.debug('GET [%s] #%d/%d (%d)', url, i+1, TRY_LIMIT, resp.status)
if resp.status < httplib.INTERNAL_SERVER_ERROR:
return (resp, contents)
# Retry server error status codes.
LOGGER.warn('Encountered server error')
if TRY_LIMIT - i > 1:
LOGGER.info('Will retry in %d seconds (%d more times)...',
next_delay_sec, TRY_LIMIT - i - 1)
time.sleep(next_delay_sec)
next_delay_sec *= 2
@classmethod
def _get_token_dict(cls):
if cls._token_cache:
# If it expires within 25 seconds, refresh.
if cls._token_expiration < time.time() - 25:
return cls._token_cache
resp, contents = cls._get(cls._ACQUIRE_URL, headers=cls._ACQUIRE_HEADERS)
if resp.status != httplib.OK:
return None
cls._token_cache = json.loads(contents)
cls._token_expiration = cls._token_cache['expires_in'] + time.time()
return cls._token_cache
def get_auth_header(self, _host):
token_dict = self._get_token_dict()
if not token_dict:
return None
return '%(token_type)s %(access_token)s' % token_dict
def CreateHttpConn(host, path, reqtype='GET', headers=None, body=None):
"""Opens an https connection to a gerrit service, and sends a request."""
headers = headers or {}
bare_host = host.partition(':')[0]
auth = Authenticator.get().get_auth_header(bare_host)
if auth:
headers.setdefault('Authorization', auth)
else:
LOGGER.debug('No authorization found for %s.' % bare_host)
url = path
if not url.startswith('/'):
url = '/' + url
if 'Authorization' in headers and not url.startswith('/a/'):
url = '/a%s' % url
if body:
body = json.JSONEncoder().encode(body)
headers.setdefault('Content-Type', 'application/json')
if LOGGER.isEnabledFor(logging.DEBUG):
LOGGER.debug('%s %s://%s%s' % (reqtype, GERRIT_PROTOCOL, host, url))
for key, val in headers.iteritems():
if key == 'Authorization':
val = 'HIDDEN'
LOGGER.debug('%s: %s' % (key, val))
if body:
LOGGER.debug(body)
conn = GetConnectionObject()
conn.req_host = host
conn.req_params = {
'uri': urlparse.urljoin('%s://%s' % (GERRIT_PROTOCOL, host), url),
'method': reqtype,
'headers': headers,
'body': body,
}
return conn
def ReadHttpResponse(conn, accept_statuses=frozenset([200])):
"""Reads an http response from a connection into a string buffer.
Args:
conn: An Http object created by CreateHttpConn above.
accept_statuses: Treat any of these statuses as success. Default: [200]
Common additions include 204, 400, and 404.
Returns: A string buffer containing the connection's reply.
"""
sleep_time = 1
for idx in range(TRY_LIMIT):
response, contents = conn.request(**conn.req_params)
# Check if this is an authentication issue.
www_authenticate = response.get('www-authenticate')
if (response.status in (httplib.UNAUTHORIZED, httplib.FOUND) and
www_authenticate):
auth_match = re.search('realm="([^"]+)"', www_authenticate, re.I)
host = auth_match.group(1) if auth_match else conn.req_host
reason = ('Authentication failed. Please make sure your .gitcookies file '
'has credentials for %s' % host)
raise GerritAuthenticationError(response.status, reason)
# If response.status < 500 then the result is final; break retry loop.
# If the response is 404, it might be because of replication lag, so
# keep trying anyway.
if ((response.status < 500 and response.status != 404)
or response.status in accept_statuses):
LOGGER.debug('got response %d for %s %s', response.status,
conn.req_params['method'], conn.req_params['uri'])
# If 404 was in accept_statuses, then it's expected that the file might
# not exist, so don't return the gitiles error page because that's not the
# "content" that was actually requested.
if response.status == 404:
contents = ''
break
# A status >=500 is assumed to be a possible transient error; retry.
http_version = 'HTTP/%s' % ('1.1' if response.version == 11 else '1.0')
LOGGER.warn('A transient error occurred while querying %s:\n'
'%s %s %s\n'
'%s %d %s',
conn.req_host, conn.req_params['method'],
conn.req_params['uri'],
http_version, http_version, response.status, response.reason)
if TRY_LIMIT - idx > 1:
LOGGER.info('Will retry in %d seconds (%d more times)...',
sleep_time, TRY_LIMIT - idx - 1)
time.sleep(sleep_time)
sleep_time = sleep_time * 2
if response.status not in accept_statuses:
if response.status in (401, 403):
print('Your Gerrit credentials might be misconfigured. Try: \n'
' git cl creds-check')
reason = '%s: %s' % (response.reason, contents)
raise GerritError(response.status, reason)
return StringIO(contents)
def ReadHttpJsonResponse(conn, accept_statuses=frozenset([200])):
"""Parses an https response as json."""
fh = ReadHttpResponse(conn, accept_statuses)
# The first line of the response should always be: )]}'
s = fh.readline()
if s and s.rstrip() != ")]}'":
raise GerritError(200, 'Unexpected json output: %s' % s)
s = fh.read()
if not s:
return None
return json.loads(s)
def QueryChanges(host, params, first_param=None, limit=None, o_params=None,
start=None):
"""
Queries a gerrit-on-borg server for changes matching query terms.
Args:
params: A list of key:value pairs for search parameters, as documented
here (e.g. ('is', 'owner') for a parameter 'is:owner'):
https://gerrit-review.googlesource.com/Documentation/user-search.html#search-operators
first_param: A change identifier
limit: Maximum number of results to return.
start: how many changes to skip (starting with the most recent)
o_params: A list of additional output specifiers, as documented here:
https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
Returns:
A list of json-decoded query results.
"""
# Note that no attempt is made to escape special characters; YMMV.
if not params and not first_param:
raise RuntimeError('QueryChanges requires search parameters')
path = 'changes/?q=%s' % _QueryString(params, first_param)
if start:
path = '%s&start=%s' % (path, start)
if limit:
path = '%s&n=%d' % (path, limit)
if o_params:
path = '%s&%s' % (path, '&'.join(['o=%s' % p for p in o_params]))
return ReadHttpJsonResponse(CreateHttpConn(host, path))
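# Example (editorial sketch; the host and project are illustrative only):
#   QueryChanges('gerrit-review.googlesource.com',
#                [('project', 'foo'), ('status', 'open')], limit=10,
#                o_params=['CURRENT_REVISION'])
# builds the path 'changes/?q=project:foo+status:open&n=10&o=CURRENT_REVISION'
# and returns the decoded JSON list of matching changes.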
def GenerateAllChanges(host, params, first_param=None, limit=500,
o_params=None, start=None):
"""
Queries a gerrit-on-borg server for all the changes matching the query terms.
WARNING: this is unreliable if a change matching the query is modified while
this function is being called.
A single query to gerrit-on-borg is limited on the number of results by the
limit parameter on the request (see QueryChanges) and the server maximum
limit.
Args:
params, first_param: Refer to QueryChanges().
limit: Maximum number of requested changes per query.
o_params: Refer to QueryChanges().
start: Refer to QueryChanges().
Returns:
A generator object to the list of returned changes.
"""
already_returned = set()
def at_most_once(cls):
for cl in cls:
if cl['_number'] not in already_returned:
already_returned.add(cl['_number'])
yield cl
start = start or 0
cur_start = start
more_changes = True
while more_changes:
# This will fetch changes[start..start+limit] sorted by most recently
# updated. Since the rank of any change in this list can be changed any time
    # (say a user posting a comment), subsequent calls may overlap like this:
    #   > initial order ABCDEFGH
    #   query[0..3] => ABC
    #   > E gets updated. New order: EABCDFGH
# query[3..6] => CDF # C is a dup
# query[6..9] => GH # E is missed.
page = QueryChanges(host, params, first_param, limit, o_params,
cur_start)
for cl in at_most_once(page):
yield cl
more_changes = [cl for cl in page if '_more_changes' in cl]
if len(more_changes) > 1:
raise GerritError(
200,
'Received %d changes with a _more_changes attribute set but should '
'receive at most one.' % len(more_changes))
if more_changes:
cur_start += len(page)
# If we paged through, query again the first page which in most circumstances
# will fetch all changes that were modified while this function was run.
if start != cur_start:
page = QueryChanges(host, params, first_param, limit, o_params, start)
for cl in at_most_once(page):
yield cl
def MultiQueryChanges(host, params, change_list, limit=None, o_params=None,
start=None):
"""Initiate a query composed of multiple sets of query parameters."""
if not change_list:
raise RuntimeError(
"MultiQueryChanges requires a list of change numbers/id's")
q = ['q=%s' % '+OR+'.join([urllib.quote(str(x)) for x in change_list])]
if params:
q.append(_QueryString(params))
if limit:
q.append('n=%d' % limit)
if start:
q.append('S=%s' % start)
if o_params:
q.extend(['o=%s' % p for p in o_params])
path = 'changes/?%s' % '&'.join(q)
try:
result = ReadHttpJsonResponse(CreateHttpConn(host, path))
except GerritError as e:
msg = '%s:\n%s' % (e.message, path)
raise GerritError(e.http_status, msg)
return result
def GetGerritFetchUrl(host):
"""Given a gerrit host name returns URL of a gerrit instance to fetch from."""
return '%s://%s/' % (GERRIT_PROTOCOL, host)
def GetChangePageUrl(host, change_number):
"""Given a gerrit host name and change number, return change page url."""
return '%s://%s/#/c/%d/' % (GERRIT_PROTOCOL, host, change_number)
def GetChangeUrl(host, change):
"""Given a gerrit host name and change id, return an url for the change."""
return '%s://%s/a/changes/%s' % (GERRIT_PROTOCOL, host, change)
def GetChange(host, change):
"""Query a gerrit server for information about a single change."""
path = 'changes/%s' % change
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetChangeDetail(host, change, o_params=None):
"""Query a gerrit server for extended information about a single change."""
path = 'changes/%s/detail' % change
if o_params:
path += '?%s' % '&'.join(['o=%s' % p for p in o_params])
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetChangeCommit(host, change, revision='current'):
"""Query a gerrit server for a revision associated with a change."""
path = 'changes/%s/revisions/%s/commit?links' % (change, revision)
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetChangeCurrentRevision(host, change):
"""Get information about the latest revision for a given change."""
return QueryChanges(host, [], change, o_params=('CURRENT_REVISION',))
def GetChangeRevisions(host, change):
"""Get information about all revisions associated with a change."""
return QueryChanges(host, [], change, o_params=('ALL_REVISIONS',))
def GetChangeReview(host, change, revision=None):
"""Get the current review information for a change."""
if not revision:
jmsg = GetChangeRevisions(host, change)
if not jmsg:
return None
elif len(jmsg) > 1:
raise GerritError(200, 'Multiple changes found for ChangeId %s.' % change)
revision = jmsg[0]['current_revision']
  path = 'changes/%s/revisions/%s/review' % (change, revision)
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetChangeComments(host, change):
"""Get the line- and file-level comments on a change."""
path = 'changes/%s/comments' % change
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def AbandonChange(host, change, msg=''):
"""Abandon a gerrit change."""
path = 'changes/%s/abandon' % change
body = {'message': msg} if msg else {}
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
return ReadHttpJsonResponse(conn)
def RestoreChange(host, change, msg=''):
"""Restore a previously abandoned change."""
path = 'changes/%s/restore' % change
body = {'message': msg} if msg else {}
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
return ReadHttpJsonResponse(conn)
def SubmitChange(host, change, wait_for_merge=True):
"""Submits a gerrit change via Gerrit."""
path = 'changes/%s/submit' % change
body = {'wait_for_merge': wait_for_merge}
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
return ReadHttpJsonResponse(conn)
def HasPendingChangeEdit(host, change):
conn = CreateHttpConn(host, 'changes/%s/edit' % change)
try:
ReadHttpResponse(conn)
except GerritError as e:
# 204 No Content means no pending change.
if e.http_status == 204:
return False
raise
return True
def DeletePendingChangeEdit(host, change):
conn = CreateHttpConn(host, 'changes/%s/edit' % change, reqtype='DELETE')
# On success, gerrit returns status 204; if the edit was already deleted it
# returns 404. Anything else is an error.
ReadHttpResponse(conn, accept_statuses=[204, 404])
def SetCommitMessage(host, change, description, notify='ALL'):
"""Updates a commit message."""
assert notify in ('ALL', 'NONE')
path = 'changes/%s/message' % change
body = {'message': description, 'notify': notify}
conn = CreateHttpConn(host, path, reqtype='PUT', body=body)
try:
ReadHttpResponse(conn, accept_statuses=[200, 204])
except GerritError as e:
raise GerritError(
e.http_status,
'Received unexpected http status while editing message '
'in change %s' % change)
def GetReviewers(host, change):
"""Get information about all reviewers attached to a change."""
path = 'changes/%s/reviewers' % change
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def GetReview(host, change, revision):
"""Get review information about a specific revision of a change."""
path = 'changes/%s/revisions/%s/review' % (change, revision)
return ReadHttpJsonResponse(CreateHttpConn(host, path))
def AddReviewers(host, change, reviewers=None, ccs=None, notify=True,
accept_statuses=frozenset([200, 400, 422])):
"""Add reviewers to a change."""
if not reviewers and not ccs:
return None
if not change:
return None
reviewers = frozenset(reviewers or [])
ccs = frozenset(ccs or [])
path = 'changes/%s/revisions/current/review' % change
body = {
'drafts': 'KEEP',
'reviewers': [],
'notify': 'ALL' if notify else 'NONE',
}
for r in sorted(reviewers | ccs):
state = 'REVIEWER' if r in reviewers else 'CC'
body['reviewers'].append({
'reviewer': r,
'state': state,
'notify': 'NONE', # We handled `notify` argument above.
})
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
# Gerrit will return 400 if one or more of the requested reviewers are
# unprocessable. We read the response object to see which were rejected,
# warn about them, and retry with the remainder.
resp = ReadHttpJsonResponse(conn, accept_statuses=accept_statuses)
errored = set()
for result in resp.get('reviewers', {}).itervalues():
r = result.get('input')
state = 'REVIEWER' if r in reviewers else 'CC'
if result.get('error'):
errored.add(r)
LOGGER.warn('Note: "%s" not added as a %s' % (r, state.lower()))
if errored:
# Try again, adding only those that didn't fail, and only accepting 200.
AddReviewers(host, change, reviewers=(reviewers-errored),
ccs=(ccs-errored), notify=notify, accept_statuses=[200])
def RemoveReviewers(host, change, remove=None):
"""Remove reveiewers from a change."""
if not remove:
return
if isinstance(remove, basestring):
remove = (remove,)
for r in remove:
path = 'changes/%s/reviewers/%s' % (change, r)
conn = CreateHttpConn(host, path, reqtype='DELETE')
try:
ReadHttpResponse(conn, accept_statuses=[204])
except GerritError as e:
raise GerritError(
e.http_status,
'Received unexpected http status while deleting reviewer "%s" '
'from change %s' % (r, change))
def SetReview(host, change, msg=None, labels=None, notify=None, ready=None):
"""Set labels and/or add a message to a code review."""
if not msg and not labels:
return
path = 'changes/%s/revisions/current/review' % change
body = {'drafts': 'KEEP'}
if msg:
body['message'] = msg
if labels:
body['labels'] = labels
if notify is not None:
body['notify'] = 'ALL' if notify else 'NONE'
if ready:
body['ready'] = True
conn = CreateHttpConn(host, path, reqtype='POST', body=body)
response = ReadHttpJsonResponse(conn)
if labels:
for key, val in labels.iteritems():
      if ('labels' not in response or key not in response['labels'] or
          int(response['labels'][key]) != int(val)):
raise GerritError(200, 'Unable to set "%s" label on change %s.' % (
key, change))
def ResetReviewLabels(host, change, label, value='0', message=None,
notify=None):
"""Reset the value of a given label for all reviewers on a change."""
# This is tricky, because we want to work on the "current revision", but
# there's always the risk that "current revision" will change in between
# API calls. So, we check "current revision" at the beginning and end; if
# it has changed, raise an exception.
jmsg = GetChangeCurrentRevision(host, change)
if not jmsg:
raise GerritError(
200, 'Could not get review information for change "%s"' % change)
value = str(value)
revision = jmsg[0]['current_revision']
path = 'changes/%s/revisions/%s/review' % (change, revision)
message = message or (
'%s label set to %s programmatically.' % (label, value))
jmsg = GetReview(host, change, revision)
if not jmsg:
    raise GerritError(200, 'Could not get review information for revision %s '
'of change %s' % (revision, change))
for review in jmsg.get('labels', {}).get(label, {}).get('all', []):
if str(review.get('value', value)) != value:
body = {
'drafts': 'KEEP',
'message': message,
'labels': {label: value},
'on_behalf_of': review['_account_id'],
}
if notify:
body['notify'] = notify
conn = CreateHttpConn(
host, path, reqtype='POST', body=body)
response = ReadHttpJsonResponse(conn)
if str(response['labels'][label]) != value:
username = review.get('email', jmsg.get('name', ''))
raise GerritError(200, 'Unable to set %s label for user "%s"'
' on change %s.' % (label, username, change))
jmsg = GetChangeCurrentRevision(host, change)
if not jmsg:
raise GerritError(
200, 'Could not get review information for change "%s"' % change)
elif jmsg[0]['current_revision'] != revision:
raise GerritError(200, 'While resetting labels on change "%s", '
'a new patchset was uploaded.' % change)
def CreateGerritBranch(host, project, branch, commit):
"""
  Create a new branch in the given project at the given commit.
https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#create-branch
Returns:
A JSON with 'ref' key
"""
path = 'projects/%s/branches/%s' % (project, branch)
body = {'revision': commit}
conn = CreateHttpConn(host, path, reqtype='PUT', body=body)
response = ReadHttpJsonResponse(conn, accept_statuses=[201])
if response:
return response
raise GerritError(200, 'Unable to create gerrit branch')
def GetGerritBranch(host, project, branch):
"""
  Get a branch from the given project.
https://gerrit-review.googlesource.com/Documentation/rest-api-projects.html#get-branch
Returns:
A JSON object with 'revision' key
"""
path = 'projects/%s/branches/%s' % (project, branch)
conn = CreateHttpConn(host, path, reqtype='GET')
response = ReadHttpJsonResponse(conn)
if response:
return response
raise GerritError(200, 'Unable to get gerrit branch')
def GetAccountDetails(host, account_id='self'):
"""Returns details of the account.
If account_id is not given, uses magic value 'self' which corresponds to
whichever account user is authenticating as.
Documentation:
https://gerrit-review.googlesource.com/Documentation/rest-api-accounts.html#get-account
"""
if account_id != 'self':
account_id = int(account_id)
conn = CreateHttpConn(host, '/accounts/%s' % account_id)
return ReadHttpJsonResponse(conn)
def PercentEncodeForGitRef(original):
"""Apply percent-encoding for strings sent to gerrit via git ref metadata.
The encoding used is based on but stricter than URL encoding (Section 2.1
of RFC 3986). The only non-escaped characters are alphanumerics, and
'SPACE' (U+0020) can be represented as 'LOW LINE' (U+005F) or
'PLUS SIGN' (U+002B).
For more information, see the Gerrit docs here:
https://gerrit-review.googlesource.com/Documentation/user-upload.html#message
"""
safe = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 '
encoded = ''.join(c if c in safe else '%%%02X' % ord(c) for c in original)
# spaces are not allowed in git refs; gerrit will interpret either '_' or
# '+' (or '%20') as space. Use '_' since that has been supported the longest.
return encoded.replace(' ', '_')
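# Illustrative example (not part of the original module): given the rules
# above, a footer value such as 'Fix bug #42' would be encoded as
# 'Fix_bug_%2342' -- '#' becomes '%23' and the space becomes '_'.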
@contextlib.contextmanager
def tempdir():
tdir = None
try:
tdir = tempfile.mkdtemp(suffix='gerrit_util')
yield tdir
finally:
if tdir:
gclient_utils.rmtree(tdir)
| mit | -4,422,473,123,791,376,000 | 33.646048 | 95 | 0.659426 | false | 3.64278 | false | false | false |
aconchillo/bitpacket | src/BitPacket/MetaField.py | 2 | 3997 | #!/usr/bin/env python
#
# @file MetaField.py
# @brief A proxy for another field.
# @author Aleix Conchillo Flaque <aconchillo@gmail.com>
# @date Fri Jan 15, 2010 10:22
#
# Copyright (C) 2010, 2011, 2012 Aleix Conchillo Flaque
#
# This file is part of BitPacket.
#
# BitPacket is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitPacket is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BitPacket. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = '''
MetaField field
===============
**API reference**: :class:`MetaField`
'''
from BitPacket.Field import Field
class MetaField(Field):
@staticmethod
def _raise_error(instance):
raise TypeError("No field created for MetaField '%s'" % instance.name())
@staticmethod
def _non_proxyable():
return ["_field", "_fieldfunc", "_create_field",
"_encode", "_decode", "_set_name", "write"]
def __init__(self, name, fieldfunc):
Field.__init__(self, name)
self._fieldfunc = fieldfunc
self._field = None
def type(self):
if self._field:
return type(self._field)
else:
return type(self._create_field())
def _encode(self, stream):
if self._field:
self._field._encode(stream)
else:
self._raise_error(self)
def _decode(self, stream):
self._field = self._create_field()
self._field._decode(stream)
def _create_field(self):
# Call name(), root() and parent() before proxy is
# available.
name = self.name()
root = self.root()
parent = self.parent()
field = self._fieldfunc(root)
field._set_name(name)
field._set_root(root)
field._set_parent(parent)
return field
def __len__(self):
if self._field:
return len(self._field)
else:
self._raise_error(self)
def __repr__(self):
if self._field:
return repr(self._field)
else:
self._raise_error(self)
def __getitem__(self, name):
if self._field:
return self._field[name]
else:
self._raise_error(self)
def __setitem__(self, name, value):
if self._field:
self._field[name] = value
else:
self._raise_error(self)
def __getattribute__(self, name):
try:
# We'll get an exception due to _field access in __init__,
# as _field attribute does not exist yet.
field = object.__getattribute__(self, "_field")
except AttributeError:
field = None
# Get the list of methods that should not be forwarded.
non_proxyable = object.__getattribute__(self, "_non_proxyable")()
# If _field is created and we are accessing a proxyable
# attribute, then forward it to _field.
if field and name not in non_proxyable:
return object.__getattribute__(field, name)
else:
return object.__getattribute__(self, name)
# import array
# from BitPacket.Structure import Structure
# from BitPacket.Integer import *
# class Test(Structure):
# def __init__(self):
# Structure.__init__(self, "tesbabat")
# self.append(UInt8("value1"))
# self.append(UInt8("value2"))
# s = Structure("metastruct")
# ss = Structure("substruct")
# s.append(ss)
# f = MetaField("test", lambda ctx: Test())
# ss.append(f)
# s.set_array(array.array("B", [123, 124]))
# print s
| gpl-3.0 | 1,189,359,358,328,893,400 | 26.756944 | 80 | 0.592695 | false | 3.850674 | false | false | false |
kcsaff/CA | src/algorithms/xx2.py | 1 | 4286 | # Copyright (C) 2010 by Kevin Saff
# This file is part of the CA scanner.
# The CA scanner is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# The CA scanner is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with the CA scanner. If not, see <http://www.gnu.org/licenses/>.
"""
This algorithm handles 256 states, packed at 1 cell per byte.
A cell is considered 'active' if its low bit is 1. Otherwise, it is inactive.
The state of a cell after evolution depends on its complete 8-bit state
and on the activity of the 8 cells in its Moore(1) neighborhood.
The bits for the lookup table are as follows:
[ 0][ 1][ 2]
[ 3][*4][ 5]
[ 6][ 7][ 8]
Bits 9-15 are the remainder of the state, minus the activity bit.
"""
import generate
def evolve(input, output, lookup):
"""Evolve it."""
return generate.inline("""
PyArrayObject *input;
PyArrayObject *output;
PyArrayObject *lookup;
char h = 0;
unsigned xstride, ystride;
unsigned xa, x0, x1, xM;
unsigned /*ya,*/ y0, y1, yM;
unsigned z;
char c, n, r;
if (!PyArg_ParseTuple(args, "O!O!O!",
&PyArray_Type, &input,
&PyArray_Type, &output,
&PyArray_Type, &lookup
))
return NULL;
xstride = input-> strides[0];
ystride = input-> strides[1];
xM = (input-> dimensions[0] - 1) * xstride;
yM = (input-> dimensions[1] - 1) * ystride;
xa = 0;
for (x0 = xstride; x0 < xM; x0 += xstride)
{
xa = x0 - xstride;
x1 = x0 + xstride;
n = input-> data[x0 + 1 * ystride];
z = ((input-> data[xa + 0 * ystride] & 1) << 0x3) |
((input-> data[x0 + 0 * ystride] & 1) << 0x4) |
((input-> data[x1 + 0 * ystride] & 1) << 0x5) |
((input-> data[xa + 1 * ystride] & 1) << 0x6) |
((n & 1) << 0x7) |
((input-> data[x1 + 1 * ystride] & 1) << 0x8) ;
for (y0 = ystride; y0 < yM; y0 += ystride)
{
z >>= 3;
y1 = y0 + ystride;
c = n;
n = input-> data[x0 + y1];
z |=((input-> data[xa + y1] & 1) << 0x6) |
((n & 1) << 0x7) |
((input-> data[x1 + y1] & 1) << 0x8) ;
r = lookup-> data[z | ((c & 0xFE) << 8)];
output-> data[x0 + y0] = (char)r;
}
}
return PyFloat_FromDouble(1.0);
""")(input, output, lookup)
import numpy
from _util import bit_count, register
from _algorithm import algorithm
@register('compile_rule', type='life', quality=1.0)
def _life(X):
lookup0 = []
for i in range(0x200):
if bit_count[i & 0x1EF] in (X.birth, X.survival)[(i & 0x10) and 1]:
lookup0.append(1)
else:
lookup0.append(0)
return algorithm('bytescan',
evolve=evolve,
table=numpy.tile(numpy.asarray(lookup0, dtype = numpy.uint8), 0x80),
states=(0,1))
@register('compile_rule', type='brain', quality=0.9)
def _brain(X):
lookup = numpy.ndarray(shape=0x10000, dtype=numpy.uint8)
mdecay = 2 * (X.decay + 1)
if mdecay > 256:
mdecay = 256
for i in range(0x10000):
if i & 0x10: #alive
if bit_count[i & 0x1EF] in X.survival:
lookup[i] = 1
else:
lookup[i] = 2 % mdecay
elif i < 0x200: #dead
lookup[i] = bit_count[i & 0x1FF] in X.birth and 1 or 0
else: #dying
lookup[i] = ((i >> 9) * 2 + 2) % mdecay
return algorithm('bytescan',
evolve=evolve,
table=lookup,
states=range(2) + range(2, (X.decay + 1) * 2, 2),
)
| gpl-3.0 | -7,492,452,754,597,536,000 | 29.985075 | 80 | 0.526365 | false | 3.281776 | false | false | false |
physion/ovation-python | ovation/util/licensed.py | 1 | 2180 | """
Utilities for working with github/licensed cached license data
"""
import pathlib
import yaml
import csv
from six import StringIO
_LICENSE_URLS = {'apache-2.0': 'https://www.apache.org/licenses/LICENSE-2.0',
'mit': 'https://opensource.org/licenses/MIT',
'gpl2': 'https://www.gnu.org/licenses/old-licenses/gpl-2.0.txt',
'gpl3': 'https://www.gnu.org/licenses/gpl-3.0.txt',
'gpl-3.0': 'https://www.gnu.org/licenses/gpl-3.0.txt',
'bsd-2-clause': 'https://opensource.org/licenses/BSD-2-Clause',
'bsd-3-clause': 'https://opensource.org/licenses/BSD-3-Clause',
'other': 'unknown'}
_LICENSE_NAMES = {'apache-2.0': 'Apache License Version 2.0',
'mit': 'MIT',
'gpl2': 'GNU General Public License Version 2',
'gpl3': 'GNU General Public License Version 3',
'gpl-3.0': 'GNU General Public License Version 3',
'bsd-2-clause': 'Simplified BSD License',
'bsd-3-clause': 'BSD License'}
def cached_license_dict(dep_path: pathlib.Path) -> dict:
with dep_path.open('r') as f:
dep = yaml.safe_load(f)
dep['URL'] = _LICENSE_URLS.get(dep['license'], 'unknown')
dep['license'] = _LICENSE_NAMES.get(dep['license'], dep['license'])
    return {(k.capitalize() if k != "URL" else k): dep[k] for k in ['name', 'version', 'license', 'URL']}
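# Illustrative example (hypothetical file): a cached ``foo.dep.yml`` holding
# ``name: foo``, ``version: 1.2.3`` and ``license: mit`` would be returned as
# {'Name': 'foo', 'Version': '1.2.3', 'License': 'MIT',
#  'URL': 'https://opensource.org/licenses/MIT'}.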
def cached_licenses(cache_path: pathlib.Path) -> [dict]:
return [cached_license_dict(p) for p in cache_path.glob("**/*.dep.yml")]
def cached_license_csv(cache_path: pathlib.Path, output: pathlib.Path = None) -> str:
licenses = cached_licenses(cache_path)
if output is None:
with StringIO() as f:
_write_licenses_csv(f, licenses)
return f.getvalue()
else:
with output.open('w', newline='') as f:
_write_licenses_csv(f, licenses)
def _write_licenses_csv(f, licenses):
fieldnames = ['Name', 'Version', 'License', 'URL']
writer = csv.DictWriter(f, fieldnames)
writer.writeheader()
writer.writerows(licenses)
| gpl-3.0 | 7,859,681,477,088,932,000 | 37.928571 | 113 | 0.583945 | false | 3.471338 | false | false | false |
zongtongyi/leetcode | 023_Merge_k_Sorted_Lists.py | 1 | 1641 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
def printlist(head):
print head.val
while head.next:
head = head.next
print head.val
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
import heapq
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
if len(lists) == 0: return []
heap = []
td = {}
ml = ListNode(0)
tml = ml
while True:
if len(lists) == lists.count(None): break
for i in range(len(lists)):
node = lists[i]
if not node: continue
heapq.heappush(heap, node.val)
td.setdefault(node.val, []).append(node)
lists[i] = lists[i].next
if not heap: return []
val = heapq.heappop(heap)
tml.next = ListNode(val)
tml = tml.next
while heap:
val = heapq.heappop(heap)
tml.next = ListNode(val)
tml = tml.next
return ml.next
if __name__ == "__main__":
l1 = ListNode(1)
l1.next = ListNode(2)
l1.next.next = ListNode(3)
l2 = ListNode(6)
l2.next = ListNode(7)
l2.next.next = ListNode(8)
l3 = ListNode(9)
l3.next = ListNode(10)
l3.next.next = ListNode(11)
lists = [l1, l2, l3]
lists = [[]]
l = Solution().mergeKLists(lists)
# printlist(l)
| mit | -6,833,300,006,282,485,000 | 21.791667 | 56 | 0.473492 | false | 3.521459 | false | false | false |
mozbhearsum/balrog | auslib/web/common/releases.py | 2 | 3014 | import json
import logging
from connexion import problem, request
from flask import Response, jsonify
from auslib.global_state import dbo
from auslib.web.common.csrf import get_csrf_headers
log = logging.getLogger(__name__)
def strip_data(release):
"""Return a release with all the fields except data.
This is used in multiple APIs to save bandwidth and present a
simplified view of the release by removing its largest field,
data, which is of no use except when serving clients.
"""
return dict((k, v) for (k, v) in release.items() if k != "data")
def release_list(request):
kwargs = {}
if request.args.get("product"):
kwargs["product"] = request.args.get("product")
if request.args.get("name_prefix"):
kwargs["name_prefix"] = request.args.get("name_prefix")
if request.args.get("names_only"):
kwargs["nameOnly"] = True
return dbo.releases.getReleaseInfo(**kwargs)
def serialize_releases(request, releases):
if request.args.get("names_only"):
names = []
for release in releases:
names.append(release["name"])
data = {"names": names}
else:
data = {"releases": [strip_data(release) for release in releases]}
return jsonify(data)
def get_releases():
releases = release_list(request)
return serialize_releases(request, releases)
def _get_release(release):
releases = dbo.releases.getReleases(name=release, limit=1)
return releases[0] if releases else None
def get_release(release, with_csrf_header=False):
    release_name = release
    release = _get_release(release)
    if not release:
        return problem(404, "Not Found", "Release name: %s not found" % release_name)
headers = {"X-Data-Version": release["data_version"]}
if with_csrf_header:
headers.update(get_csrf_headers())
if request.args.get("pretty"):
indent = 4
separators = (",", ": ")
else:
indent = None
separators = None
# separators set manually due to https://bugs.python.org/issue16333 affecting Python 2
return Response(response=json.dumps(release["data"], indent=indent, separators=separators, sort_keys=True), mimetype="application/json", headers=headers)
def get_release_with_csrf_header(release):
return get_release(release, with_csrf_header=True)
def get_release_single_locale(release, platform, locale, with_csrf_header=False):
try:
locale = dbo.releases.getLocale(release, platform, locale)
except KeyError as e:
return problem(404, "Not Found", json.dumps(e.args))
data_version = dbo.releases.getReleases(name=release)[0]["data_version"]
headers = {"X-Data-Version": data_version}
if with_csrf_header:
headers.update(get_csrf_headers())
return Response(response=json.dumps(locale), mimetype="application/json", headers=headers)
def get_release_single_locale_with_csrf_header(release, platform, locale):
return get_release_single_locale(release, platform, locale, with_csrf_header=True)
| mpl-2.0 | -5,088,813,654,994,552,000 | 32.865169 | 157 | 0.683809 | false | 3.7675 | false | false | false |
vikas86/python_flask_vikas | vikas.py | 1 | 2073 | import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, jsonify,render_template, flash
from contextlib import closing
DATABASE = '/tmp/vikas.db'
DEBUG = True
SECRET_KEY = 'vikas123'
USERNAME = 'admin'
PASSWORD = 'admin'
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
def init_db():
with closing(connect_db()) as db:
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/')
def show_entries():
cur = g.db.execute('select title, text from entries order by id desc')
entries = [dict(title=row[0], text=row[1]) for row in cur.fetchall()]
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
g.db.execute('insert into entries (title, text) values (?, ?)',
[request.form['title'], request.form['text']])
g.db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run()
| gpl-2.0 | -4,087,519,908,299,340,300 | 27.791667 | 102 | 0.622287 | false | 3.54359 | false | false | false |
roberzguerra/RSSReader | RSSReader/settings.py | 1 | 2431 | # -*- coding:utf-8 -*-
"""
Django settings for RSSReader project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&t_1&t$fh5(+e%fb-boxy9q-#s##e*@aa1p23z*@#iqbypoq%)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DESENV = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rss_reader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'RSSReader.urls'
WSGI_APPLICATION = 'RSSReader.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
) | apache-2.0 | -8,745,893,467,391,407,000 | 24.6 | 75 | 0.712464 | false | 3.258713 | false | false | false |
ZeitOnline/zeit.content.author | src/zeit/content/author/tests/test_reference.py | 1 | 5878 | from zeit.cms.checkout.interfaces import ICheckoutManager
import gocept.testing.mock
import lxml.etree
import mock
import zeit.cms.content.interfaces
import zeit.cms.testing
import zeit.content.article.edit.author
import zeit.content.author.author
import zeit.content.author.testing
import zope.component
class AuthorshipXMLReferenceUpdater(zeit.cms.testing.FunctionalTestCase):
layer = zeit.content.author.testing.ZCML_LAYER
def setUp(self):
super(AuthorshipXMLReferenceUpdater, self).setUp()
self.shakespeare = zeit.content.author.author.Author()
self.shakespeare.firstname = 'William'
self.shakespeare.lastname = 'Shakespeare'
self.repository['shakespeare'] = self.shakespeare
def test_location_is_copied(self):
content = self.repository['testcontent']
content.authorships = (content.authorships.create(self.shakespeare),)
content.authorships[0].location = 'London'
reference = zope.component.getAdapter(
content, zeit.cms.content.interfaces.IXMLReference, name='related')
self.assertEllipsis("""\
<reference...>
...
<title xsi:nil="true"/>
...
<author href="http://xml.zeit.de/shakespeare"...>
<display_name...>William Shakespeare</display_name>
<location>London</location>
</author>
</reference> """, lxml.etree.tostring(reference, pretty_print=True))
def test_old_author_nodes_are_removed(self):
andersen = zeit.content.author.author.Author()
andersen.firstname = 'Hans Christian'
andersen.lastname = 'Andersen'
self.repository['andersen'] = andersen
content = self.repository['testcontent']
content.authorships = (content.authorships.create(self.shakespeare),)
reference = zope.component.getAdapter(
content, zeit.cms.content.interfaces.IXMLReference, name='related')
content.authorships = (content.authorships.create(andersen),)
zeit.cms.content.interfaces.IXMLReferenceUpdater(
content).update(reference)
reference = lxml.etree.tostring(reference, pretty_print=True)
self.assertEllipsis(
'...<author href="http://xml.zeit.de/andersen"...', reference)
self.assertNotIn('shakespeare', reference)
def test_works_with_security(self):
with zeit.cms.checkout.helper.checked_out(
self.repository['testcontent'], temporary=False) as co:
co = zope.security.proxy.ProxyFactory(co)
co.authorships = (co.authorships.create(self.shakespeare),)
co.authorships[0].location = 'London'
reference = zope.component.getAdapter(
co, zeit.cms.content.interfaces.IXMLReference, name='related')
self.assertIn(
'William Shakespeare',
lxml.etree.tostring(reference, pretty_print=True))
def test_fills_in_bbb_author_attribute(self):
andersen = zeit.content.author.author.Author()
andersen.firstname = 'Hans Christian'
andersen.lastname = 'Andersen'
self.repository['andersen'] = andersen
content = self.repository['testcontent']
reference = zope.component.getAdapter(
content, zeit.cms.content.interfaces.IXMLReference, name='related')
self.assertNotIn(
'author=""', lxml.etree.tostring(reference, pretty_print=True))
content.authorships = (
content.authorships.create(self.shakespeare),
content.authorships.create(andersen))
reference = zope.component.getAdapter(
content, zeit.cms.content.interfaces.IXMLReference, name='related')
self.assertEllipsis(
"""<reference...
author="William Shakespeare;Hans Christian Andersen"...""",
lxml.etree.tostring(reference, pretty_print=True))
def test_updater_suppress_errors(self):
content = ICheckoutManager(self.repository['testcontent']).checkout()
content.authorships = (content.authorships.create(self.shakespeare),)
# This error condition cannot be synthesized easily (would need to make
# an Author lose its metadata so it's treated as
# PersistentUnknownResource).
with mock.patch('zeit.content.author.author.Author.display_name',
gocept.testing.mock.Property()) as display_name:
display_name.side_effect = AttributeError()
with self.assertNothingRaised():
updater = zeit.cms.content.interfaces.IXMLReferenceUpdater(
content)
updater.update(content.xml, suppress_errors=True)
class RelatedReferenceTest(zeit.cms.testing.FunctionalTestCase):
layer = zeit.content.author.testing.ZCML_LAYER
def setUp(self):
super(RelatedReferenceTest, self).setUp()
self.repository['testauthor'] = zeit.content.author.author.Author()
self.author = self.repository['testauthor']
self.reference_container = zeit.content.article.edit.author.Author(
self.author, self.author.xml)
def test_author_can_be_adapted_to_IXMLReference(self):
result = zope.component.getAdapter(
self.author,
zeit.cms.content.interfaces.IXMLReference,
name='related')
self.assertEqual('author', result.tag)
self.assertEqual(self.author.uniqueId, result.get('href'))
def test_author_can_be_adapted_to_IReference(self):
from zeit.content.author.interfaces import IAuthorBioReference
result = zope.component.getMultiAdapter(
(self.reference_container, self.author.xml),
zeit.cms.content.interfaces.IReference, name='related')
result.biography = 'bio'
self.assertEqual(True, IAuthorBioReference.providedBy(result))
self.assertEqual('bio', result.xml.biography.text)
| bsd-3-clause | 4,233,580,874,815,613,000 | 40.985714 | 79 | 0.669956 | false | 3.821847 | true | false | false |
dimagol/trex-core | scripts/external_libs/simpy-3.0.10/simpy/rt.py | 5 | 2983 | """Execution environment for events that synchronizes passing of time
with the real-time (aka *wall-clock time*).
"""
try:
# Python >= 3.3
from time import monotonic as time, sleep
except ImportError:
# Python < 3.3
from time import time, sleep
from simpy.core import Environment, EmptySchedule, Infinity
class RealtimeEnvironment(Environment):
"""Execution environment for an event-based simulation which is
synchronized with the real-time (also known as wall-clock time). A time
step will take *factor* seconds of real time (one second by default).
    A step from ``0`` to ``3`` with a ``factor=0.5`` will, for example, take
    at least 1.5 seconds.
The :meth:`step()` method will raise a :exc:`RuntimeError` if a time step
took too long to compute. This behaviour can be disabled by setting
*strict* to ``False``.
"""
def __init__(self, initial_time=0, factor=1.0, strict=True):
Environment.__init__(self, initial_time)
self.env_start = initial_time
self.real_start = time()
self._factor = factor
self._strict = strict
@property
def factor(self):
"""Scaling factor of the real-time."""
return self._factor
@property
def strict(self):
"""Running mode of the environment. :meth:`step()` will raise a
:exc:`RuntimeError` if this is set to ``True`` and the processing of
events takes too long."""
return self._strict
def sync(self):
"""Synchronize the internal time with the current wall-clock time.
This can be useful to prevent :meth:`step()` from raising an error if
a lot of time passes between creating the RealtimeEnvironment and
calling :meth:`run()` or :meth:`step()`.
"""
self.real_start = time()
def step(self):
"""Process the next event after enough real-time has passed for the
event to happen.
The delay is scaled according to the real-time :attr:`factor`. With
:attr:`strict` mode enabled, a :exc:`RuntimeError` will be raised, if
the event is processed too slowly.
"""
evt_time = self.peek()
if evt_time is Infinity:
raise EmptySchedule()
real_time = self.real_start + (evt_time - self.env_start) * self.factor
if self.strict and time() - real_time > self.factor:
# Events scheduled for time *t* may take just up to *t+1*
# for their computation, before an error is raised.
raise RuntimeError('Simulation too slow for real time (%.3fs).' % (
time() - real_time))
# Sleep in a loop to fix inaccuracies of windows (see
# http://stackoverflow.com/a/15967564 for details) and to ignore
# interrupts.
while True:
delta = real_time - time()
if delta <= 0:
break
sleep(delta)
return Environment.step(self)
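# Illustrative usage sketch (not part of simpy itself): with factor=0.1 the
# two simulated steps below should take roughly 0.2 wall-clock seconds.
#   def clock(env):
#       while True:
#           yield env.timeout(1)
#   env = RealtimeEnvironment(factor=0.1)
#   env.process(clock(env))
#   env.run(until=2)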
| apache-2.0 | -8,112,766,933,077,260,000 | 32.516854 | 79 | 0.617164 | false | 4.207334 | false | false | false |
patricklaw/pants | tests/python/pants_test/integration/remote_cache_integration_test.py | 3 | 2836 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.engine.internals.native_engine_pyo3 import PyExecutor, PyStubCAS
from pants.option.global_options import RemoteCacheWarningsBehavior
from pants.option.scope import GLOBAL_SCOPE_CONFIG_SECTION
from pants.testutil.pants_integration_test import run_pants
def test_warns_on_remote_cache_errors():
executor = PyExecutor(core_threads=2, max_threads=4)
cas = PyStubCAS.builder().always_errors().build(executor)
def run(behavior: RemoteCacheWarningsBehavior) -> str:
pants_run = run_pants(
[
"--backend-packages=['pants.backend.python']",
"--no-dynamic-ui",
"package",
"testprojects/src/python/hello/main:main",
],
use_pantsd=False,
config={
GLOBAL_SCOPE_CONFIG_SECTION: {
"remote_cache_read": True,
"remote_cache_write": True,
"remote_cache_warnings": behavior.value,
# NB: Our options code expects `grpc://`, which it will then convert back to
# `http://` before sending over FFI.
"remote_store_address": cas.address.replace("http://", "grpc://"),
}
},
)
pants_run.assert_success()
return pants_run.stderr
def read_err(i: int) -> str:
return f"Failed to read from remote cache ({i} occurrences so far): Unimplemented"
def write_err(i: int) -> str:
return (
f'Failed to write to remote cache ({i} occurrences so far): Internal: "StubCAS is '
f'configured to always fail"'
)
first_read_err = read_err(1)
first_write_err = write_err(1)
third_read_err = read_err(3)
third_write_err = write_err(3)
fourth_read_err = read_err(4)
fourth_write_err = write_err(4)
ignore_result = run(RemoteCacheWarningsBehavior.ignore)
for err in [
first_read_err,
first_write_err,
third_read_err,
third_write_err,
fourth_read_err,
fourth_write_err,
]:
assert err not in ignore_result
first_only_result = run(RemoteCacheWarningsBehavior.first_only)
for err in [first_read_err, first_write_err]:
assert err in first_only_result
for err in [third_read_err, third_write_err, fourth_read_err, fourth_write_err]:
assert err not in first_only_result
backoff_result = run(RemoteCacheWarningsBehavior.backoff)
for err in [first_read_err, first_write_err, fourth_read_err, fourth_write_err]:
assert err in backoff_result
for err in [third_read_err, third_write_err]:
assert err not in backoff_result
| apache-2.0 | 5,741,512,221,581,652,000 | 37.324324 | 96 | 0.61354 | false | 3.82726 | false | false | false |
scottkirkwood/mm2odp | mm2odp/update_odp.py | 1 | 4744 | #!/usr/bin/env python
#
"""Modify an open office document.
Specifically designed to modify an OpenOffice Presentation.
"""
__author__ = 'scott@google.com (scottkirkwood))'
import sys
import zipfile
import copy
try:
import xml.etree.ElementTree as ET # Python 2.5
except:
try:
import cElementTree as ET # Faster
except:
import elementtree.ElementTree as ET
NS_OFFICE = '{urn:oasis:names:tc:opendocument:xmlns:office:1.0}'
NS_DRAW = '{urn:oasis:names:tc:opendocument:xmlns:drawing:1.0}'
NS_TEXT = '{urn:oasis:names:tc:opendocument:xmlns:text:1.0}'
def dump(elems):
lines = []
for elem in elems.getiterator():
lines.append(str(elem))
return '\n'.join(lines)
class UpdateOdp:
def __init__(self, meta, pageitems):
"""ctor
Args:
pageitems: list of [(slide-title, [listitems, ...]), ...]
"""
self.meta = meta
self.pages = pageitems
def ReadWrite(self, infname, outfname):
"""Read in in fname, and write out to out fname.
infname and outfname must be different, the idea is that infname is a
'template' and outfname is the final version.
"""
z_in = zipfile.ZipFile(infname, 'r')
z_out = zipfile.ZipFile(outfname, 'w')
for info in z_in.infolist():
text = z_in.read(info.filename)
text = self.CallBack(info, text)
z_out.writestr(info, text)
z_out.close()
z_in.close()
def CallBack(self, zipinfo, text):
if zipinfo.filename == 'content.xml':
et = ET.fromstring(text)
et = self.UpdateContent(et)
text = ET.tostring(et)
text = text.encode('utf-8', 'xmlcharrefreplace')
return text
def UpdateContent(self, et):
"""Update content.xml.
    Note: modifies the passed-in ElementTree in place and returns it.
    Args:
      et: ElementTree of content.xml.
    Returns:
      ElementTree
"""
presentation = et.find('.//%sbody/%spresentation' % (NS_OFFICE, NS_OFFICE))
page_copy = copy.deepcopy(presentation[1])
del presentation[1:]
texts = presentation[0].findall('.//%sp' % (NS_TEXT))
for index, text in enumerate(texts):
if index == 0:
text.text = self.meta['title']
elif index == 1:
text.text = self.meta['subtitle']
elif index == 2:
text.text = self.meta['author']
for page in self.pages:
title = page[0]
items = page[1]
page_copycopy = copy.deepcopy(page_copy)
text_boxes = page_copycopy.findall('.//%stext-box' % NS_DRAW)
textp = text_boxes[0].findall('.//%sp' % NS_TEXT)
textp[0].text = title
list_copy = copy.deepcopy(text_boxes[1].find('.//%slist' % NS_TEXT))
del text_boxes[1][0:]
self._recurse_items(items, list_copy, text_boxes[1], 1)
presentation.append(page_copycopy)
return et
def _recurse_items(self, items, list_copy, text_box, depth):
"""Search for line items of a certain depth.
"""
findp = './/%sp' % NS_TEXT
for item in items:
if isinstance(item, list):
tofind = '%slist-item' % NS_TEXT
stylename = '%sstyle-name' % NS_TEXT
list_copycopy = copy.deepcopy(list_copy)
for node in list_copycopy.getiterator():
if node.tag == tofind:
if len(node.getchildren()):
node.remove(node.getchildren()[0])
else:
              raise ValueError('Unable to find any child nodes for "%s"' % node)
newsubnode = copy.deepcopy(list_copy)
del newsubnode.attrib[stylename]
textp = newsubnode.findall(findp)
if textp:
textp = textp[0]
text = textp.attrib[stylename]
text = text[0:-1] + str(int(text[-1]) + 1)
textp.attrib[stylename] = text
textp.text = ''
node.append(newsubnode)
else:
              raise ValueError('Unable to find "%s" in subnode "%s"' % (findp,
                  dump(newsubnode)))
break
self._recurse_items(item, list_copycopy, text_box, depth + 1)
else:
list_copycopy = copy.deepcopy(list_copy)
textp = list_copycopy.findall(findp)
if textp:
textp = textp[0]
textp.text = item
else:
          raise ValueError('Unable to find "%s" in "%s"' % (findp,
              dump(list_copycopy)))
text_box.append(list_copycopy)
def main(argv):
import optparse
parser = optparse.OptionParser()
options, args = parser.parse_args()
meta = {
'title' : 'My title',
}
pages = [
('Page 1', ['Line 1', 'Line 2']),
('Page 2', ['Line 3', 'Line 4']),
]
update_odf = UpdateOdp(meta, pages)
update_odf.ReadWrite('template.odp', 'sample-out.odp')
if __name__ == '__main__':
main(sys.argv)
| gpl-2.0 | 366,265,514,852,888,100 | 28.465839 | 79 | 0.580101 | false | 3.400717 | false | false | false |
edio/scribscrob | scribscrob/model.py | 1 | 1154 | STOP = "stop"
PLAY = "play"
PAUSE = "pause"
class Song:
"""
Simple structure over currentsong dict returned by MPDClient
"""
def __init__(self, song: dict):
"""
Create instance from dictionary returned by MPDClient
"""
# primary
self.title = song.get('title')
self.artist = song.get('artist')
self.album = song.get('album')
self.file = song.get('file')
# derived
self.isstream = self.file.startswith('http')
if not self.isstream:
self.length = int(song['time']) * 1000
def __repr__(self):
nstr = lambda s: s if s else "<empty>"
source = "[http]" if self.isstream else "[file]"
return "{:s} {:s} - {:s}".format(source, nstr(self.artist), nstr(self.title))
class Status:
"""
Simple structure over status dict returned by MPDClient
"""
def __init__(self, status: dict):
self.state = status['state']
self.elapsed = status['elapsed'] if self.state != STOP else None
def __str__(self):
return "{:s}({:s})".format(self.state, self.elapsed if self.elapsed else "")
| gpl-3.0 | 3,224,746,788,041,680,400 | 26.47619 | 85 | 0.564125 | false | 3.771242 | false | false | false |
dongweiming/web_develop | chapter3/section3/orm_sql.py | 1 | 1240 | # coding=utf-8
from sqlalchemy import create_engine, Column, Integer, String, Sequence
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import and_, or_
from sqlalchemy.orm import sessionmaker
from consts import DB_URI
eng = create_engine(DB_URI)
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
id = Column(Integer, Sequence('user_id_seq'),
primary_key=True, autoincrement=True)
name = Column(String(50))
Base.metadata.drop_all(bind=eng)
Base.metadata.create_all(bind=eng)
Session = sessionmaker(bind=eng)
session = Session()
session.add_all([User(name=username)
for username in ('xiaoming', 'wanglang', 'lilei')])
session.commit()
def get_result(rs):
print '-' * 20
for user in rs:
print user.name
rs = session.query(User).all()
get_result(rs)
rs = session.query(User).filter(User.id.in_([2, ]))
get_result(rs)
rs = session.query(User).filter(and_(User.id > 2, User.id < 4))
get_result(rs)
rs = session.query(User).filter(or_(User.id == 2, User.id == 4))
get_result(rs)
rs = session.query(User).filter(User.name.like('%min%'))
get_result(rs)
user = session.query(User).filter_by(name='xiaoming').first()
get_result([user])
| gpl-3.0 | 4,338,049,061,396,765,700 | 24.833333 | 71 | 0.683871 | false | 3.076923 | false | false | false |
dokipen/trac | trac/resource.py | 1 | 15270 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006-2007 Alec Thomas <alec@swapoff.org>
# Copyright (C) 2007 Christian Boos <cboos@neuf.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christian Boos <cboos@neuf.fr>
# Alec Thomas <alec@swapoff.org>
from trac.core import *
from trac.util.translation import _
class ResourceNotFound(TracError):
"""Thrown when a non-existent resource is requested"""
class IResourceManager(Interface):
def get_resource_realms():
"""Return resource realms managed by the component.
:rtype: `basestring` generator
"""
def get_resource_url(resource, href, **kwargs):
"""Return the canonical URL for displaying the given resource.
:param resource: a `Resource`
:param href: an `Href` used for creating the URL
Note that if there's no special rule associated to this realm for
creating URLs (i.e. the standard convention of using realm/id applies),
then it's OK to not define this method.
"""
def get_resource_description(resource, format='default', context=None,
**kwargs):
"""Return a string representation of the resource, according to the
`format`.
:param resource: the `Resource` to describe
:param format: the kind of description wanted. Typical formats are:
`'default'`, `'compact'` or `'summary'`.
:param context: an optional rendering context to allow rendering rich
output (like markup containing links)
:type context: `Context`
Additional keyword arguments can be given as extra information for
some formats.
For example, the ticket with the id 123 is represented as:
- `'#123'` in `'compact'` format,
- `'Ticket #123'` for the `default` format.
- `'Ticket #123 (closed defect): This is the summary'` for the
`'summary'` format
Note that it is also OK to not define this method if there's no
special way to represent the resource, in which case the standard
representations 'realm:id' (in compact mode) or 'Realm id' (in
default mode) will be used.
"""
class Resource(object):
"""Resource identifier.
This specifies as precisely as possible *which* resource from a Trac
environment is manipulated.
A resource is identified by:
(- a `project` identifier) 0.12?
- a `realm` (a string like `'wiki'` or `'ticket'`)
- an `id`, which uniquely identifies a resource within its realm.
If the `id` information is not set, then the resource represents
the realm as a whole.
- an optional `version` information.
If `version` is `None`, this refers by convention to the latest
version of the resource.
Some generic and commonly used rendering methods are associated as well
to the Resource object. Those properties and methods actually delegate
the real work to the Resource's manager.
"""
__slots__ = ('realm', 'id', 'version', 'parent')
def __repr__(self):
path = []
r = self
while r:
name = r.realm
if r.id:
name += ':' + unicode(r.id) # id can be numerical
if r.version is not None:
name += '@' + unicode(r.version)
path.append(name or '')
r = r.parent
return '<Resource %r>' % (', '.join(reversed(path)))
def __eq__(self, other):
return self.realm == other.realm and \
self.id == other.id and \
self.version == other.version and \
self.parent == other.parent
def __hash__(self):
"""Hash this resource descriptor, including its hierarchy."""
path = ()
current = self
while current:
            path += (current.realm, current.id, current.version)
current = current.parent
return hash(path)
# -- methods for creating other Resource identifiers
def __new__(cls, resource_or_realm=None, id=False, version=False,
parent=False):
"""Create a new Resource object from a specification.
:param resource_or_realm: this can be either:
- a `Resource`, which is then used as a base for making a copy
- a `basestring`, used to specify a `realm`
:param id: the resource identifier
:param version: the version or `None` for indicating the latest version
>>> main = Resource('wiki', 'WikiStart')
>>> repr(main)
"<Resource u'wiki:WikiStart'>"
>>> Resource(main) is main
True
>>> main3 = Resource(main, version=3)
>>> repr(main3)
"<Resource u'wiki:WikiStart@3'>"
>>> main0 = main3(version=0)
>>> repr(main0)
"<Resource u'wiki:WikiStart@0'>"
        In a copy, if `id` is overridden, then the original `version` value
will not be reused.
>>> repr(Resource(main3, id="WikiEnd"))
"<Resource u'wiki:WikiEnd'>"
>>> repr(Resource(None))
"<Resource ''>"
"""
realm = resource_or_realm
if isinstance(resource_or_realm, Resource):
if id is False and version is False and parent is False:
return resource_or_realm
else: # copy and override
realm = resource_or_realm.realm
if id is False:
id = resource_or_realm.id
if version is False:
if id == resource_or_realm.id:
version = resource_or_realm.version # could be 0...
else:
version = None
if parent is False:
parent = resource_or_realm.parent
else:
if id is False:
id = None
if version is False:
version = None
if parent is False:
parent = None
resource = super(Resource, cls).__new__(cls)
resource.realm = realm
resource.id = id
resource.version = version
resource.parent = parent
return resource
def __call__(self, realm=False, id=False, version=False, parent=False):
"""Create a new Resource using the current resource as a template.
Optional keyword arguments can be given to override `id` and
`version`.
"""
return Resource(realm is False and self or realm, id, version, parent)
# -- methods for retrieving children Resource identifiers
def child(self, realm, id=False, version=False):
"""Retrieve a child resource for a secondary `realm`.
Same as `__call__`, except that this one sets the parent to `self`.
>>> repr(Resource(None).child('attachment', 'file.txt'))
"<Resource u', attachment:file.txt'>"
"""
return Resource(realm, id, version, self)
class ResourceSystem(Component):
"""Resource identification and description manager.
This component makes the link between `Resource` identifiers and their
corresponding manager `Component`.
"""
resource_managers = ExtensionPoint(IResourceManager)
def __init__(self):
self._resource_managers_map = None
# Public methods
def get_resource_manager(self, realm):
"""Return the component responsible for resources in the given `realm`
:param realm: the realm name
:return: a `Component` implementing `IResourceManager` or `None`
"""
# build a dict of realm keys to IResourceManager implementations
if not self._resource_managers_map:
map = {}
for manager in self.resource_managers:
for manager_realm in manager.get_resource_realms():
map[manager_realm] = manager
self._resource_managers_map = map
return self._resource_managers_map.get(realm)
def get_known_realms(self):
"""Return a list of all the realm names of resource managers."""
realms = []
for manager in self.resource_managers:
for realm in manager.get_resource_realms():
realms.append(realm)
return realms
# -- Utilities for manipulating resources in a generic way
def get_resource_url(env, resource, href, **kwargs):
"""Retrieve the canonical URL for the given resource.
This function delegates the work to the resource manager for that
resource if it implements a `get_resource_url` method, otherwise
reverts to simple '/realm/identifier' style URLs.
:param env: the `Environment` where `IResourceManager` components live
:param resource: the `Resource` object specifying the Trac resource
:param href: an `Href` object used for building the URL
    Additional keyword arguments are translated as query parameters in the URL.
>>> from trac.test import EnvironmentStub
>>> from trac.web.href import Href
>>> env = EnvironmentStub()
>>> href = Href('/trac.cgi')
>>> main = Resource('generic', 'Main')
>>> get_resource_url(env, main, href)
'/trac.cgi/generic/Main'
>>> get_resource_url(env, main(version=3), href)
'/trac.cgi/generic/Main?version=3'
>>> get_resource_url(env, main(version=3), href)
'/trac.cgi/generic/Main?version=3'
>>> get_resource_url(env, main(version=3), href, action='diff')
'/trac.cgi/generic/Main?action=diff&version=3'
>>> get_resource_url(env, main(version=3), href, action='diff', version=5)
'/trac.cgi/generic/Main?action=diff&version=5'
"""
manager = ResourceSystem(env).get_resource_manager(resource.realm)
if not manager or not hasattr(manager, 'get_resource_url'):
args = {'version': resource.version}
args.update(kwargs)
return href(resource.realm, resource.id, **args)
else:
return manager.get_resource_url(resource, href, **kwargs)
def get_resource_description(env, resource, format='default', **kwargs):
"""Retrieve a standardized description for the given resource.
This function delegates the work to the resource manager for that
resource if it implements a `get_resource_description` method,
otherwise reverts to simple presentation of the realm and identifier
information.
:param env: the `Environment` where `IResourceManager` components live
:param resource: the `Resource` object specifying the Trac resource
:param format: which formats to use for the description
Additional keyword arguments can be provided and will be propagated
to resource manager that might make use of them (typically, a `context`
parameter for creating context dependent output).
>>> from trac.test import EnvironmentStub
>>> env = EnvironmentStub()
>>> main = Resource('generic', 'Main')
>>> get_resource_description(env, main)
u'generic:Main'
>>> get_resource_description(env, main(version=3))
u'generic:Main'
>>> get_resource_description(env, main(version=3), format='summary')
u'generic:Main at version 3'
"""
manager = ResourceSystem(env).get_resource_manager(resource.realm)
if not manager or not hasattr(manager, 'get_resource_description'):
name = u'%s:%s' % (resource.realm, resource.id)
if format == 'summary':
name += _(' at version %(version)s', version=resource.version)
return name
else:
return manager.get_resource_description(resource, format, **kwargs)
def get_resource_name(env, resource):
return get_resource_description(env, resource)
def get_resource_shortname(env, resource):
return get_resource_description(env, resource, 'compact')
def get_resource_summary(env, resource):
return get_resource_description(env, resource, 'summary')
def get_relative_resource(resource, path=''):
"""Build a Resource relative to a reference resource.
:param path: path leading to another resource within the same realm.
"""
if path in (None, '', '.'):
return resource
else:
base = unicode(path[0] != '/' and resource.id or '').split('/')
for comp in path.split('/'):
if comp == '..':
if base:
base.pop()
elif comp and comp != '.':
base.append(comp)
return resource(id=base and '/'.join(base) or None)
def get_relative_url(env, resource, href, path='', **kwargs):
"""Build an URL relative to a resource given as reference.
:param path: path leading to another resource within the same realm.
>>> from trac.test import EnvironmentStub
>>> env = EnvironmentStub()
>>> from trac.web.href import Href
>>> href = Href('/trac.cgi')
>>> main = Resource('wiki', 'Main', version=3)
Without parameters, return the canonical URL for the resource, like
`get_resource_url` does.
>>> get_relative_url(env, main, href)
'/trac.cgi/wiki/Main?version=3'
Paths are relative to the given resource:
>>> get_relative_url(env, main, href, '.')
'/trac.cgi/wiki/Main?version=3'
>>> get_relative_url(env, main, href, './Sub')
'/trac.cgi/wiki/Main/Sub'
>>> get_relative_url(env, main, href, './Sub/Infra')
'/trac.cgi/wiki/Main/Sub/Infra'
>>> get_relative_url(env, main, href, './Sub/')
'/trac.cgi/wiki/Main/Sub'
>>> mainsub = main(id='Main/Sub')
>>> get_relative_url(env, mainsub, href, '..')
'/trac.cgi/wiki/Main'
>>> get_relative_url(env, main, href, '../Other')
'/trac.cgi/wiki/Other'
References always stay within the current resource realm:
>>> get_relative_url(env, mainsub, href, '../..')
'/trac.cgi/wiki'
>>> get_relative_url(env, mainsub, href, '../../..')
'/trac.cgi/wiki'
>>> get_relative_url(env, mainsub, href, '/toplevel')
'/trac.cgi/wiki/toplevel'
Extra keyword arguments are forwarded as query parameters:
>>> get_relative_url(env, main, href, action='diff')
'/trac.cgi/wiki/Main?action=diff&version=3'
"""
return get_resource_url(env, get_relative_resource(resource, path),
href, **kwargs)
def render_resource_link(env, context, resource, format='default'):
"""Utility for generating a link `Element` to the given resource.
Some component manager may directly use an extra `context` parameter
in order to directly generate rich content. Otherwise, the textual output
is wrapped in a link to the resource.
"""
from genshi.builder import Element, tag
link = get_resource_description(env, resource, format, context=context)
if not isinstance(link, Element):
link = tag.a(link, href=get_resource_url(env, resource, context.href))
return link
| bsd-3-clause | -1,609,402,974,996,894,000 | 34.84507 | 79 | 0.623379 | false | 4.166439 | false | false | false |
LukeMurphey/splunk-google-drive | src/bin/google_drive_app/oauth2client/_pure_python_crypt.py | 59 | 6367 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pure Python crypto-related routines for oauth2client.
Uses the ``rsa``, ``pyasn1`` and ``pyasn1_modules`` packages
to parse PEM files storing PKCS#1 or PKCS#8 keys as well as
certificates.
"""
from pyasn1.codec.der import decoder
from pyasn1_modules import pem
from pyasn1_modules.rfc2459 import Certificate
from pyasn1_modules.rfc5208 import PrivateKeyInfo
import rsa
import six
from oauth2client import _helpers
_PKCS12_ERROR = r"""\
PKCS12 format is not supported by the RSA library.
Either install PyOpenSSL, or please convert .p12 format
to .pem format:
$ cat key.p12 | \
> openssl pkcs12 -nodes -nocerts -passin pass:notasecret | \
> openssl rsa > key.pem
"""
_POW2 = (128, 64, 32, 16, 8, 4, 2, 1)
_PKCS1_MARKER = ('-----BEGIN RSA PRIVATE KEY-----',
'-----END RSA PRIVATE KEY-----')
_PKCS8_MARKER = ('-----BEGIN PRIVATE KEY-----',
'-----END PRIVATE KEY-----')
_PKCS8_SPEC = PrivateKeyInfo()
def _bit_list_to_bytes(bit_list):
"""Converts an iterable of 1's and 0's to bytes.
Combines the list 8 at a time, treating each group of 8 bits
as a single byte.
"""
num_bits = len(bit_list)
byte_vals = bytearray()
for start in six.moves.xrange(0, num_bits, 8):
curr_bits = bit_list[start:start + 8]
char_val = sum(val * digit
for val, digit in zip(_POW2, curr_bits))
byte_vals.append(char_val)
return bytes(byte_vals)
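# For example, the bit list [0, 1, 0, 0, 0, 0, 0, 1] packs to 0x41, i.e. b'A':
#
#   _bit_list_to_bytes([0, 1, 0, 0, 0, 0, 0, 1]) == b'A'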
class RsaVerifier(object):
"""Verifies the signature on a message.
Args:
pubkey: rsa.key.PublicKey (or equiv), The public key to verify with.
"""
def __init__(self, pubkey):
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string or bytes, The message to verify. If string, will be
encoded to bytes as utf-8.
signature: string or bytes, The signature on the message. If
string, will be encoded to bytes as utf-8.
Returns:
True if message was signed by the private key associated with the
public key that this object was constructed with.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
try:
return rsa.pkcs1.verify(message, signature, self._pubkey)
except (ValueError, rsa.pkcs1.VerificationError):
return False
@classmethod
def from_string(cls, key_pem, is_x509_cert):
"""Construct an RsaVerifier instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
is expected to be an RSA key in PEM format.
Returns:
RsaVerifier instance.
Raises:
ValueError: if the key_pem can't be parsed. In either case, error
will begin with 'No PEM start marker'. If
``is_x509_cert`` is True, will fail to find the
"-----BEGIN CERTIFICATE-----" error, otherwise fails
to find "-----BEGIN RSA PUBLIC KEY-----".
"""
key_pem = _helpers._to_bytes(key_pem)
if is_x509_cert:
der = rsa.pem.load_pem(key_pem, 'CERTIFICATE')
asn1_cert, remaining = decoder.decode(der, asn1Spec=Certificate())
if remaining != b'':
raise ValueError('Unused bytes', remaining)
cert_info = asn1_cert['tbsCertificate']['subjectPublicKeyInfo']
key_bytes = _bit_list_to_bytes(cert_info['subjectPublicKey'])
pubkey = rsa.PublicKey.load_pkcs1(key_bytes, 'DER')
else:
pubkey = rsa.PublicKey.load_pkcs1(key_pem, 'PEM')
return cls(pubkey)
class RsaSigner(object):
"""Signs messages with a private key.
Args:
pkey: rsa.key.PrivateKey (or equiv), The private key to sign with.
"""
def __init__(self, pkey):
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
message = _helpers._to_bytes(message, encoding='utf-8')
return rsa.pkcs1.sign(message, self._key, 'SHA-256')
@classmethod
def from_string(cls, key, password='notasecret'):
"""Construct an RsaSigner instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM
files.
Returns:
RsaSigner instance.
Raises:
ValueError if the key cannot be parsed as PKCS#1 or PKCS#8 in
PEM format.
"""
key = _helpers._from_bytes(key) # pem expects str in Py3
marker_id, key_bytes = pem.readPemBlocksFromFile(
six.StringIO(key), _PKCS1_MARKER, _PKCS8_MARKER)
if marker_id == 0:
pkey = rsa.key.PrivateKey.load_pkcs1(key_bytes,
format='DER')
elif marker_id == 1:
key_info, remaining = decoder.decode(
key_bytes, asn1Spec=_PKCS8_SPEC)
if remaining != b'':
raise ValueError('Unused bytes', remaining)
pkey_info = key_info.getComponentByName('privateKey')
pkey = rsa.key.PrivateKey.load_pkcs1(pkey_info.asOctets(),
format='DER')
else:
raise ValueError('No key could be detected.')
return cls(pkey)
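# Minimal sign/verify roundtrip sketch (the key material below is hypothetical;
# private_pem is a PKCS#1 or PKCS#8 private key and public_pem an RSA public
# key in PEM format):
#
#   signer = RsaSigner.from_string(private_pem)
#   signature = signer.sign(b'payload')
#   verifier = RsaVerifier.from_string(public_pem, is_x509_cert=False)
#   assert verifier.verify(b'payload', signature)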
| apache-2.0 | 1,831,056,636,899,550,000 | 33.603261 | 79 | 0.593215 | false | 4.02211 | false | false | false |
ladybug-tools/honeybee | honeybee_plus/radiance/command/rfluxmtx.py | 3 | 16283 | # coding=utf-8
from ._commandbase import RadianceCommand
from ..datatype import RadiancePath, RadianceValue, RadianceNumber
from ..datatype import RadianceBoolFlag
from ..parameters.rfluxmtx import RfluxmtxParameters
import os
# TODO (sarith and mostapha): move parameters such as output_data_format to command
# parameters. They are not command inputs.
class Rfluxmtx(RadianceCommand):
"""Radiance Rfluxmtx matrix."""
ground_string = """
void glow ground_glow
0
0
4 1 1 1 0
ground_glow source ground
0
0
4 0 0 -1 180
"""
sky_string = """
void glow sky_glow
0
0
4 1 1 1 0
sky_glow source sky
0
0
4 0 0 1 180
"""
@staticmethod
def control_parameters(hemi_type='u', hemi_up_direction='Y', output_file=''):
"""Rfluxmtx ControlParameters."""
return RfluxmtxControlParameters(hemi_type, hemi_up_direction, output_file)
@staticmethod
def default_sky_ground(file_name, sky_type=None, sky_file_format=None,
ground_file_format=None):
"""
Args:
            file_name: The name of the file to which the sky definition
                should be written.
            sky_type: The acceptable inputs for hemisphere type are:
u for uniform.(Usually applicable for ground).\n
kf for klems full.\n
kh for klems half.\n
kq for klems quarter.\n
rN for Reinhart - Tregenza type skies. N stands for
subdivisions and defaults to 1.\n
scN for shirley-chiu subdivisions.
Returns:
file_name: Passes back the same file_name that was provided as input.
"""
sky_param = Rfluxmtx.control_parameters(hemi_type=sky_type or 'r',
output_file=sky_file_format)
ground_param = Rfluxmtx.control_parameters(hemi_type='u',
output_file=ground_file_format)
ground_string = Rfluxmtx.add_control_parameters(Rfluxmtx.ground_string,
{'ground_glow': ground_param})
sky_string = Rfluxmtx.add_control_parameters(Rfluxmtx.sky_string,
{'sky_glow': sky_param})
        with open(file_name, 'w') as skyFile:
skyFile.write(ground_string + '\n' + sky_string)
return file_name
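    # Illustrative use (the file name and sky subdivision below are
    # hypothetical): Rfluxmtx.default_sky_ground('sky.rad', sky_type='r4')
    # writes a glow ground ('u' sampling) plus a glow sky ('r4' Reinhart
    # sampling), each annotated with a "#@rfluxmtx" control line.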
@staticmethod
def add_control_parameters(input_string, modifier_dict):
if os.path.exists(input_string):
            with open(input_string) as fileString:
file_data = fileString.read()
else:
file_data = input_string
output_string = ''
check_dict = dict.fromkeys(modifier_dict.keys(), None)
for lines in file_data.split('\n'):
for key, value in modifier_dict.items():
if key in lines and not check_dict[key] and \
not lines.strip().startswith('#'):
output_string += str(value) + '\n'
check_dict[key] = True
else:
output_string += lines.strip() + '\n'
for key, value in check_dict.items():
assert value, "The modifier %s was not found in the string specified" % key
if os.path.exists(input_string):
new_output_file = input_string[:-4] + '_cp_added' + input_string[-4:]
with open(new_output_file, 'w') as newoutput:
newoutput.write(output_string)
output_string = new_output_file
return output_string
@staticmethod
def check_for_rflux_parameters(file_val):
        with open(file_val) as rfluxFile:
rflux_string = rfluxFile.read()
assert '#@rfluxmtx' in rflux_string, \
"The file %s does not have any rfluxmtx control parameters."
return True
# sender = RadiancePath('sender','sender file')
receiver_file = RadiancePath('receiver', 'receiver file')
octree_file = RadiancePath('octree', 'octree file', extension='.oct')
output_matrix = RadiancePath('output_matrix', 'output Matrix File')
view_rays_file = RadiancePath('view_rays_file',
'file containing ray samples generated by vwrays')
output_data_format = RadianceValue('f', 'output data format', is_joined=True)
verbose = RadianceBoolFlag('v', 'verbose commands in stdout')
num_processors = RadianceNumber('n', 'number of processors', num_type=int)
# TODO: This method misses RfluxmtxParameters as an input.
def __init__(self, sender=None, receiver_file=None, octree_file=None,
rad_files=None, points_file=None, output_matrix=None,
view_rays_file=None, view_info_file=None, output_filename_format=None,
num_processors=None):
RadianceCommand.__init__(self)
self.sender = sender
"""Sender file will be either a rad file containing rfluxmtx variables
or just a - """
self.receiver_file = receiver_file
"""Receiver file will usually be the sky file containing rfluxmtx
variables"""
self.octree_file = octree_file
"""Octree file containing the other rad file in the scene."""
self.rad_files = rad_files
"""Rad files other than the sender and receiver that are a part of the
scene."""
self.points_file = points_file
"""The points file or input vwrays for which the illuminance/luminance
value are to be calculated."""
self.number_of_points = 0
"""Number of test points. Initially set to 0."""
self.output_matrix = output_matrix
"""The flux matrix file that will be created by rfluxmtx."""
self.view_rays_file = view_rays_file
"""File containing ray samples generated from vwrays"""
self.view_info_file = view_info_file
"""File containing view dimensions calculated from vwrays."""
self.output_filename_format = output_filename_format
"""Filename format"""
self.num_processors = num_processors
"""Number of processors"""
@property
def output_filename_format(self):
return self._output_filename_format
@output_filename_format.setter
def output_filename_format(self, value):
# TODO: Add testing logic for this !
self._output_filename_format = value or None
@property
def view_info_file(self):
return self._view_info_file
@view_info_file.setter
def view_info_file(self, file_name):
"""
The input for this setter is a file containing the view dimensions
calculated through the -d option in rfluxmtx.
"""
if file_name:
assert os.path.exists(file_name),\
"The file %s specified as view_info_file does not exist." % file_name
self._view_info_file = file_name
with open(file_name) as view_fileName:
self._view_fileDimensions = view_fileName.read().strip()
else:
self._view_info_file = ''
self._view_fileDimensions = ''
@property
def points_file(self):
return self._points_file
@points_file.setter
def points_file(self, value):
if value:
if os.path.exists(value):
with open(value, 'rb') as pfile:
self.number_of_points = sum(1 for line in pfile if line.strip())
elif self.number_of_points == 0:
                print('Warning: Failed to find the points_file at "{}".'
                      ' Use the number_of_points attribute to set the number'
                      ' of points separately.'.format(value))
self._points_file = value
else:
self._points_file = ''
@property
def rad_files(self):
"""Get and set scene files."""
return self.__rad_files
@rad_files.setter
def rad_files(self, files):
if files:
self.__rad_files = [os.path.normpath(f) for f in files]
else:
self.__rad_files = []
@property
def rfluxmtx_parameters(self):
return self.__rfluxmtx_parameters
@rfluxmtx_parameters.setter
def rfluxmtx_parameters(self, parameters):
self.__rfluxmtx_parameters = parameters or RfluxmtxParameters()
assert hasattr(self.rfluxmtx_parameters, "isRfluxmtxParameters"), \
"input rfluxmtx_parameters is not a valid parameters type."
def to_rad_string(self, relative_path=False):
octree = self.octree_file.to_rad_string()
octree = '-i %s' % self.normspace(octree) if octree else ''
output_data_format = self.output_data_format.to_rad_string()
verbose = self.verbose.to_rad_string()
number_of_processors = self.num_processors.to_rad_string()
number_of_points = '-y %s' % self.number_of_points \
if self.number_of_points > 0 else ''
points_file = self.normspace(self.points_file)
points_file = '< %s' % points_file if points_file else ''
view_file_samples = self.normspace(self.view_rays_file.to_rad_string())
view_file_samples = '< %s' % view_file_samples if view_file_samples else ''
assert not (points_file and view_file_samples),\
'View file and points file cannot be specified at the same time!'
input_rays = points_file or view_file_samples
output_matrix = self.normspace(self.output_matrix.to_rad_string())
output_matrix = "> %s" % output_matrix if output_matrix else ''
output_filename_format = self.output_filename_format
output_filename_format = "-o %s" % output_filename_format if \
output_filename_format else ''
# method for adding an input or nothing to the command
def add_to_str(val):
return "%s " % val if val else ''
# Creating the string this way because it might change again in the
# future.
rad_string = ["%s " % self.normspace(os.path.join(self.radbin_path, 'rfluxmtx'))]
rad_string.append(add_to_str(output_data_format))
rad_string.append(add_to_str(verbose))
rad_string.append(add_to_str(number_of_processors))
rad_string.append(add_to_str(number_of_points))
rad_string.append(add_to_str(self._view_fileDimensions))
if str(self.sender).strip() == '-':
rad_string.append(add_to_str(self.rfluxmtx_parameters.to_rad_string()))
else:
# -I and -i options are only valid for pass-through cases
rflux_par = add_to_str(
self.rfluxmtx_parameters.to_rad_string()).replace(
'-I', '')
rad_string.append(rflux_par)
rad_string.append(add_to_str(output_filename_format))
rad_string.append(add_to_str(self.sender))
rad_string.append(add_to_str(self.normspace(self.receiver_file.to_rad_string())))
rad_string.append(add_to_str(" ".join(self.rad_files)))
rad_string.append(add_to_str(octree))
rad_string.append(add_to_str(input_rays))
rad_string.append(add_to_str(output_matrix))
return ''.join(rad_string)
@property
def input_files(self):
return [self.receiver_file] + self.rad_files
class RfluxmtxControlParameters(object):
"""Rfluxmtx ControlParameters.
    Set the values for hemisphere type, hemisphere up direction and output file
location (optional).
"""
def __init__(self, hemi_type='u', hemi_up_direction='Y', output_file=''):
"""Init class."""
self.hemisphere_type = hemi_type
"""
The acceptable inputs for hemisphere type are:
u for uniform.(Usually applicable for ground).
kf for klems full.
kh for klems half.
kq for klems quarter.
rN for Reinhart - Tregenza type skies. N stands for subdivisions
and defaults to 1.
scN for shirley-chiu subdivisions."""
self.hemisphere_up_direction = hemi_up_direction
"""The acceptable inputs for hemisphere direction are %s""" % \
(",".join(('X', 'Y', 'Z', 'x', 'y', 'z', '-X', '-Y',
'-Z', '-x', '-y', '-z')))
self.output_file = output_file
@property
def hemisphere_type(self):
return self._hemisphereType
@hemisphere_type.setter
def hemisphere_type(self, value):
"""Hemisphere type.
The acceptable inputs for hemisphere type are:
u for uniform.(Usually applicable for ground).
kf for klems full.
kh for klems half.
kq for klems quarter.
rN for Reinhart - Tregenza type skies. N stands for subdivisions and
defaults to 1.
scN for shirley-chiu subdivisions.
"""
if value:
if value in ('u', 'kf', 'kh', 'kq'):
self._hemisphereType = value
return
elif value.startswith('r'):
if len(value) > 1:
try:
num = int(value[1:])
except ValueError:
raise Exception(
"The format reinhart tregenza type skies is rN ."
"The value entered was %s" % value)
else:
num = ''
self._hemisphereType = 'r' + str(num)
elif value.startswith('sc'):
if len(value) > 2:
try:
num = int(value[2:])
except ValueError:
raise Exception(
"The format for ShirleyChiu type values is scN."
"The value entered was %s" % value)
else:
raise Exception(
"The format for ShirleyChiu type values is scN."
"The value entered was %s" % value)
self._hemisphereType = 'sc' + str(num)
else:
except_str = """
The acceptable inputs for hemisphere type are:
u for uniform.(Usually applicable for ground).
kf for klems full.
kh for klems half.
kq for klems quarter.
rN for Reinhart - Tregenza type skies. N stands for
subdivisions and defaults to 1.
scN for shirley-chiu subdivisions.
The value entered was %s
""" % (value)
raise Exception(except_str)
@property
def hemisphere_up_direction(self):
return self._hemisphere_upDirection
@hemisphere_up_direction.setter
def hemisphere_up_direction(self, value):
"""hemisphere direction.
The acceptable inputs for hemisphere direction are a tuple with 3 values
or 'X', 'Y', 'Z', 'x', 'y', 'z', '-X', '-Y','-Z', '-x', '-y','-z'.
"""
allowed_values = ('X', 'Y', 'Z', 'x', 'y', 'z', '-X', '-Y',
'-Z', '-x', '-y', '-z', "+X", "+Y", "+Z",
'+x', "+y", "+z")
if isinstance(value, (tuple, list)):
assert len(value) == 3, \
                'Length of hemisphere_up_direction vector should be 3.'
self._hemisphere_upDirection = ','.join((str(v) for v in value))
elif value:
assert value in allowed_values, "The value for hemisphere_upDirection" \
"should be one of the following: %s" \
% (','.join(allowed_values))
self._hemisphere_upDirection = value
else:
self._hemisphere_upDirection = '+Z'
def __str__(self):
output_file_spec = "o=%s" % self.output_file if self.output_file else ''
return "#@rfluxmtx h=%s u=%s %s" % (self.hemisphere_type,
self.hemisphere_up_direction,
output_file_spec)
| gpl-3.0 | 188,205,192,736,165,800 | 37.223005 | 89 | 0.560769 | false | 3.951225 | false | false | false |
testmana2/test | E5Gui/E5ClickableLabel.py | 2 | 1532 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing a clickable label.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSignal, Qt, QPoint
from PyQt5.QtWidgets import QLabel
class E5ClickableLabel(QLabel):
"""
Class implementing a clickable label.
@signal clicked(QPoint) emitted upon a click on the label
with the left button
@signal middleClicked(QPoint) emitted upon a click on the label
with the middle button or CTRL and left button
"""
clicked = pyqtSignal(QPoint)
middleClicked = pyqtSignal(QPoint)
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget (QWidget)
"""
super(E5ClickableLabel, self).__init__(parent)
def mouseReleaseEvent(self, evt):
"""
Protected method handling mouse release events.
@param evt mouse event (QMouseEvent)
"""
if evt.button() == Qt.LeftButton and self.rect().contains(evt.pos()):
if evt.modifiers() == Qt.ControlModifier:
self.middleClicked.emit(evt.globalPos())
else:
self.clicked.emit(evt.globalPos())
elif evt.button() == Qt.MidButton and \
self.rect().contains(evt.pos()):
self.middleClicked.emit(evt.globalPos())
else:
super(E5ClickableLabel, self).mouseReleaseEvent(evt)
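# Minimal usage sketch (PyQt5; the slot bodies are illustrative):
#
#   label = E5ClickableLabel("Open")
#   label.clicked.connect(lambda pos: print("clicked at", pos))
#   label.middleClicked.connect(lambda pos: print("middle-clicked at", pos))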
| gpl-3.0 | 385,187,944,502,301,200 | 29.039216 | 77 | 0.615535 | false | 4.085333 | false | false | false |
goodcrypto/goodcrypto-libs | syr/pyjs.py | 1 | 1207 | #! /usr/bin/env python
'''
Python to javascript compiler.
Do not confuse this pyjs module with the python dialect at pyjs.org.
Currently compiles RapydScript, a dialect of python designed to
compile to javascript.
Copyright 2013-2016 GoodCrypto
Last modified: 2016-04-20
This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
assert False, 'Untested'
import sys
IS_PY2 = sys.version_info[0] == 2
import os
from tempfile import mkstemp
if IS_PY2:
from cStringIO import StringIO
else:
from io import StringIO
from rapydscript.compiler import parse_file, finalize_source
def compile(py):
''' Compile python to javascript.
    As of 2012-01 RapydScript does not expose a function that compiles
    directly from a string or stream; compiling requires a source file.
'''
# save py code to file
rsfile, rsfilename = mkstemp()
os.write(rsfile, py)
os.close(rsfile)
parse_output = StringIO()
handler = parse_file(rsfilename, parse_output)
os.unlink(rsfilename)
js = finalize_source(parse_output.getvalue(), handler)
parse_output.close()
return js
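# Usage sketch (assumes the rapydscript package is importable and the input is
# valid RapydScript source):
#
#   js_source = compile('def add(a, b):\n    return a + b\n')
#   with open('out.js', 'w') as f:
#       f.write(js_source)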
| gpl-3.0 | 466,930,403,348,249,900 | 22.211538 | 82 | 0.70174 | false | 3.725309 | false | false | false |
aisk/leancloud-python-sdk | leancloud/message.py | 1 | 5791 | # coding: utf-8
"""
Operations on LeanCloud real-time messaging (RTM) messages.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
from typing import Any
from typing import Dict
from typing import Generator
from typing import List
from typing import Optional
from typing import Union
import six
from leancloud import client
class Message(object):
def __init__(self):
self.bin = None # type: bool
self.conversation_id = None # type: str
self.data = None # type: str
self.from_client = None # type: str
self.from_ip = None # type: str
self.is_conversation = None # type: bool
self.is_room = None # type: bool
self.message_id = None # type: str
self.timestamp = None # type: float
self.to = None # type: str
@classmethod
def _find(cls, query_params): # type: (dict) -> Generator[Message, None, None]
content = client.get('/rtm/messages/history', params=query_params).json()
for data in content:
msg = cls()
msg._update_data(data)
yield msg
@classmethod
def find_by_conversation(cls, conversation_id, limit=None, reversed=None, after_time=None, after_message_id=None):
# type: (str, Optional[int], Optional[bool], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
"""获取某个对话中的聊天记录
:param conversation_id: 对话 id
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param reversed: 以默认排序相反的方向返回结果,服务端默认为 False
:param after_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param after_message_id: 起始的消息 id,使用时必须加上对应消息的时间 after_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录
"""
query_params = {} # type: Dict[str, Any]
query_params['convid'] = conversation_id
if limit is not None:
query_params['limit'] = limit
if reversed is not None:
query_params['reversed'] = reversed
if isinstance(after_time, datetime):
query_params['max_ts'] = after_time.timestamp() * 1000
elif isinstance(after_time, six.integer_types) or isinstance(after_time, float):
query_params['max_ts'] = after_time * 1000
if after_message_id is not None:
query_params['msgid'] = after_message_id
return list(cls._find(query_params))
@classmethod
def find_by_client(cls, from_client, limit=None, after_time=None, after_message_id=None):
# type: (str, Optional[int], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
"""获取某个 client 的聊天记录
:param from_client: 要获取聊天记录的 client id
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param after_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param after_message_id: 起始的消息 id,使用时必须加上对应消息的时间 after_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录
"""
query_params = {} # type: Dict[str, Any]
query_params['from'] = from_client
if limit is not None:
query_params['limit'] = limit
if isinstance(after_time, datetime):
query_params['max_ts'] = after_time.timestamp() * 1000
elif isinstance(after_time, six.integer_types) or isinstance(after_time, float):
query_params['max_ts'] = after_time * 1000
if after_message_id is not None:
query_params['msgid'] = after_message_id
return list(cls._find(query_params))
@classmethod
def find_all(cls, limit=None, after_time=None, after_message_id=None):
# type: (Optional[int], Optional[Union[datetime, float]], Optional[str]) -> List[Message]
"""获取应用全部聊天记录
:param limit: 返回条数限制,可选,服务端默认 100 条,最大 1000 条
:param after_time: 查询起始的时间戳,返回小于这个时间(不包含)的记录,服务端默认是当前时间
:param after_message_id: 起始的消息 id,使用时必须加上对应消息的时间 after_time 参数,一起作为查询的起点
:return: 符合条件的聊天记录
"""
query_params = {} # type: Dict[str, Any]
if limit is not None:
query_params['limit'] = limit
if isinstance(after_time, datetime):
query_params['max_ts'] = after_time.timestamp() * 1000
elif isinstance(after_time, six.integer_types) or isinstance(after_time, float):
query_params['max_ts'] = after_time * 1000
if after_message_id is not None:
query_params['msgid'] = after_message_id
return list(cls._find(query_params))
def _update_data(self, server_data): # type: (dict) -> None
self.bin = server_data.get('bin')
self.conversation_id = server_data.get('conv-id')
self.data = server_data.get('data')
self.from_client = server_data.get('from')
self.from_ip = server_data.get('from-ip')
self.is_conversation = server_data.get('is-conv')
self.is_room = server_data.get('is-room')
self.message_id = server_data.get('msg-id')
self.timestamp = server_data.get('timestamp', 0) / 1000
self.to = server_data.get('to')
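# Usage sketch (requires leancloud.init(...) to have been called beforehand;
# the conversation id below is a placeholder):
#
#   msgs = Message.find_by_conversation('CONVERSATION_ID', limit=10)
#   for msg in msgs:
#       print(msg.from_client, msg.data, msg.timestamp)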
| lgpl-3.0 | 5,837,776,280,869,108,000 | 39.277778 | 118 | 0.621478 | false | 2.780822 | false | false | false |
Thingee/cinder | cinder/volume/api.py | 1 | 43519 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes.
"""
import collections
import functools
from oslo.config import cfg
from cinder import context
from cinder.db import base
from cinder import exception
from cinder.image import glance
from cinder import keymgr
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import utils
from cinder.volume.flows.api import create_volume
from cinder.volume import qos_specs
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
volume_host_opt = cfg.BoolOpt('snapshot_same_host',
default=True,
help='Create volume from snapshot at the host '
'where snapshot resides')
volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az',
default=True,
help='Ensure that the new volumes are the '
'same AZ as snapshot or source volume')
CONF = cfg.CONF
CONF.register_opt(volume_host_opt)
CONF.register_opt(volume_same_az_opt)
CONF.import_opt('storage_availability_zone', 'cinder.volume.manager')
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution
This decorator requires the first 3 args of the wrapped function
to be (self, context, volume)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
target.update(target_obj or {})
_action = 'volume:%s' % action
cinder.policy.enforce(context, _action, target)
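# For example, decorating API.delete with @wrap_check_policy enforces the
# "volume:delete" rule from policy.json, with the target volume dict merged
# into the policy target alongside the caller's project_id and user_id.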
class API(base.Base):
"""API for interacting with the volume manager."""
def __init__(self, db_driver=None, image_service=None):
self.image_service = (image_service or
glance.get_default_image_service())
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zone_names = ()
self.key_manager = keymgr.API()
super(API, self).__init__(db_driver)
def _valid_availability_zone(self, availability_zone):
#NOTE(bcwaldon): This approach to caching fails to handle the case
# that an availability zone is disabled/removed.
if availability_zone in self.availability_zone_names:
return True
if CONF.storage_availability_zone == availability_zone:
return True
azs = self.list_availability_zones()
self.availability_zone_names = [az['name'] for az in azs]
return availability_zone in self.availability_zone_names
def list_availability_zones(self):
"""Describe the known availability zones
:retval list of dicts, each with a 'name' and 'available' key
"""
topic = CONF.volume_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
az_data = [(s['availability_zone'], s['disabled']) for s in services]
disabled_map = {}
for (az_name, disabled) in az_data:
tracked_disabled = disabled_map.get(az_name, True)
disabled_map[az_name] = tracked_disabled and disabled
azs = [{'name': name, 'available': not disabled}
for (name, disabled) in disabled_map.items()]
return tuple(azs)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None, source_volume=None,
scheduler_hints=None, backup_source_volume=None):
if source_volume and volume_type:
if volume_type['id'] != source_volume['volume_type_id']:
msg = _("Invalid volume_type provided (requested type "
"must match source volume, or be omitted). "
"You should omit the argument.")
raise exception.InvalidInput(reason=msg)
if snapshot and volume_type:
if volume_type['id'] != snapshot['volume_type_id']:
msg = _("Invalid volume_type provided (requested type "
"must match source snapshot, or be omitted). "
"You should omit the argument.")
raise exception.InvalidInput(reason=msg)
def check_volume_az_zone(availability_zone):
try:
return self._valid_availability_zone(availability_zone)
except exception.CinderException:
LOG.exception(_("Unable to query if %s is in the "
"availability zone set"), availability_zone)
return False
create_what = {
'context': context,
'raw_size': size,
'name': name,
'description': description,
'snapshot': snapshot,
'image_id': image_id,
'raw_volume_type': volume_type,
'metadata': metadata,
'raw_availability_zone': availability_zone,
'source_volume': source_volume,
'scheduler_hints': scheduler_hints,
'key_manager': self.key_manager,
'backup_source_volume': backup_source_volume,
}
try:
flow_engine = create_volume.get_flow(self.scheduler_rpcapi,
self.volume_rpcapi,
self.db,
self.image_service,
check_volume_az_zone,
create_what)
except Exception:
LOG.exception(_("Failed to create api volume flow"))
raise exception.CinderException(
_("Failed to create api volume flow"))
flow_engine.run()
volume = flow_engine.storage.fetch('volume')
return volume
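    # Illustrative call (hedged sketch; the request context normally comes
    # from the API pipeline rather than being built by hand):
    #
    #   api = API()
    #   volume = api.create(ctxt, size=1, name='vol-1',
    #                       description='example volume')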
@wrap_check_policy
def delete(self, context, volume, force=False, unmanage_only=False):
if context.is_admin and context.project_id != volume['project_id']:
project_id = volume['project_id']
else:
project_id = context.project_id
volume_id = volume['id']
if not volume['host']:
volume_utils.notify_about_volume_usage(context,
volume, "delete.start")
# NOTE(vish): scheduling failed, so delete it
# Note(zhiteng): update volume quota reservation
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume['volume_type_id'])
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_("Failed to update quota for deleting volume"))
self.db.volume_destroy(context.elevated(), volume_id)
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
volume_utils.notify_about_volume_usage(context,
volume, "delete.end")
return
if not force and volume['status'] not in ["available", "error",
"error_restoring",
"error_extending"]:
msg = _("Volume status must be available or error, "
"but current status is: %s") % volume['status']
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Volume cannot be deleted while migrating")
raise exception.InvalidVolume(reason=msg)
snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
if len(snapshots):
msg = _("Volume still has %d dependent snapshots") % len(snapshots)
raise exception.InvalidVolume(reason=msg)
# If the volume is encrypted, delete its encryption key from the key
# manager. This operation makes volume deletion an irreversible process
# because the volume cannot be decrypted without its key.
encryption_key_id = volume.get('encryption_key_id', None)
if encryption_key_id is not None:
self.key_manager.delete_key(context, encryption_key_id)
now = timeutils.utcnow()
self.db.volume_update(context, volume_id, {'status': 'deleting',
'terminated_at': now})
self.volume_rpcapi.delete_volume(context, volume, unmanage_only)
@wrap_check_policy
def update(self, context, volume, fields):
self.db.volume_update(context, volume['id'], fields)
def get(self, context, volume_id):
rv = self.db.volume_get(context, volume_id)
volume = dict(rv.iteritems())
check_policy(context, 'get', volume)
return volume
def get_all(self, context, marker=None, limit=None, sort_key='created_at',
sort_dir='desc', filters=None):
check_policy(context, 'get_all')
        if filters is None:
filters = {}
try:
if limit is not None:
limit = int(limit)
if limit < 0:
msg = _('limit param must be positive')
raise exception.InvalidInput(reason=msg)
except ValueError:
msg = _('limit param must be an integer')
raise exception.InvalidInput(reason=msg)
# Non-admin shouldn't see temporary target of a volume migration, add
# unique filter data to reflect that only volumes with a NULL
# 'migration_status' or a 'migration_status' that does not start with
# 'target:' should be returned (processed in db/sqlalchemy/api.py)
if not context.is_admin:
filters['no_migration_targets'] = True
if filters:
LOG.debug(_("Searching by: %s") % str(filters))
if (context.is_admin and 'all_tenants' in filters):
# Need to remove all_tenants to pass the filtering below.
del filters['all_tenants']
volumes = self.db.volume_get_all(context, marker, limit, sort_key,
sort_dir, filters=filters)
else:
volumes = self.db.volume_get_all_by_project(context,
context.project_id,
marker, limit,
sort_key, sort_dir,
filters=filters)
return volumes
def get_snapshot(self, context, snapshot_id):
check_policy(context, 'get_snapshot')
rv = self.db.snapshot_get(context, snapshot_id)
return dict(rv.iteritems())
def get_volume(self, context, volume_id):
check_policy(context, 'get_volume')
rv = self.db.volume_get(context, volume_id)
return dict(rv.iteritems())
def get_all_snapshots(self, context, search_opts=None):
check_policy(context, 'get_all_snapshots')
search_opts = search_opts or {}
if (context.is_admin and 'all_tenants' in search_opts):
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
snapshots = self.db.snapshot_get_all(context)
else:
snapshots = self.db.snapshot_get_all_by_project(
context, context.project_id)
if search_opts:
LOG.debug(_("Searching by: %s") % search_opts)
results = []
not_found = object()
for snapshot in snapshots:
for opt, value in search_opts.iteritems():
if snapshot.get(opt, not_found) != value:
break
else:
results.append(snapshot)
snapshots = results
return snapshots
@wrap_check_policy
def check_attach(self, volume):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("status must be available")
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def check_detach(self, volume):
# TODO(vish): abstract status checking?
if volume['status'] != "in-use":
msg = _("status must be in-use to detach")
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def reserve_volume(self, context, volume):
#NOTE(jdg): check for Race condition bug 1096983
#explicitly get updated ref and check
volume = self.db.volume_get(context, volume['id'])
if volume['status'] == 'available':
self.update(context, volume, {"status": "attaching"})
else:
msg = _("Volume status must be available to reserve")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def unreserve_volume(self, context, volume):
if volume['status'] == "attaching":
self.update(context, volume, {"status": "available"})
@wrap_check_policy
def begin_detaching(self, context, volume):
# If we are in the middle of a volume migration, we don't want the user
# to see that the volume is 'detaching'. Having 'migration_status' set
# will have the same effect internally.
if not volume['migration_status']:
self.update(context, volume, {"status": "detaching"})
@wrap_check_policy
def roll_detaching(self, context, volume):
if volume['status'] == "detaching":
self.update(context, volume, {"status": "in-use"})
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name,
mountpoint, mode):
volume_metadata = self.get_volume_admin_metadata(context.elevated(),
volume)
if 'readonly' not in volume_metadata:
# NOTE(zhiyan): set a default value for read-only flag to metadata.
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': 'False'})
volume_metadata['readonly'] = 'False'
if volume_metadata['readonly'] == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume['id'])
return self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
host_name,
mountpoint,
mode)
@wrap_check_policy
def detach(self, context, volume):
return self.volume_rpcapi.detach_volume(context, volume)
@wrap_check_policy
def initialize_connection(self, context, volume, connector):
return self.volume_rpcapi.initialize_connection(context,
volume,
connector)
@wrap_check_policy
def terminate_connection(self, context, volume, connector, force=False):
self.unreserve_volume(context, volume)
return self.volume_rpcapi.terminate_connection(context,
volume,
connector,
force)
@wrap_check_policy
def accept_transfer(self, context, volume, new_user, new_project):
return self.volume_rpcapi.accept_transfer(context,
volume,
new_user,
new_project)
def _create_snapshot(self, context,
volume, name, description,
force=False, metadata=None):
check_policy(context, 'create_snapshot', volume)
if volume['migration_status'] is not None:
# Volume is migrating, wait until done
msg = _("Snapshot cannot be created while volume is migrating")
raise exception.InvalidVolume(reason=msg)
if ((not force) and (volume['status'] != "available")):
msg = _("must be available")
raise exception.InvalidVolume(reason=msg)
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': 1}
else:
reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.get('volume_type_id'))
reservations = QUOTAS.reserve(context, **reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG snapshot (%(d_consumed)dG of "
"%(d_quota)dG already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
's_size': volume['size'],
'd_consumed': _consumed(over),
'd_quota': quotas[over]})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=volume['size'],
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
elif 'snapshots' in over:
msg = _("Quota exceeded for %(s_pid)s, tried to create "
"snapshot (%(d_consumed)d snapshots "
"already consumed)")
LOG.warn(msg % {'s_pid': context.project_id,
'd_consumed': _consumed(over)})
raise exception.SnapshotLimitExceeded(
allowed=quotas[over])
self._check_metadata_properties(metadata)
options = {'volume_id': volume['id'],
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'progress': '0%',
'volume_size': volume['size'],
'display_name': name,
'display_description': description,
'volume_type_id': volume['volume_type_id'],
'encryption_key_id': volume['encryption_key_id'],
'metadata': metadata}
try:
snapshot = self.db.snapshot_create(context, options)
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.db.snapshot_destroy(context, volume['id'])
finally:
QUOTAS.rollback(context, reservations)
self.volume_rpcapi.create_snapshot(context, volume, snapshot)
return snapshot
def create_snapshot(self, context,
volume, name,
description, metadata=None):
return self._create_snapshot(context, volume, name, description,
False, metadata)
def create_snapshot_force(self, context,
volume, name,
description, metadata=None):
return self._create_snapshot(context, volume, name, description,
True, metadata)
@wrap_check_policy
def delete_snapshot(self, context, snapshot, force=False):
if not force and snapshot['status'] not in ["available", "error"]:
msg = _("Volume Snapshot status must be available or error")
raise exception.InvalidSnapshot(reason=msg)
self.db.snapshot_update(context, snapshot['id'],
{'status': 'deleting'})
volume = self.db.volume_get(context, snapshot['volume_id'])
self.volume_rpcapi.delete_snapshot(context, snapshot, volume['host'])
@wrap_check_policy
def update_snapshot(self, context, snapshot, fields):
self.db.snapshot_update(context, snapshot['id'], fields)
@wrap_check_policy
def get_volume_metadata(self, context, volume):
"""Get all metadata associated with a volume."""
rv = self.db.volume_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_metadata(self, context, volume, key):
"""Delete the given metadata item from a volume."""
self.db.volume_metadata_delete(context, volume['id'], key)
def _check_metadata_properties(self, metadata=None):
if not metadata:
metadata = {}
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidVolumeMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidVolumeMetadataSize(reason=msg)
@wrap_check_policy
def update_volume_metadata(self, context, volume, metadata, delete=False):
"""Updates or creates volume metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.volume_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return db_meta
def get_volume_metadata_value(self, volume, key):
"""Get value of particular metadata key."""
metadata = volume.get('volume_metadata')
if metadata:
for i in volume['volume_metadata']:
if i['key'] == key:
return i['value']
return None
@wrap_check_policy
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_admin_metadata(self, context, volume, key):
"""Delete the given administration metadata item from a volume."""
self.db.volume_admin_metadata_delete(context, volume['id'], key)
@wrap_check_policy
def update_volume_admin_metadata(self, context, volume, metadata,
delete=False):
"""Updates or creates volume administration metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_volume_admin_metadata(context, volume)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
self.db.volume_admin_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return _metadata
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
rv = self.db.snapshot_metadata_get(context, snapshot['id'])
return dict(rv.iteritems())
def delete_snapshot_metadata(self, context, snapshot, key):
"""Delete the given metadata item from a snapshot."""
self.db.snapshot_metadata_delete(context, snapshot['id'], key)
def update_snapshot_metadata(self, context,
snapshot, metadata,
delete=False):
"""Updates or creates snapshot metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
if delete:
_metadata = metadata
else:
orig_meta = self.get_snapshot_metadata(context, snapshot)
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(_metadata)
db_meta = self.db.snapshot_metadata_update(context,
snapshot['id'],
_metadata,
True)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return db_meta
def get_snapshot_metadata_value(self, snapshot, key):
pass
def get_volumes_image_metadata(self, context):
check_policy(context, 'get_volumes_image_metadata')
db_data = self.db.volume_glance_metadata_get_all(context)
results = collections.defaultdict(dict)
for meta_entry in db_data:
results[meta_entry['volume_id']].update({meta_entry['key']:
meta_entry['value']})
return results
@wrap_check_policy
def get_volume_image_metadata(self, context, volume):
db_data = self.db.volume_glance_metadata_get(context, volume['id'])
return dict(
(meta_entry.key, meta_entry.value) for meta_entry in db_data
)
def _check_volume_availability(self, volume, force):
"""Check if the volume can be used."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
raise exception.InvalidVolume(reason=msg)
if not force and 'in-use' == volume['status']:
msg = _('Volume status is in-use.')
raise exception.InvalidVolume(reason=msg)
@wrap_check_policy
def copy_volume_to_image(self, context, volume, metadata, force):
"""Create a new image from the specified volume."""
self._check_volume_availability(volume, force)
recv_metadata = self.image_service.create(context, metadata)
self.update(context, volume, {'status': 'uploading'})
self.volume_rpcapi.copy_volume_to_image(context,
volume,
recv_metadata)
response = {"id": volume['id'],
"updated_at": volume['updated_at'],
"status": 'uploading',
"display_description": volume['display_description'],
"size": volume['size'],
"volume_type": volume['volume_type'],
"image_id": recv_metadata['id'],
"container_format": recv_metadata['container_format'],
"disk_format": recv_metadata['disk_format'],
"image_name": recv_metadata.get('name', None)}
return response
@wrap_check_policy
def extend(self, context, volume, new_size):
if volume['status'] != 'available':
msg = _('Volume status must be available to extend.')
raise exception.InvalidVolume(reason=msg)
size_increase = (int(new_size)) - volume['size']
if size_increase <= 0:
msg = (_("New size for extend must be greater "
"than current size. (current: %(size)s, "
"extended: %(new_size)s)") % {'new_size': new_size,
'size': volume['size']})
raise exception.InvalidInput(reason=msg)
try:
reservations = QUOTAS.reserve(context, gigabytes=+size_increase)
except exception.OverQuota as exc:
usages = exc.kwargs['usages']
quotas = exc.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
msg = _("Quota exceeded for %(s_pid)s, tried to extend volume by "
"%(s_size)sG, (%(d_consumed)dG of %(d_quota)dG already "
"consumed).")
LOG.error(msg % {'s_pid': context.project_id,
's_size': size_increase,
'd_consumed': _consumed('gigabytes'),
'd_quota': quotas['gigabytes']})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=size_increase,
consumed=_consumed('gigabytes'),
quota=quotas['gigabytes'])
self.update(context, volume, {'status': 'extending'})
self.volume_rpcapi.extend_volume(context, volume, new_size,
reservations)
@wrap_check_policy
def migrate_volume(self, context, volume, host, force_host_copy):
"""Migrate the volume to the specified host."""
# We only handle "available" volumes for now
if volume['status'] not in ['available', 'in-use']:
msg = _('Volume status must be available/in-use.')
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure volume is not part of a migration
if volume['migration_status'] is not None:
msg = _("Volume is already part of an active migration")
raise exception.InvalidVolume(reason=msg)
# We only handle volumes without snapshots for now
snaps = self.db.snapshot_get_all_for_volume(context, volume['id'])
if snaps:
msg = _("volume must not have snapshots")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Make sure the host is in the list of available hosts
elevated = context.elevated()
topic = CONF.volume_topic
services = self.db.service_get_all_by_topic(elevated,
topic,
disabled=False)
found = False
for service in services:
if utils.service_is_up(service) and service['host'] == host:
found = True
if not found:
msg = (_('No available service named %s') % host)
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
# Make sure the destination host is different than the current one
if host == volume['host']:
msg = _('Destination host must be different than current host')
LOG.error(msg)
raise exception.InvalidHost(reason=msg)
self.update(context, volume, {'migration_status': 'starting'})
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume_type = {}
volume_type_id = volume['volume_type_id']
if volume_type_id:
volume_type = volume_types.get_volume_type(context, volume_type_id)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id']}
self.scheduler_rpcapi.migrate_volume_to_host(context,
CONF.volume_topic,
volume['id'],
host,
force_host_copy,
request_spec)
@wrap_check_policy
def migrate_volume_completion(self, context, volume, new_volume, error):
# This is a volume swap initiated by Nova, not Cinder. Nova expects
# us to return the new_volume_id.
if not (volume['migration_status'] or new_volume['migration_status']):
return new_volume['id']
if not volume['migration_status']:
msg = _('Source volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
if not new_volume['migration_status']:
msg = _('Destination volume not mid-migration.')
raise exception.InvalidVolume(reason=msg)
expected_status = 'target:%s' % volume['id']
if not new_volume['migration_status'] == expected_status:
msg = (_('Destination has migration_status %(stat)s, expected '
'%(exp)s.') % {'stat': new_volume['migration_status'],
'exp': expected_status})
raise exception.InvalidVolume(reason=msg)
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
@wrap_check_policy
def update_readonly_flag(self, context, volume, flag):
if volume['status'] != 'available':
msg = _('Volume status must be available to update readonly flag.')
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': str(flag)})
@wrap_check_policy
def retype(self, context, volume, new_type, migration_policy=None):
"""Attempt to modify the type associated with an existing volume."""
if volume['status'] not in ['available', 'in-use']:
msg = _('Unable to update type due to incorrect status '
'on volume: %s') % volume['id']
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume['migration_status'] is not None:
msg = (_("Volume %s is already part of an active migration.")
% volume['id'])
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if migration_policy and migration_policy not in ['on-demand', 'never']:
msg = _('migration_policy must be \'on-demand\' or \'never\', '
'passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
# Support specifying volume type by ID or name
try:
if uuidutils.is_uuid_like(new_type):
vol_type = volume_types.get_volume_type(context, new_type)
else:
vol_type = volume_types.get_volume_type_by_name(context,
new_type)
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
vol_type_qos_id = vol_type['qos_specs_id']
old_vol_type = None
old_vol_type_id = volume['volume_type_id']
old_vol_type_qos_id = None
# Error if the original and new type are the same
if volume['volume_type_id'] == vol_type_id:
msg = (_('New volume_type same as original: %s') % new_type)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if volume['volume_type_id']:
old_vol_type = volume_types.get_volume_type(
context, old_vol_type_id)
old_vol_type_qos_id = old_vol_type['qos_specs_id']
# We don't support changing encryption requirements yet
old_enc = volume_types.get_volume_type_encryption(context,
old_vol_type_id)
new_enc = volume_types.get_volume_type_encryption(context,
vol_type_id)
if old_enc != new_enc:
msg = _('Retype cannot change encryption requirements')
raise exception.InvalidInput(reason=msg)
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
if (volume['status'] != 'available' and
old_vol_type_qos_id != vol_type_qos_id):
for qos_id in [old_vol_type_qos_id, vol_type_qos_id]:
if qos_id:
specs = qos_specs.get_qos_specs(context.elevated(), qos_id)
if specs['qos_specs']['consumer'] != 'back-end':
msg = _('Retype cannot change front-end qos specs for '
'in-use volumes')
raise exception.InvalidInput(reason=msg)
# We're checking here in so that we can report any quota issues as
# early as possible, but won't commit until we change the type. We
# pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(context, volume,
vol_type_id)
self.update(context, volume, {'status': 'retyping'})
request_spec = {'volume_properties': volume,
'volume_id': volume['id'],
'volume_type': vol_type,
'migration_policy': migration_policy,
'quota_reservations': reservations}
self.scheduler_rpcapi.retype(context, CONF.volume_topic, volume['id'],
request_spec=request_spec,
filter_properties={})
def manage_existing(self, context, host, ref, name=None, description=None,
volume_type=None, metadata=None,
availability_zone=None):
if availability_zone is None:
elevated = context.elevated()
try:
service = self.db.service_get_by_host_and_topic(
elevated, host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_('Unable to find service for given host.'))
availability_zone = service.get('availability_zone')
volume_type_id = volume_type['id'] if volume_type else None
volume_properties = {
'size': 0,
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
# Rename these to the internal name.
'display_description': description,
'display_name': name,
'host': host,
'availability_zone': availability_zone,
'volume_type_id': volume_type_id,
'metadata': metadata
}
# Call the scheduler to ensure that the host exists and that it can
# accept the volume
volume = self.db.volume_create(context, volume_properties)
request_spec = {'volume_properties': volume,
'volume_type': volume_type,
'volume_id': volume['id'],
'ref': ref}
self.scheduler_rpcapi.manage_existing(context, CONF.volume_topic,
volume['id'],
request_spec=request_spec)
return volume
class HostAPI(base.Base):
    """Sub-set of the Volume Manager API for managing host operations."""
    def __init__(self):
        super(HostAPI, self).__init__()
def set_host_enabled(self, context, host, enabled):
"""Sets the specified host's ability to accept new volumes."""
raise NotImplementedError()
def get_host_uptime(self, context, host):
"""Returns the result of calling "uptime" on the target host."""
raise NotImplementedError()
def host_power_action(self, context, host, action):
raise NotImplementedError()
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
volume evacuation.
"""
raise NotImplementedError()
| apache-2.0 | -8,834,907,751,923,250,000 | 41.749509 | 79 | 0.547163 | false | 4.663416 | false | false | false |
teopeurt/tapz | tapz/panels/intervals.py | 1 | 1969 | import datetime
class Interval(object):
"Base date interval class"
@classmethod
def range(cls, start, end):
"""
Return a range of datetime objects between start and end
"""
r = []
while start <= end:
r.append(start)
start += cls.delta
return r
@classmethod
def pack(cls, value):
"""
Pack a datetime object into a string
"""
return value.strftime(cls.pack_format_string)
@classmethod
def unpack(cls, value):
"""
Unpack a string into a datetime object
"""
return datetime.datetime.strptime(value, cls.pack_format_string)
@classmethod
def display_format(cls, rng):
return [d.strftime(cls.display_format_string) for d in rng]
@classmethod
def pack_format(cls, rng):
return [d.strftime(cls.pack_format_string) for d in rng]
class Hour(Interval):
display_name = 'Hour'
divides_into = None
pack_format_string = '%Y%m%d%H'
delta = datetime.timedelta(hours=1)
display_format_string = "%H"
class Day(Interval):
display_name = 'Day'
divides_into = Hour
pack_format_string = '%Y%m%d'
delta = datetime.timedelta(days=1)
display_format_string = "%m-%d-%y"
class Month(Interval):
display_name = 'Month'
divides_into = Day
pack_format_string = '%Y%m'
display_format_string = "%M %y"
@classmethod
def range(cls, start, end):
# there's no "months" arg for timedelta.
r = []
# reset the start date to the beginning of the month
start = datetime.datetime(year=start.year, month=start.month, day=1)
while start <= end:
r.append(start)
if start.month == 12:
start = datetime.datetime(year=start.year+1, month=1, day=1)
else:
start = datetime.datetime(year=start.year, month=start.month+1, day=1)
return r
| bsd-3-clause | -3,802,492,518,965,144,000 | 26.732394 | 86 | 0.586592 | false | 3.772031 | false | false | false |
GNOME/gnome-dvb-daemon | tests/test-dvb-daemon.py | 3 | 17249 | import gnomedvb
import unittest
import sys
import random
import datetime
import time
import re
from gi.repository import GLib
class DVBTestCase(unittest.TestCase):
def assertSuccessAndType(self, data, objtype):
self.assertType(data[1], bool)
self.assertTrue(data[1])
self.assertType(data[0], objtype)
def assertTypeAll(self, objlist, objtype):
for obj in objlist:
self.assertType(obj, objtype)
def assertType(self, obj, objtype):
if not isinstance(obj, objtype):
raise self.failureException, \
"%r is not %r" % (obj, objtype)
class TestManager(DVBTestCase):
def setUp(self):
self.manager = gnomedvb.DVBManagerClient()
def testGetChannelGroups(self):
data = self.manager.get_channel_groups()
self.assertType(data, list)
for cid, name in data:
self.assertType(cid, int)
self.assertType(name, str)
def testAddDeleteChannelGroup(self):
name = "Test Group %f" % random.random()
data = self.manager.add_channel_group(name)
self.assertSuccessAndType(data, int)
has_group = False
for gid, gname in self.manager.get_channel_groups():
if gid == data[0]:
self.assertEqual(name, gname)
has_group = True
break
self.assertTrue(has_group)
self.assertTrue(self.manager.remove_channel_group(data[0]))
def testAddDeviceNotExists(self):
adapter = 9
frontend = 0
self.assertFalse(self.manager.add_device_to_new_group (
adapter, frontend,
"channels.conf", "Recordings", "Test Group"))
class DeviceGroupTestCase(DVBTestCase):
def setUp(self):
self.manager = gnomedvb.DVBManagerClient()
self.devgroups = self.manager.get_registered_device_groups()
self.assertTypeAll(self.devgroups, gnomedvb.DVBDeviceGroupClient)
class TestDeviceGroup(DeviceGroupTestCase):
def testGetSetType(self):
for dg in self.devgroups:
dtype = dg.get_type()
self.assertType(dtype, str)
self.assert_(dtype in ("DVB-C", "DVB-S", "DVB-T"))
name_before = dg.get_name()
self.assertType(name_before, str)
new_name = "%s %f" % (name_before, random.random())
self.assertTrue(dg.set_name(new_name))
self.assertEqual(dg.get_name(), new_name)
self.assertTrue(dg.set_name(name_before))
def testGetMembers(self):
for dg in self.devgroups:
for member in dg.get_members():
self.assert_(member.startswith("/dev/dvb/adapter"))
def testGetRecordingsDirectory(self):
for dg in self.devgroups:
self.assertType(dg.get_recordings_directory(), str)
class TestScanner(DeviceGroupTestCase):
def setUp(self):
DeviceGroupTestCase.setUp(self)
self.path_regex = re.compile(r"/dev/dvb/adapter(\d+)/frontend(\d+)")
def testGetScanner(self):
for dg in self.devgroups:
for member in dg.get_members():
match = self.path_regex.search(member)
self.assertNotEqual(match, None)
adapter, frontend = match.group(1, 2)
scanner = self.manager.get_scanner_for_device(int(adapter),
int(frontend))
self.assertType(scanner, gnomedvb.DVBScannerClient)
data = {"frequency": GLib.Variant('u', 738000000),
"hierarchy": GLib.Variant('u', 0), # NONE
"bandwidth": GLib.Variant('u', 8), # 8MHz
"transmission-mode": GLib.Variant('s', "8k"),
"code-rate-hp": GLib.Variant('s', "2/3"),
"code-rate-lp": GLib.Variant('s', "NONE"),
"constellation": GLib.Variant('s', "QAM16"),
"guard-interval": GLib.Variant('u', 4),} # 1/4
success = scanner.add_scanning_data(data)
self.assertTrue(success)
self.assertType(success, bool)
scanner.run()
time.sleep(15)
scanner.destroy()
class TestChannelList(DeviceGroupTestCase):
def setUp(self):
DeviceGroupTestCase.setUp(self)
self.chanlists = []
for dg in self.devgroups:
self.chanlists.append(dg.get_channel_list())
self.assertTypeAll(self.chanlists, gnomedvb.DVBChannelListClient)
self.changroups = [data[0] for data in self.manager.get_channel_groups()]
def testGetChannels(self):
for cl in self.chanlists:
ids = cl.get_channels()
self.assertTypeAll(ids, long)
for cid in ids:
self.assertSuccessAndType(cl.get_channel_name(cid),
str)
self.assertSuccessAndType(cl.get_channel_network(cid),
str)
self.assertSuccessAndType(cl.get_channel_url(cid),
str)
def testGetChannelInfos(self):
for cl in self.chanlists:
for cid, name, is_radio in cl.get_channel_infos():
self.assertType(cid, long)
self.assertType(name, str)
self.assertType(is_radio, bool)
def testGetTVChannels(self):
for cl in self.chanlists:
ids = cl.get_tv_channels()
self.assertTypeAll(ids, long)
for cid in ids:
data = cl.is_radio_channel(cid)
self.assertSuccessAndType(data, bool)
self.assertFalse(data[0])
def testGetRadioChannels(self):
for cl in self.chanlists:
ids = cl.get_radio_channels()
self.assertTypeAll(ids, long)
for cid in ids:
data = cl.is_radio_channel(cid)
self.assertSuccessAndType(data, bool)
self.assertTrue(data[0])
def testGetChannelsOfGroup(self):
for cl in self.chanlists:
all_channels = set(cl.get_channels())
for gid in self.changroups:
data = cl.get_channels_of_group(gid)
self.assertTrue(data[1])
self.assertTypeAll(data[0], long)
group_chans = set(data[0])
other_chans = all_channels - group_chans
for chan in other_chans:
self.assertTrue(cl.add_channel_to_group(chan, gid))
data = cl.get_channels_of_group(gid)
self.assertTrue(chan in data[0])
self.assertTrue(cl.remove_channel_from_group(chan, gid))
def testChannelNotExists(self):
cid = 1000
for cl in self.chanlists:
self.assertFalse(cl.get_channel_name(cid)[1])
self.assertFalse(cl.get_channel_network(cid)[1])
self.assertFalse(cl.get_channel_url(cid)[1])
self.assertFalse(cl.is_radio_channel(cid)[1])
self.assertFalse(cl.add_channel_to_group(cid, 1000))
self.assertFalse(cl.remove_channel_from_group(cid, 1000))
class TestRecorder(DeviceGroupTestCase):
DURATION = 2
def _get_time_now(self):
nowt = datetime.datetime.now()
# We don't want (micro)seconds
now = datetime.datetime(nowt.year, nowt.month,
nowt.day, nowt.hour, nowt.minute)
return now
def setUp(self):
DeviceGroupTestCase.setUp(self)
self.recorder = []
self.channels = []
for dg in self.devgroups:
chanlist = dg.get_channel_list()
self.channels.append(chanlist.get_tv_channels()[0])
self.recorder.append(dg.get_recorder())
self.assertTypeAll(self.recorder, gnomedvb.DVBRecorderClient)
def _assert_time_equals(self, expected, actual):
self.assertTypeAll(actual, long)
self.assertEqual(len(actual), 5)
self.assertEqual(expected.year, actual[0])
self.assertEqual(expected.month, actual[1])
self.assertEqual(expected.day, actual[2])
self.assertEqual(expected.hour, actual[3])
self.assertEqual(expected.minute, actual[4])
def testAddTimer(self):
for i, rec in enumerate(self.recorder):
now = self._get_time_now()
delay = datetime.timedelta(hours=2)
delayed = now + delay
chan = self.channels[i]
data = rec.add_timer(chan, delayed.year, delayed.month,
delayed.day, delayed.hour, delayed.minute, self.DURATION * 2)
self.assertSuccessAndType(data, long)
rec_id = data[0]
data = rec.get_start_time(rec_id)
self.assertSuccessAndType(data, list)
start = data[0]
self._assert_time_equals(delayed, start)
data = rec.get_duration(rec_id)
self.assertSuccessAndType(data, long)
self.assertEqual(data[0], self.DURATION * 2)
self.assertTrue(rec.set_start_time(rec_id, now.year, now.month,
now.day, now.hour, now.minute))
data = rec.get_start_time(rec_id)
self.assertSuccessAndType(data, list)
start = data[0]
self._assert_time_equals(now, start)
self.assertTrue(rec.set_duration(rec_id, self.DURATION))
data = rec.get_duration(rec_id)
self.assertSuccessAndType(data, long)
self.assertEqual(data[0], self.DURATION)
time.sleep(10)
self.assert_(rec_id in rec.get_active_timers())
self.assertTrue(rec.is_timer_active(rec_id))
self.assertTrue(rec.has_timer(now.year, now.month, now.day,
now.hour, now.minute, self.DURATION))
data = rec.get_end_time(rec_id)
self.assertSuccessAndType(data, list)
end = data[0]
self.assertTypeAll(end, long)
self.assertEqual(len(end), 5)
endt = datetime.datetime(*end)
self.assertEqual(endt - now,
datetime.timedelta(minutes=self.DURATION))
self.assertSuccessAndType(rec.get_channel_name(rec_id),
str)
self.assertSuccessAndType(rec.get_title(rec_id), str)
data = rec.get_all_informations(rec_id)
self.assertSuccessAndType(data, tuple)
rid, duration, active, channel, title = data[0]
self.assertEqual(rid, rec_id)
self.assertEqual(duration, self.DURATION)
self.assertTrue(active)
self.assertType(channel, str)
self.assertType(title, str)
self.assertFalse(rec.set_start_time(rec_id, delayed.year,
delayed.month, delayed.day, delayed.hour, delayed.minute))
time.sleep(20)
self.assertTrue(rec.delete_timer(rec_id))
self.assertFalse(rec.has_timer(now.year, now.month, now.day,
now.hour, now.minute, self.DURATION))
def testTimerNotExists(self):
rec_id = 1000
for rec in self.recorder:
self.assertFalse(rec.delete_timer(rec_id))
self.assertFalse(rec.get_start_time(rec_id)[1])
self.assertFalse(rec.get_end_time(rec_id)[1])
self.assertFalse(rec.get_duration(rec_id)[1])
self.assertFalse(rec.get_channel_name(rec_id)[1])
self.assertFalse(rec.get_title(rec_id)[1])
self.assertFalse(rec.is_timer_active(rec_id))
self.assertFalse(rec.get_all_informations(rec_id)[1])
self.assertFalse(rec.set_start_time(rec_id, 2010, 1, 5, 15, 0))
class TestSchedule(DeviceGroupTestCase):
def setUp(self):
DeviceGroupTestCase.setUp(self)
self.schedules = []
for dg in self.devgroups:
chanlist = dg.get_channel_list()
for chan in chanlist.get_channels():
self.schedules.append(dg.get_schedule(chan))
self.assertTypeAll(self.schedules, gnomedvb.DVBScheduleClient)
def testGetAllEvents(self):
for sched in self.schedules:
for eid in sched.get_all_events():
self._get_event_details(sched, eid)
def _get_event_details(self, sched, eid):
self.assertSuccessAndType(sched.get_name(eid), str)
self.assertSuccessAndType(sched.get_short_description(eid),
str)
self.assertSuccessAndType(sched.get_extended_description(eid),
str)
self.assertSuccessAndType(sched.get_duration(eid), long)
data = sched.get_local_start_time(eid)
self.assertSuccessAndType(data, list)
self.assertTypeAll(data[0], long)
self.assertSuccessAndType(sched.get_local_start_timestamp(eid),
long)
self.assertSuccessAndType(sched.is_running(eid), bool)
self.assertSuccessAndType(sched.is_scrambled(eid), bool)
data = sched.get_informations(eid)
self.assertSuccessAndType(data, tuple)
eeid, next, name, duration, desc = data[0]
self.assertEqual(eeid, eid)
self.assertType(next, long)
self.assertType(name, str)
self.assertType(duration, long)
self.assertType(desc, str)
def testNowPlaying(self):
for sched in self.schedules:
eid = sched.now_playing()
self.assertType(eid, long)
if eid != 0:
self._get_event_details(sched, eid)
def testNext(self):
for sched in self.schedules:
eid = sched.now_playing()
while eid != 0:
eid = sched.next(eid)
self.assertType(eid, long)
def testEventNotExists(self):
eid = 1
for sched in self.schedules:
self.assertFalse(sched.get_name(eid)[1])
self.assertFalse(sched.get_short_description(eid)[1])
self.assertFalse(sched.get_extended_description(eid)[1])
self.assertFalse(sched.get_duration(eid)[1])
self.assertFalse(sched.get_local_start_time(eid)[1])
self.assertFalse(sched.get_local_start_timestamp(eid)[1])
self.assertFalse(sched.is_running(eid)[1])
self.assertFalse(sched.is_scrambled(eid)[1])
self.assertFalse(sched.get_informations(eid)[1])
class TestRecordingsStore(DVBTestCase):
def setUp(self):
self.recstore = gnomedvb.DVBRecordingsStoreClient()
def testGetRecordings(self):
rec_ids = self.recstore.get_recordings()
for rid in rec_ids:
self.assertSuccessAndType(self.recstore.get_channel_name(rid),
str)
self.assertSuccessAndType(self.recstore.get_location(rid),
str)
start_data = self.recstore.get_start_time(rid)
self.assertSuccessAndType(start_data, list)
start = start_data[0]
self.assertEqual(len(start), 5)
self.assertTypeAll(start, long)
self.assertSuccessAndType(self.recstore.get_start_timestamp(rid),
long)
self.assertSuccessAndType(self.recstore.get_length(rid),
long)
self.assertSuccessAndType(self.recstore.get_name (rid),
str)
self.assertSuccessAndType(self.recstore.get_description(rid),
str)
def testGetRecordingsNotExists(self):
rid = 1000
self.assertFalse(self.recstore.get_channel_name(rid)[1])
self.assertFalse(self.recstore.get_location(rid)[1])
self.assertFalse(self.recstore.get_start_time(rid)[1])
self.assertFalse(self.recstore.get_start_timestamp(rid)[1])
self.assertFalse(self.recstore.get_length(rid)[1])
self.assertFalse(self.recstore.get_name (rid)[1])
self.assertFalse(self.recstore.get_description(rid)[1])
def testGetAllInformations(self):
rec_ids = self.recstore.get_recordings()
for rid in rec_ids:
data = self.recstore.get_all_informations(rid)
self.assertType(data[1], bool)
self.assertTrue(data[1])
self.assertType(data[0], tuple)
rrid, name, desc, length, ts, chan, loc = data[0]
self.assertType(rrid, long)
self.assertEqual(rrid, rid)
self.assertType(name, str)
self.assertType(desc, str)
self.assertType(length, long)
self.assertType(ts, long)
self.assertType(chan, str)
self.assertType(loc, str)
def testGetAllInformationsNotExists(self):
rid = 1000
data = self.recstore.get_all_informations(rid)
self.assertType(data[1], bool)
self.assertFalse(data[1])
if __name__ == '__main__':
loop = GLib.MainLoop()
unittest.main()
loop.run()
| gpl-3.0 | 2,183,226,175,969,756,400 | 37.588367 | 81 | 0.575859 | false | 3.852803 | true | false | false |
stephrdev/brigitte | brigitte/repositories/migrations/0004_auto__add_repositoryuser__add_unique_repositoryuser_repo_user.py | 1 | 5420 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'RepositoryUser'
db.create_table('repositories_repositoryuser', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('repo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['repositories.Repository'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('repositories', ['RepositoryUser'])
# Adding unique constraint on 'RepositoryUser', fields ['repo', 'user']
db.create_unique('repositories_repositoryuser', ['repo_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'RepositoryUser', fields ['repo', 'user']
db.delete_unique('repositories_repositoryuser', ['repo_id', 'user_id'])
# Deleting model 'RepositoryUser'
db.delete_table('repositories_repositoryuser')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'repositories.repository': {
'Meta': {'unique_together': "(('user', 'slug'),)", 'object_name': 'Repository'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'repositories.repositoryuser': {
'Meta': {'unique_together': "(('repo', 'user'),)", 'object_name': 'RepositoryUser'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['repositories.Repository']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['repositories']
| bsd-3-clause | -4,994,757,493,552,065,000 | 62.023256 | 182 | 0.564207 | false | 3.841247 | false | false | false |
takeshixx/deen | deen/plugins/assemblies/mips.py | 1 | 1991 | try:
import keystone
except ImportError:
keystone = None
try:
import capstone
except ImportError:
capstone = None
from .__base__ import AsmBase
class DeenPluginAsmMips(AsmBase):
name = 'assembly_mips'
display_name = 'MIPS'
aliases = ['asm_mips',
'mips32',
'mips']
cmd_name = 'assembly_mips'
cmd_help='Assemble/Disassemble for the MIPS architecture'
keystone_arch = keystone.KS_ARCH_MIPS \
if (keystone and hasattr(keystone, 'KS_ARCH_MIPS')) else None
keystone_mode = keystone.KS_MODE_MIPS32 \
if (keystone and hasattr(keystone, 'KS_MODE_MIPS32')) else None
capstone_arch = capstone.CS_ARCH_MIPS \
if (capstone and hasattr(capstone, 'CS_ARCH_MIPS')) else None
capstone_mode = capstone.CS_MODE_MIPS32 \
if (capstone and hasattr(capstone, 'CS_MODE_MIPS32')) else None
@staticmethod
def add_argparser(argparser, plugin_class, *args, **kwargs):
# Add an additional argument for big endian mode.
parser = AsmBase.add_argparser(argparser, plugin_class)
parser.add_argument('-e', '--big-endian', dest='bigendian',
default=False, help='use big endian',
action='store_true')
class DeenPluginAsmMips64(DeenPluginAsmMips):
name = 'assembly_mips64'
display_name = 'MIPS64'
aliases = ['asm_mips64',
'mips64']
cmd_name = 'assembly_mips64'
cmd_help='Assemble/Disassemble for the MIPS64 architecture'
keystone_arch = keystone.KS_ARCH_MIPS \
if (keystone and hasattr(keystone, 'KS_ARCH_MIPS')) else None
keystone_mode = keystone.KS_MODE_MIPS64 \
if (keystone and hasattr(keystone, 'KS_MODE_MIPS64')) else None
capstone_arch = capstone.CS_ARCH_MIPS \
if (capstone and hasattr(capstone, 'CS_ARCH_MIPS')) else None
capstone_mode = capstone.CS_MODE_MIPS64 \
if (capstone and hasattr(capstone, 'CS_MODE_MIPS64')) else None
| apache-2.0 | -2,235,022,324,194,722,800 | 36.566038 | 71 | 0.643395 | false | 3.420962 | false | false | false |
nioushajafari/iranNewsBot | bot.py | 1 | 3332 | # -*- coding: utf-8 -*-
import sys
import HTMLParser
import os
import urllib2
import tweepy
import json
from time import gmtime, strftime
from secrets import *
reload(sys)
sys.setdefaultencoding('utf-8')
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
hparser = HTMLParser.HTMLParser()
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
tweets = api.user_timeline('IranNewsBot')
# ====== Individual bot configuration ==========================
bot_username = 'IranNewsBot'
logfile_name = bot_username + ".log"
# ==============================================================
def get():
# Get the headlines, iterate through them to try to find a suitable one
page = 1
while page <= 3:
try:
request = urllib2.Request(
"http://content.guardianapis.com/search?format=json&page-size=50&page=" +
str(page) + "&api-key=" + GUARDIAN_KEY)
response = urllib2.urlopen(request)
except urllib2.URLError as e:
print(e.reason)
else:
            items = json.loads(response.read())
for item in items['response']['results']:
headline = item['webTitle'].encode('utf-8', 'ignore')
h_split = headline.split()
# We don't want to use incomplete headlines
if "..." in headline:
continue
# Try to weed out all-caps headlines
if count_caps(h_split) >= len(h_split) - 3:
continue
# Remove attribution string
if "-" in headline:
headline = headline.split("-")[:-1]
headline = ' '.join(headline).strip()
                if process(headline):
                    return
            # Advance the page counter only after every headline on this page
            # has been tried; bumping it per headline would end the search
            # after a handful of items instead of after three pages.
            page += 1
# Log that no tweet could be made
f = open(os.path.join(__location__, "IranNewsBot.log"), 'a')
t = strftime("%d %b %Y %H:%M:%S", gmtime())
f.write("\n" + t + " No possible tweet.")
f.close()
def process(headline):
# Don't tweet anything that's too long
if len(headline) > 140:
return False
# only tweet if Iran is mentioned
if "Iran" in headline:
return tweet(headline)
else:
return False
def tweet(headline):
# Check that we haven't tweeted this before
for tweet in tweets:
if headline == tweet.text:
return False
# Log tweet to file
f = open(os.path.join(__location__, "IranNewsBot.log"), 'a')
t = strftime("%d %b %Y %H:%M:%S", gmtime())
f.write(("\n" + t + " " + headline).encode('utf-8', 'ignore'))
f.close()
# Post tweet
api.update_status(status=headline)
return True
def count_caps(headline):
count = 0
for word in headline:
if word[0].isupper():
count += 1
return count
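# For example, count_caps(["Iran", "deal", "Reached"]) returns 2; a headline in
# which nearly every word is capitalised therefore trips the all-caps filter in
# get() above.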
def log(message):
"""Log message to logfile."""
path = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(path, logfile_name), 'a+') as f:
t = strftime("%d %b %Y %H:%M:%S", gmtime())
f.write("\n" + t + " " + message)
if __name__ == "__main__":
get()
| mit | 8,835,655,319,347,536,000 | 25.444444 | 89 | 0.537215 | false | 3.756483 | false | false | false |
zaneb/matahari | src/python/matahari/shell/types.py | 1 | 9668 | # Copyright (C) 2011 Red Hat, Inc.
# Written by Zane Bitter <zbitter@redhat.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import uuid
class Host(object):
def __init__(self, state):
self.manager = state.manager
def __call__(self, param):
for h in self.manager.hosts():
if str(h) == param:
return h
raise ValueError('Host "%s" not found' % param)
def complete(self, param):
return filter(lambda s: s.startswith(param),
map(str, self.manager.hosts()))
class SharedInfo(object):
def __init__(self, default=None):
self.last = self.default = default
def set(self, last):
self.last = last
def setdefault(self):
self.last = self.default
def get(self):
last, self.last = self.last, self.default
return last
class Package(object):
def __init__(self, state, info):
self.state = state
self.info = info
self.info.setdefault()
def __call__(self, param):
if param not in self.state.list_packages():
self.info.setdefault()
raise ValueError('Package "%s" not found' % param)
self.info.set(param)
return param
def default(self):
self.info.setdefault()
return self.info.default
def complete(self, param):
packages = self.state.list_packages()
if param in packages:
self.info.set(param)
else:
self.info.setdefault()
return [p + ' ' for p in packages if p.startswith(param)]
class Class(object):
def __init__(self, state, pkginfo):
self.state = state
self.pkginfo = pkginfo
def __call__(self, param):
package = self.pkginfo.get()
if param not in self.state.list_classnames(package):
fq = package is not None and ('%s:%s' % (package, param)) or param
raise ValueError('Class "%s" not found' % fq)
return param
def complete(self, param):
classnames = self.state.list_classnames(self.pkginfo.get())
return [c + ' ' for c in classnames if c.startswith(param)]
QMF_TYPES = (U8, U16, U32, U64,
_UNUSED,
SSTR, LSTR,
ABSTIME, DELTATIME,
REF,
BOOL,
FLOAT, DOUBLE,
UUID,
FTABLE,
S8, S16, S32, S64,
OBJECT, LIST, ARRAY) = range(1, 23)
class QMFProperty(object):
"""CLI verification class for a QMF Property argument"""
def __new__(cls, arg):
"""Create a CLI argument of the correct type from a SchemaArg."""
if cls == QMFProperty:
return _qmfPropertyTypes.get(arg.type, String)(arg)
else:
return super(QMFProperty, cls).__new__(cls, arg)
def __init__(self, arg):
self.arg = arg
self.__name__ = str(self)
def __repr__(self):
return self.__class__.__name__.lower()
def __str__(self):
return self.arg.name
def help(self):
return ('%9s %-18s %s' % (repr(self),
str(self),
self.arg.desc or '')).rstrip()
def default(self):
return self.arg.default
class String(QMFProperty):
"""A QMF String property argument"""
def __repr__(self):
if self.arg.type not in _qmfPropertyTypes:
return str(self)
return self.arg.type == SSTR and 'sstr' or 'lstr'
def __call__(self, param):
maxlen = self.arg.maxlen or (self.arg.type == SSTR and 255) or 65535
if len(param) > maxlen:
raise ValueError('Parameter is too long')
return param
class Bool(QMFProperty):
"""A QMF Boolean property argument"""
TRUTHY = ('true', 't', '1', 'y', 'yes')
FALSEY = ('false', 'f', '0', 'n', 'no')
def __call__(self, param):
lc = param.lower()
if lc in self.TRUTHY:
return True
if lc in self.FALSEY:
return False
try:
value = eval(param, {}, {})
except (NameError, SyntaxError):
raise ValueError('"%s" is not a boolean' % (param,))
if not isinstance(value, (int, bool)):
raise ValueError('"%s" is not a boolean' % (param,))
return bool(value)
def complete(self, param):
if not param:
return map(lambda l: l[0].capitalize(), (self.TRUTHY, self.FALSEY))
lc = param.lower()
matches = []
for v in self.TRUTHY + self.FALSEY:
if v == lc:
return [param + ' ']
if v.startswith(lc):
return [param + v[len(param):] + ' ']
return []
class Int(QMFProperty):
"""A QMF Integer property argument"""
NAMES = {U8: 'u8', U16: 'u16', U32: 'u32', U64: 'u64',
S8: 's8', S16: 's16', S32: 's32', S64: 's64'}
MINIMUM = {U8: 0, U16: 0,
U32: 0, U64: 0,
S8: -(1 << 7), S16: -(1 << 15),
S32: -(1 << 31), S64: -(1 << 63)}
MAXIMUM = {U8: (1 << 8) - 1, U16: (1 << 16) - 1,
U32: (1 << 32) - 1, U64: (1 << 64) - 1,
S8: (1 << 7) - 1, S16: (1 << 15) - 1,
S32: (1 << 31) - 1, S64: (1 << 63) - 1}
def __str__(self):
if self.arg.min is not None or self.arg.max is not None:
return '<%d-%d>' % (self._min(), self._max())
return QMFProperty.__str__(self)
def __repr__(self):
try:
return self.NAMES[self.arg.type]
except KeyError:
return QMFProperty.__repr__(self)
def _min(self):
"""Get the minimum allowed value"""
if self.arg.min is not None:
return self.arg.min
try:
return self.MINIMUM[self.arg.type]
except KeyError:
return -(1 << 31)
def _max(self):
"""Get the maximum allowed value"""
if self.arg.max is not None:
return self.arg.max
try:
return self.MAXIMUM[self.arg.type]
except KeyError:
return (1 << 31) - 1
def __call__(self, param):
value = int(param)
if value < self._min():
raise ValueError('Value %d underflows minimum of range (%d)' %
(value, self._min()))
if value > self._max():
raise ValueError('Value %d overflows maximum of range (%d)' %
(value, self._max()))
return value
class Float(QMFProperty):
"""A QMF Floating Point property argument"""
def __repr__(self):
return self.arg.type == FLOAT and 'float' or 'double'
def __call__(self, param):
return float(param)
class Uuid(QMFProperty):
"""A QMF UUID property argument"""
LENGTH = 32
def __call__(self, param):
return uuid.UUID(param)
def complete(self, param):
raw = param.replace('-', '')
try:
            val = int(raw, 16)  # validate the hex digits, ignoring any dashes already typed
except ValueError:
return []
if len(raw) in (8, 12, 16, 20):
return [param + '-']
if len(raw) == self.LENGTH:
return [param + ' ']
return ['']
class List(QMFProperty):
"""A QMF List property argument"""
def __call__(self, param):
try:
l = eval(param, {}, {})
except (NameError, SyntaxError):
raise ValueError('"%s" is not a valid list' % (param,))
if not isinstance(l, list):
raise ValueError('"%s" is not a list' % (param,))
return l
def complete(self, param):
if not param:
return ['[']
if not param.startswith('['):
return []
if param.endswith(']'):
try:
self(param)
except ValueError:
return []
return [param + ' ']
return ['']
class Map(QMFProperty):
"""A QMF Map property argument"""
def __call__(self, param):
try:
l = eval(param, {}, {})
except (NameError, SyntaxError):
raise ValueError('"%s" is not a valid map' % (param,))
if not isinstance(l, dict):
raise ValueError('"%s" is not a map' % (param,))
return dict((unicode(k), v) for k, v in l.items())
def complete(self, param):
if not param:
return ['{']
if not param.startswith('{'):
return []
if param.endswith('}'):
try:
self(param)
except ValueError:
return []
return [param + ' ']
return ['']
_qmfPropertyTypes = {
U8: Int,
U16: Int,
U32: Int,
U64: Int,
SSTR: String,
LSTR: String,
BOOL: Bool,
FLOAT: Float,
DOUBLE: Float,
UUID: Uuid,
FTABLE: Map,
S8: Int,
S16: Int,
S32: Int,
S64: Int,
LIST: List,
ARRAY: List,
}
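# Usage sketch (schema_arg stands in for a real qmf SchemaArg): QMFProperty
# dispatches on arg.type through this table, falling back to String for
# unknown types (see QMFProperty.__new__ above).
#     prop = QMFProperty(schema_arg)   # e.g. an Int wrapper when type is U32
#     value = prop("42")               # -> 42 after range checking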
| gpl-2.0 | 5,319,773,342,217,698,000 | 28.386018 | 79 | 0.519032 | false | 3.797329 | false | false | false |
dkdeconti/HLA_epitope_prediction_from_WES | nethmc_parse.py | 1 | 2758 | #! /usr/bin/env python
"""
Parses the NetHMC output into TSV sorted by affinity.
"""
import argparse
import itertools
import re
import sys
from operator import itemgetter
def parse_fasta_for_names(filename):
"""
"""
name_dict = {}
with open(filename, 'rU') as handle:
for line in handle:
if not re.match(">", line):
continue
name = line.strip('\n')[1:]
name_sub = name[:15]
name_dict[name_sub] = name
return name_dict
def parse_tsv(filename, name_dict):
"""
"""
output_matrix = []
with open(filename, 'rU') as handle:
curr_protein = []
for line in handle:
if line[0] == "#" or line[0] == "-" or len(line.strip('\n')) < 1:
continue
if re.match("Protein", line):
continue
arow = line.strip('\n').split()
if arow[0] == "pos":
continue
arow[12] = float(arow[12])
if len(arow[10].split('-')) == 3:
#arow = arow[:10] + arow[10].split('_') + arow[11:]
arow = arow[:10] + name_dict[arow[10]].split('-') + arow[11:]
#print arow
output_matrix.append(arow)
return output_matrix
def main():
"""
Arg parsing and central dispatch.
"""
# arg parsing
parser = argparse.ArgumentParser(description="Parse NetHMC output to TSV.")
parser.add_argument("-f", "--fasta", metavar="FASTA",
help="fasta file for epitopes input into nethmc")
parser.add_argument("nethmc", metavar="NETHMC_OUTPUT",
nargs='+',
help="Output file from NetHMC")
args = parser.parse_args()
# Central dispatch
name_dict = parse_fasta_for_names(args.fasta)
lom = list(itertools.chain.from_iterable([parse_tsv(nethmc, name_dict)
for nethmc in args.nethmc]))
#print set([len(v) for v in list(itertools.chain.from_iterable(lom))])
nethmc_matrix = sorted([v for v in lom if len(v) >= 18 and v[16] <= 50],
key=itemgetter(14))
sys.stdout.write('\t'.join(["pos", "HLA", "peptide", "Core Offset", "I_pos",
"I_len", "D_pos", "D_len", "iCore",
"identity", "chrom", "genomic_pos",
"gene_symbol", "ref_allele", "alt_allele",
"1-log50k(aff)", "Affinity(nM)",
"%Rank BindLevel"]) + '\n')
for v in nethmc_matrix:
sys.stdout.write('\t'.join([str(i) for i in v[:18]]) + '\n')
#nethmc_matrix
if __name__ == "__main__":
main()
| mit | 535,049,607,968,474,560 | 31.833333 | 80 | 0.495286 | false | 3.662683 | false | false | false |
fromleaf/study_two_scoops | two_scoops/taste/views.py | 1 | 2250 | # -*- coding: utf-8 -*-
from cached_property import cached_property
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic import (
ListView, UpdateView, TemplateView, DetailView, CreateView
)
from django.core.urlresolvers import reverse
from braces.views import LoginRequiredMixin
from store.exceptions import OutOfStock
from .models import Flavor, Tasting
from .tasks import update_users_who_favorited
def list_flavor_line_item(sku):
try:
return Flavor.objects.get(sku=sku, quantity__gt=0)
except Flavor.DoesNotExist:
msg = "We are out of {0}".format(sku)
raise OutOfStock(msg)
def list_any_line_item(model, sku):
try:
return model.objects.get(sku=sku, quantity__gt=0)
except ObjectDoesNotExist:
msg = "We are out of {0}".format(sku)
raise OutOfStock(msg)
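# Usage sketch (the SKU string is a made-up example): list_flavor_line_item("van-001")
# returns the in-stock Flavor or raises OutOfStock; list_any_line_item(Flavor, "van-001")
# does the same for any model exposing sku and quantity fields.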
class TasteListView(ListView):
model = Tasting
class TasteDetailView(DetailView):
model = Tasting
class TasteResultView(TasteDetailView):
template_name = "taste/results.html"
class TasteUpdateView(UpdateView):
model = Tasting
def get_success_url(self):
return reverse("taste:detail", kwargs={"pk": self.object.pk})
class FreshFruitMixin(object):
@cached_property
def likes_and_favorites(self):
likes = self.object.likes()
favorites = self.object.favorites()
return {
"likes": likes,
"favorites": favorites,
"favorites_count": favorites.count(),
}
def get_context_data(self, **kwargs):
context = super(FreshFruitMixin, self).get_context_data(**kwargs)
context["has_fresh_fruit"] = True
return context
class FruityFlavorView(FreshFruitMixin, TemplateView):
template_name = "taste/fruity_flavor.html"
class FlavorDetailView(LoginRequiredMixin, DetailView):
model = Flavor
class FlavorCreateView(LoginRequiredMixin, CreateView):
model = Flavor
fields = ('title', 'slug', 'scoops_remaining')
def form_valid(self, form):
update_users_who_favorited(
instances=self.object,
favorites=self.likes_and_favorites['favorites']
)
return super(FlavorCreateView, self).form_valid(form)
| gpl-3.0 | 7,807,723,323,739,347,000 | 24.280899 | 73 | 0.680444 | false | 3.646677 | false | false | false |
xguse/spartan | src/spartan/utils/sandbox.py | 1 | 4836 | from string import Template
from collections import deque
from decimal import Decimal
import pandas as pd
from spartan.utils.errors import *
from spartan.utils.misc import Bunch,fold_seq
def meme_minimal2transfac(meme_path,out_path):
"""
"""
meme_deck = deque(open(meme_path,'rU'))
#raise Exception
transfac_out = open(out_path,'w')
try:
while meme_deck:
motif = Bunch()
try:
motif.names = get_next_names(meme_deck)
motif.matrix = get_next_matrix(meme_deck)
motif.url = get_next_url(meme_deck)
write_next_transfac_motif(motif,transfac_out)
except StopIteration:
raise
except Exception as exc:
if len(meme_deck) == 0:
pass
else:
raise exc
finally:
transfac_out.close()
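# Usage sketch (file names are illustrative):
#     meme_minimal2transfac('meme_minimal.txt', 'motifs.transfac')
# reads MEME minimal output from the first path and writes TRANSFAC-formatted
# motifs to the second.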
def get_next_names(meme_deck):
while meme_deck:
line = meme_deck.popleft()
if line.startswith('MOTIF'):
return line.strip().split()[1:]
else:
# chew through lines until we find the next MOTIF
pass
def get_next_matrix(meme_deck):
matrix = []
mat_info = Bunch()
# collect mat_info
while meme_deck:
line = meme_deck.popleft()
if line.startswith('letter-probability matrix:'):
line = line.strip().replace('letter-probability matrix:','').replace('= ','=').split()
for attr in line:
attr = attr.split('=')
mat_info[attr[0]] = attr[1]
break
else:
# chew through lines until we find the next matrix data
pass
# collect matrix data
while meme_deck:
line = meme_deck.popleft()
if line.startswith('\n'):
break
else:
position = pd.Series([Decimal(i) for i in line.strip().split()],index=['A','C','G','T'])
matrix.append(position)
# confirm correct length
if len(matrix) == int(mat_info.w):
matrix = pd.DataFrame(matrix)
else:
raise SanityCheckError('length of matrix (%s) does not equal "w" attribute (%s) from "letter-probability matrix" line.'
% (len(matrix),mat_info.w))
# convert probabilities into counts
matrix = (matrix.applymap(lambda x: round(x,5)) * int(mat_info.nsites)).applymap(int)
# confirm all positions sum to the same value
#if len(set(matrix.sum(1))) == 1:
#pass
#else:
#raise SanityCheckError('all positions in matrix should sum to the same value. Encountered:\n%s' % (str(matrix.sum(1))))
return matrix
def get_next_url(meme_deck):
while meme_deck:
line = meme_deck.popleft()
if line.startswith('URL'):
return line.strip().split()[-1]
else:
# chew through lines till we get to 'URL'
pass
def write_next_transfac_motif(motif,transfac_out):
"""
AC accession number
ID any_old_name_for_motif_1
BF species_name_for_motif_1
P0 A C G T
01 1 2 2 0 S
02 2 1 2 0 R
03 3 0 1 1 A
04 0 5 0 0 C
05 5 0 0 0 A
06 0 0 4 1 G
07 0 1 4 0 G
08 0 0 0 5 T
09 0 0 5 0 G
10 0 1 2 2 K
11 0 2 0 3 Y
12 1 0 3 1 G
XX
//
"""
name = motif.names[1]
ac = '_'.join(motif.names)
species = 'none_listed' #TODO: handle species field
#TODO: write a REAL consensus function that uses IUPAC degen code
matrix_line = Template('MA\t$pos\t$A\t$C\t$G\t$T\t$major_nuc\n')
#transfac_out.write('AC %s\n' % (ac))
#transfac_out.write('XX\n')
transfac_out.write('NA\t%s\n' % (name))
#transfac_out.write('XX\n')
transfac_out.write('BF\t%s\n' % (species))
#transfac_out.write('P0\tA\tC\tG\tT\n')
transfac_out.write('XX\n')
for i in list(motif.matrix.index):
m = motif.matrix
fields = dict(pos='%02d' % (i+1),
A=m.ix[i,'A'],
C=m.ix[i,'C'],
G=m.ix[i,'G'],
T=m.ix[i,'T'],
major_nuc=m.ix[i].idxmax())
transfac_out.write(matrix_line.substitute(fields))
#transfac_out.write('XX\n')
#transfac_out.write('CC %s\n' % (motif.url))
transfac_out.write('XX\n//\n')
| mit | 202,428,726,396,044,200 | 27.964072 | 128 | 0.490281 | false | 3.550661 | false | false | false |
mferenca/HMS-ecommerce | ecommerce/extensions/api/exceptions.py | 2 | 1277 | """Exceptions and error messages used by the ecommerce API."""
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException
PRODUCT_OBJECTS_MISSING_DEVELOPER_MESSAGE = u"No product objects could be found in the request body"
PRODUCT_OBJECTS_MISSING_USER_MESSAGE = _("You can't check out with an empty basket.")
SKU_NOT_FOUND_DEVELOPER_MESSAGE = u"SKU missing from a requested product object"
SKU_NOT_FOUND_USER_MESSAGE = _("We couldn't locate the identification code necessary to find one of your products.")
PRODUCT_NOT_FOUND_DEVELOPER_MESSAGE = u"Catalog does not contain a product with SKU [{sku}]"
PRODUCT_NOT_FOUND_USER_MESSAGE = _("We couldn't find one of the products you're looking for.")
PRODUCT_UNAVAILABLE_DEVELOPER_MESSAGE = u"Product with SKU [{sku}] is [{availability}]"
PRODUCT_UNAVAILABLE_USER_MESSAGE = _("One of the products you're trying to order is unavailable.")
class ApiError(Exception):
"""Standard error raised by the API."""
pass
class ProductNotFoundError(ApiError):
"""Raised when the provided SKU does not correspond to a product in the catalog."""
pass
class BadRequestException(APIException):
status_code = status.HTTP_400_BAD_REQUEST
| agpl-3.0 | 1,228,558,517,401,397,200 | 40.193548 | 116 | 0.761942 | false | 3.917178 | false | false | false |
robhowley/nextbeeronme | src/db/beersimilarity.py | 1 | 5903 |
# access to the beer similarity table
# cli: truncate beersimilarity, load unique feature set
# and recompute all beer similarities
from datetime import datetime as dt
from tableacc import TableAcc
class BeerSimilarity(TableAcc):
def __init__(self):
super(BeerSimilarity, self).__init__(
table_name='beersimilarity',
cols=['beer_id_ref', 'beer_id_comp', 'similarity'],
upsert_proc='similarityupsert')
def similarity(self, beer_id_ref, beer_id_comp):
return self._select(
cols=["similarity"],
where="beer_id_ref = %s and beer_id_comp = %s",
params=(beer_id_ref,beer_id_comp) )
def similar_beers(self, beer_id, ordered=True, top=0):
return self._select(
cols=["beer_id_comp","similarity"],
where="beer_id_ref = %s",
order_by="similarity desc",
limit=top,
params=(beer_id,) )
def smooth_similarity(self, similarity_records):
self._exec_many_procs("similaritysmooth", similarity_records)
def __asyncable_similarity(tup):
    # tup unpacks as (bs, b_refs, X_t_ref, b_comps, X_t_comp, top):
    #   bs: BeerSimilarity object used to commit results to the db
    #   b_refs: beer ids of the reference vectors
    #   X_t_ref: tf-idf vectors for the reference beers
    #   b_comps: beer ids of the comparison vectors
    #   X_t_comp: tf-idf vectors for the beers compared against
    #   top: keep only the top-N similarities per reference beer
bs, b_refs, X_t_ref, b_comps, X_t_comp, top = tup
start = dt.now()
print 'Beer ct %s vs ct %s: Compute Similarity' % (len(b_refs),len(b_comps))
try:
for i in xrange(len(b_refs)):
# compute similarity between beer_ref[i] and all b_comps
lk = linear_kernel(X_t_ref.getrow(i), X_t_comp).flatten()
# take #top of largest similarities
n = len(lk)
kp = min(top, n)
m_ixs = lk.argsort()[-kp:]
sims = [ (b_refs[i], b_comps[j], lk[j]) for j in m_ixs if b_refs[i] != b_comps[j] ]
#bs.smooth_similarity(sims)
bs.add_many(sims)
print 'Comparison Complete: %s' % (dt.now() - start)
return (b_refs, None)
except Exception as e:
return (b_refs, e)
def __asyncable_transform(tup):
vectorizer, style_id, X = tup
print 'Vectorize %s: start' % style_id
start = dt.now()
X_t = vectorizer.transform(X['review'])
print 'Vectorize %s: done %s' % (style_id, (dt.now()-start))
return (style_id, X['beer_id'].values, X_t)
if __name__ == "__main__":
import pickle
import numpy as np
import pandas as pd
from multiprocessing import Pool
from datetime import datetime as dt
# get access to custom vectorizer
import sys
sys.path.append('src/')
from reviewvectorizer import ReviewTfidf
from styles import Styles
from basewordcts import expanded_stop_words
from reviewfeatures import ReviewFeatures
# same as cosine_similarity for normalized vectors
from sklearn.metrics.pairwise import linear_kernel
# pickling helper func
def pkl_l(src):
with open(src, 'rb') as f:
res = pickle.loads(f.read())
return res
# loading/building vectorizer
def load_vec(vec_pkl):
try:
# load pickled vectorizer if available
return True, pkl_l(vec_pkl)
except Exception as e:
print "Pickled vectorizer not found."
print "Must run styletfidfnb.py to build model"
return False, None
def recompute_and_populate():
"""
- load pickled vectorizer
- transform docs
- compute cosine similarity for all vector pairs
- data is retrieved at rev_rollup_ct = 1 (beer level)
"""
vec_pkl = "src/vocab/review_vectorizer.p"
was_pkl, vec = load_vec(vec_pkl)
# load data for styles with feature sets
# overridden until full feature table is populated
styles = Styles()
top_sy = [159, 84, 157, 56, 58, 9, 128, 97, 116, 140]
print 'Comparing the top %s styles: %s' % (len(top_sy), ', '.join(str(s) for s in top_sy))
X = styles.beer_reviews_rollup(top_sy, limit=0, rev_rollup_ct=1, shuffle=False)
if was_pkl:
print "Loaded pickled vectorizer."
print "Feature count: %s" % len(vec.get_feature_names())
print "Transforming reviews"
trans_pool = Pool(min(10,len(top_sy)))
res_t = trans_pool.map(__asyncable_transform,
[ (vec, sy, X[ X['style_id'] == sy ]) for sy in top_sy])
# as style keyed dict
res_t = {
r[0]: {
'beer_ids': r[1],
'X_t': r[2]
} for r in res_t
}
else:
# exit program
return 0
print 'Truncating similarity table'
bs = BeerSimilarity()
# bs.remove_all()
dim1 = sum(v['X_t'].shape[0] for k,v in res_t.iteritems())
dim2 = sum(len(v['X_t'].data) for k,v in res_t.iteritems())
print 'Computing similarities and saving to db %s' % dim1
print 'Nonzero elements %s' % dim2
# set style RU
# will account for symmetry in the database
# ru_sids = [ (top_sy[i], top_sy[j]) for i in xrange(len(top_sy)) for j in xrange(i,len(top_sy)) ]
ru_sids = [ (top_sy[i], top_sy[i]) for i in xrange(len(top_sy)) ]
pool_inp = []
for ruc in ru_sids:
X_t_ref = res_t[ruc[0]]['X_t']
b_id_ref = res_t[ruc[0]]['beer_ids']
X_t_comp = res_t[ruc[1]]['X_t']
b_id_comp = res_t[ruc[1]]['beer_ids']
pool_inp.append((bs, b_id_ref, X_t_ref, b_id_comp, X_t_comp, 100))
p = Pool(min(10,len(top_sy)))
b_id_res = p.map(__asyncable_similarity, pool_inp)
for res in b_id_res:
if res[1] is not None:
print '%s %s' % (', '.join(str(r) for r in res[0]), res[1])
# start main
while (True):
inp = 'y' #raw_input("Are you sure you want to overwrite beerad.beersimilarity? [y/n] ")
inp = inp.strip().lower()
if inp == 'n':
break
elif inp == 'y':
recompute_and_populate()
break | apache-2.0 | -2,260,625,925,836,058,000 | 27.941176 | 101 | 0.597832 | false | 3.121629 | false | false | false |
kniz/World | build/m.css/pelican-plugins/latex2svg.py | 2 | 5665 | #!/usr/bin/env python3
"""latex2svg
Read LaTeX code from stdin and render a SVG using LaTeX + dvisvgm.
"""
__version__ = '0.1.0'
__author__ = 'Tino Wagner'
__email__ = 'ich@tinowagner.com'
__license__ = 'MIT'
__copyright__ = '(c) 2017, Tino Wagner'
import os
import sys
import subprocess
import shlex
import re
from tempfile import TemporaryDirectory
from ctypes.util import find_library
default_template = r"""
\documentclass[{{ fontsize }}pt,preview]{standalone}
{{ preamble }}
\begin{document}
\begin{preview}
{{ code }}
\end{preview}
\end{document}
"""
default_preamble = r"""
\usepackage[utf8x]{inputenc}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{newtxtext}
\usepackage[libertine]{newtxmath}
"""
latex_cmd = 'latex -interaction nonstopmode -halt-on-error'
dvisvgm_cmd = 'dvisvgm --no-fonts'
default_params = {
'fontsize': 12, # pt
'template': default_template,
'preamble': default_preamble,
'latex_cmd': latex_cmd,
'dvisvgm_cmd': dvisvgm_cmd,
'libgs': None,
}
# os.environ is a mapping, so membership (not hasattr) is the correct check
if 'LIBGS' not in os.environ and not find_library('gs'):
if sys.platform == 'darwin':
# Fallback to homebrew Ghostscript on macOS
homebrew_libgs = '/usr/local/opt/ghostscript/lib/libgs.dylib'
if os.path.exists(homebrew_libgs):
default_params['libgs'] = homebrew_libgs
if not default_params['libgs']:
print('Warning: libgs not found')
def latex2svg(code, params=default_params, working_directory=None):
"""Convert LaTeX to SVG using dvisvgm.
Parameters
----------
code : str
LaTeX code to render.
params : dict
Conversion parameters.
working_directory : str or None
Working directory for external commands and place for temporary files.
Returns
-------
dict
Dictionary of SVG output and output information:
* `svg`: SVG data
* `width`: image width in *em*
* `height`: image height in *em*
* `depth`: baseline position in *em*
"""
if working_directory is None:
with TemporaryDirectory() as tmpdir:
return latex2svg(code, params, working_directory=tmpdir)
fontsize = params['fontsize']
document = (params['template']
.replace('{{ preamble }}', params['preamble'])
.replace('{{ fontsize }}', str(fontsize))
.replace('{{ code }}', code))
with open(os.path.join(working_directory, 'code.tex'), 'w') as f:
f.write(document)
# Run LaTeX and create DVI file
try:
ret = subprocess.run(shlex.split(params['latex_cmd']+' code.tex'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory)
# LaTeX prints errors on stdout instead of stderr (stderr is empty),
# so print stdout instead
if ret.returncode: print(ret.stdout.decode('utf-8'))
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('latex not found')
# Add LIBGS to environment if supplied
env = os.environ.copy()
if params['libgs']:
env['LIBGS'] = params['libgs']
# Convert DVI to SVG
try:
ret = subprocess.run(shlex.split(params['dvisvgm_cmd']+' code.dvi'),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=working_directory, env=env)
if ret.returncode: print(ret.stderr.decode('utf-8'))
ret.check_returncode()
except FileNotFoundError:
raise RuntimeError('dvisvgm not found')
with open(os.path.join(working_directory, 'code.svg'), 'r') as f:
svg = f.read()
# Parse dvisvgm output for size and alignment
def get_size(output):
regex = r'\b([0-9.]+)pt x ([0-9.]+)pt'
match = re.search(regex, output)
if match:
return (float(match.group(1)) / fontsize,
float(match.group(2)) / fontsize)
else:
return None, None
def get_measure(output, name):
regex = r'\b%s=([0-9.e-]+)pt' % name
match = re.search(regex, output)
if match:
return float(match.group(1)) / fontsize
else:
return None
output = ret.stderr.decode('utf-8')
width, height = get_size(output)
depth = get_measure(output, 'depth')
return {'svg': svg, 'depth': depth, 'width': width, 'height': height}
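# Usage sketch (requires a working latex + dvisvgm toolchain; the file name is
# illustrative):
#     out = latex2svg(r'\( e^{i\pi} + 1 = 0 \)')
#     with open('formula.svg', 'w') as f:
#         f.write(out['svg'])
#     # out['width'], out['height'] and out['depth'] are given in em units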
def main():
"""Simple command line interface to latex2svg.
- Read from `stdin`.
- Write SVG to `stdout`.
- Write metadata as JSON to `stderr`.
- On error: write error messages to `stdout` and return with error code.
"""
import json
import argparse
parser = argparse.ArgumentParser(description="""
Render LaTeX code from stdin as SVG to stdout. Writes metadata (baseline
position, width, height in em units) as JSON to stderr.
""")
parser.add_argument('--preamble',
help="LaTeX preamble code to read from file")
args = parser.parse_args()
preamble = default_preamble
if args.preamble is not None:
with open(args.preamble) as f:
preamble = f.read()
latex = sys.stdin.read()
try:
params = default_params.copy()
params['preamble'] = preamble
out = latex2svg(latex, params)
sys.stdout.write(out['svg'])
meta = {key: out[key] for key in out if key != 'svg'}
sys.stderr.write(json.dumps(meta))
except subprocess.CalledProcessError as exc:
print(exc.output.decode('utf-8'))
sys.exit(exc.returncode)
if __name__ == '__main__':
main()
| lgpl-2.1 | -4,294,388,438,650,157,000 | 29.456989 | 78 | 0.60812 | false | 3.741744 | false | false | false |
scienceopen/pyimagevideo | FPS_matplotlib_image.py | 1 | 2891 | #!/usr/bin/env python
"""
demo of measuring FPS performance with Matplotlib and OpenCV
i.e. how fast can I update an image plot
Example:
$ python FPS_matplotlib_image.py
matplotlib 3.0.2 imshow average FPS 27.66 over 100 frames.
matplotlib 3.0.2 pcolormesh average FPS 6.76 over 100 frames.
OpenCV 3.4.3 average FPS 226.59 over 100 frames.
Caveats:
1) I compiled OpenCV with OpenCL--it's possible imshow is using the GPU on my laptop (not sure if imshow uses the GPU)
2) This is an average measurement, so it doesn't capture bogdowns in the frame rate.
3) you must normalize your data on a [0,255] range for cv2.imshow
It's just a very simple comparison, showing OpenCV's huge FPS advantage
NOTE: we use pause(1e-3) as pause(1e-6) yields the same FPS, but doesn't give visible updates. A race condition in Matplotlib?
"""
import numpy as np
from numpy.random import rand
import matplotlib
from matplotlib.pyplot import figure, draw, pause, close
from time import time
from typing import Tuple
try:
import cv2
except ImportError:
cv2 = None
#
Nfps = 100
def randomimg(xy: Tuple[int, int]) -> np.ndarray:
"""
generate two image frames to toggle between
"""
return (rand(2, xy[0], xy[1]) * 255).astype(np.uint8)
def fpsmatplotlib_imshow(dat: np.ndarray):
fg = figure()
ax = fg.gca()
h = ax.imshow(dat[0, ...])
ax.set_title('imshow')
tic = time()
for i in range(Nfps):
h.set_data(dat[i % 2, ...])
draw(), pause(1e-3)
close(fg)
return Nfps / (time() - tic)
def fpsmatplotlib_pcolor(dat: np.ndarray):
fg = figure()
ax = fg.gca()
h = ax.pcolormesh(dat[0, ...])
ax.set_title('pcolormesh')
ax.autoscale(True, tight=True)
tic = time()
for i in range(Nfps):
h.set_array(dat[i % 2, ...].ravel())
draw(), pause(1e-3)
close(fg)
return Nfps / (time() - tic)
def fpsopencv(dat: np.ndarray):
tic = time()
for i in range(Nfps):
cv2.imshow('fpstest', dat[i % 2, ...])
cv2.waitKey(1) # integer milliseconds, 0 makes wait forever
cv2.destroyAllWindows()
return Nfps / (time() - tic)
if __name__ == '__main__':
from argparse import ArgumentParser
p = ArgumentParser(description='measure FPS for rapidly updating plot with Matplotlib vs. OpenCV')
    p.add_argument('-p', '--xypixels', help='number of pixels for x and y',
                   nargs=2, type=int, default=(512, 512))
P = p.parse_args()
dat = randomimg(P.xypixels)
fpsmat = fpsmatplotlib_imshow(dat)
print(f'matplotlib {matplotlib.__version__} imshow average FPS {fpsmat:.2f} over {Nfps} frames.')
fpsmat = fpsmatplotlib_pcolor(dat)
print(f'matplotlib {matplotlib.__version__} pcolormesh average FPS {fpsmat:.2f} over {Nfps} frames.')
if cv2:
fpscv = fpsopencv(dat)
print(f'OpenCV {cv2.__version__} average FPS {fpscv:.2f} over {Nfps} frames.')
| gpl-3.0 | -7,584,691,287,962,307,000 | 29.431579 | 126 | 0.657904 | false | 3.098607 | false | false | false |
lidiamcfreitas/FenixScheduleMaker | ScheduleMaker/brython/www/src/Lib/test/test_warnings.py | 27 | 33938 | from contextlib import contextmanager
import linecache
import os
from io import StringIO
import sys
import unittest
import subprocess
from test import support
from test.script_helper import assert_python_ok
from test import warning_tests
import warnings as original_warnings
py_warnings = support.import_fresh_module('warnings', blocked=['_warnings'])
c_warnings = support.import_fresh_module('warnings', fresh=['_warnings'])
@contextmanager
def warnings_state(module):
"""Use a specific warnings implementation in warning_tests."""
global __warningregistry__
for to_clear in (sys, warning_tests):
try:
to_clear.__warningregistry__.clear()
except AttributeError:
pass
try:
__warningregistry__.clear()
except NameError:
pass
original_warnings = warning_tests.warnings
original_filters = module.filters
try:
module.filters = original_filters[:]
module.simplefilter("once")
warning_tests.warnings = module
yield
finally:
warning_tests.warnings = original_warnings
module.filters = original_filters
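# Usage sketch (assumes the helper functions defined in test.warning_tests):
#     with warnings_state(py_warnings):
#         warning_tests.inner("spam")
# routes warnings raised by the helper module through the chosen implementation.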
class BaseTest:
"""Basic bookkeeping required for testing."""
def setUp(self):
# The __warningregistry__ needs to be in a pristine state for tests
# to work properly.
if '__warningregistry__' in globals():
del globals()['__warningregistry__']
if hasattr(warning_tests, '__warningregistry__'):
del warning_tests.__warningregistry__
if hasattr(sys, '__warningregistry__'):
del sys.__warningregistry__
# The 'warnings' module must be explicitly set so that the proper
# interaction between _warnings and 'warnings' can be controlled.
sys.modules['warnings'] = self.module
super(BaseTest, self).setUp()
def tearDown(self):
sys.modules['warnings'] = original_warnings
super(BaseTest, self).tearDown()
class FilterTests(BaseTest):
"""Testing the filtering functionality."""
def test_error(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=UserWarning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_error")
def test_ignore(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.warn("FilterTests.test_ignore", UserWarning)
self.assertEqual(len(w), 0)
def test_always(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("always", category=UserWarning)
message = "FilterTests.test_always"
self.module.warn(message, UserWarning)
            self.assertEqual(str(w[-1].message), message)
            self.module.warn(message, UserWarning)
            self.assertEqual(str(w[-1].message), message)
def test_default(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("default", category=UserWarning)
message = UserWarning("FilterTests.test_default")
for x in range(2):
self.module.warn(message, UserWarning)
if x == 0:
self.assertEqual(w[-1].message, message)
del w[:]
elif x == 1:
self.assertEqual(len(w), 0)
else:
raise ValueError("loop variant unhandled")
def test_module(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("module", category=UserWarning)
message = UserWarning("FilterTests.test_module")
self.module.warn(message, UserWarning)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn(message, UserWarning)
self.assertEqual(len(w), 0)
def test_once(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
message = UserWarning("FilterTests.test_once")
self.module.warn_explicit(message, UserWarning, "test_warnings.py",
42)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "test_warnings.py",
13)
self.assertEqual(len(w), 0)
self.module.warn_explicit(message, UserWarning, "test_warnings2.py",
42)
self.assertEqual(len(w), 0)
def test_inheritance(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=Warning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_inheritance", UserWarning)
def test_ordering(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.filterwarnings("error", category=UserWarning,
append=True)
del w[:]
try:
self.module.warn("FilterTests.test_ordering", UserWarning)
except UserWarning:
self.fail("order handling for actions failed")
self.assertEqual(len(w), 0)
def test_filterwarnings(self):
# Test filterwarnings().
# Implicitly also tests resetwarnings().
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
self.module.resetwarnings()
text = 'handle normally'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
self.module.filterwarnings("ignore", "", Warning, "", 0)
text = 'filtered out'
self.module.warn(text)
self.assertNotEqual(str(w[-1].message), text)
self.module.resetwarnings()
self.module.filterwarnings("error", "hex*", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'hex/oct')
text = 'nonmatching text'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
class CFilterTests(FilterTests, unittest.TestCase):
module = c_warnings
class PyFilterTests(FilterTests, unittest.TestCase):
module = py_warnings
class WarnTests(BaseTest):
"""Test warnings.warn() and warnings.warn_explicit()."""
def test_message(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
for i in range(4):
text = 'multi %d' %i # Different text on each call.
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
# Issue 3639
def test_warn_nonstandard_types(self):
# warn() should handle non-standard types without issue.
for ob in (Warning, None, 42):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
self.module.warn(ob)
# Don't directly compare objects since
# ``Warning() != Warning()``.
self.assertEqual(str(w[-1].message), str(UserWarning(ob)))
def test_filename(self):
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam1")
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.outer("spam2")
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
def test_stacklevel(self):
# Test stacklevel argument
# make sure all messages are different, so the warning won't be skipped
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam3", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.outer("spam4", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.inner("spam5", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"test_warnings.py")
warning_tests.outer("spam6", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"warning_tests.py")
warning_tests.outer("spam6.5", stacklevel=3)
self.assertEqual(os.path.basename(w[-1].filename),
"test_warnings.py")
warning_tests.inner("spam7", stacklevel=9999)
self.assertEqual(os.path.basename(w[-1].filename),
"sys")
def test_missing_filename_not_main(self):
# If __file__ is not specified and __main__ is not the module name,
# then __file__ should be set to the module name.
filename = warning_tests.__file__
try:
del warning_tests.__file__
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam8", stacklevel=1)
self.assertEqual(w[-1].filename, warning_tests.__name__)
finally:
warning_tests.__file__ = filename
def test_missing_filename_main_with_argv(self):
# If __file__ is not specified and the caller is __main__ and sys.argv
# exists, then use sys.argv[0] as the file.
if not hasattr(sys, 'argv'):
return
filename = warning_tests.__file__
module_name = warning_tests.__name__
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam9', stacklevel=1)
self.assertEqual(w[-1].filename, sys.argv[0])
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
def test_missing_filename_main_without_argv(self):
# If __file__ is not specified, the caller is __main__, and sys.argv
# is not set, then '__main__' is the file name.
filename = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
del sys.argv
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam10', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
sys.argv = argv
def test_missing_filename_main_with_argv_empty_string(self):
# If __file__ is not specified, the caller is __main__, and sys.argv[0]
        # is the empty string, then '__main__' is the file name.
# Tests issue 2743.
file_name = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
sys.argv = ['']
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam11', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = file_name
warning_tests.__name__ = module_name
sys.argv = argv
def test_warn_explicit_type_errors(self):
# warn_explicit() should error out gracefully if it is given objects
# of the wrong types.
# lineno is expected to be an integer.
self.assertRaises(TypeError, self.module.warn_explicit,
None, UserWarning, None, None)
# Either 'message' needs to be an instance of Warning or 'category'
# needs to be a subclass.
self.assertRaises(TypeError, self.module.warn_explicit,
None, None, None, 1)
# 'registry' must be a dict or None.
self.assertRaises((TypeError, AttributeError),
self.module.warn_explicit,
None, Warning, None, 1, registry=42)
def test_bad_str(self):
# issue 6415
# Warnings instance with a bad format string for __str__ should not
# trigger a bus error.
class BadStrWarning(Warning):
"""Warning with a bad format string for __str__."""
def __str__(self):
return ("A bad formatted string %(err)" %
{"err" : "there is no %(err)s"})
with self.assertRaises(ValueError):
self.module.warn(BadStrWarning())
class CWarnTests(WarnTests, unittest.TestCase):
module = c_warnings
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_accelerated(self):
self.assertFalse(original_warnings is self.module)
self.assertFalse(hasattr(self.module.warn, '__code__'))
class PyWarnTests(WarnTests, unittest.TestCase):
module = py_warnings
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_pure_python(self):
self.assertFalse(original_warnings is self.module)
self.assertTrue(hasattr(self.module.warn, '__code__'))
class WCmdLineTests(BaseTest):
def test_improper_input(self):
# Uses the private _setoption() function to test the parsing
# of command-line warning arguments
with original_warnings.catch_warnings(module=self.module):
self.assertRaises(self.module._OptionError,
self.module._setoption, '1:2:3:4:5:6')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'bogus::Warning')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'ignore:2::4:-5')
self.module._setoption('error::Warning::0')
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
def test_improper_option(self):
# Same as above, but check that the message is printed out when
# the interpreter is executed. This also checks that options are
# actually parsed at all.
rc, out, err = assert_python_ok("-Wxxx", "-c", "pass")
self.assertIn(b"Invalid -W option ignored: invalid action: 'xxx'", err)
def test_warnings_bootstrap(self):
# Check that the warnings module does get loaded when -W<some option>
# is used (see issue #10372 for an example of silent bootstrap failure).
rc, out, err = assert_python_ok("-Wi", "-c",
"import sys; sys.modules['warnings'].warn('foo', RuntimeWarning)")
# '-Wi' was observed
self.assertFalse(out.strip())
self.assertNotIn(b'RuntimeWarning', err)
class CWCmdLineTests(WCmdLineTests, unittest.TestCase):
module = c_warnings
class PyWCmdLineTests(WCmdLineTests, unittest.TestCase):
module = py_warnings
class _WarningsTests(BaseTest, unittest.TestCase):
"""Tests specific to the _warnings module."""
module = c_warnings
def test_filter(self):
# Everything should function even if 'filters' is not in warnings.
with original_warnings.catch_warnings(module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
del self.module.filters
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
def test_onceregistry(self):
# Replacing or removing the onceregistry should be okay.
global __warningregistry__
message = UserWarning('onceregistry test')
try:
original_registry = self.module.onceregistry
__warningregistry__ = {}
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(len(w), 0)
# Test the resetting of onceregistry.
self.module.onceregistry = {}
__warningregistry__ = {}
self.module.warn('onceregistry test')
self.assertEqual(w[-1].message.args, message.args)
# Removal of onceregistry is okay.
del w[:]
del self.module.onceregistry
__warningregistry__ = {}
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(len(w), 0)
finally:
self.module.onceregistry = original_registry
def test_default_action(self):
# Replacing or removing defaultaction should be okay.
message = UserWarning("defaultaction test")
original = self.module.defaultaction
try:
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 42,
registry=registry)
self.assertEqual(w[-1].message, message)
self.assertEqual(len(w), 1)
self.assertEqual(len(registry), 1)
del w[:]
# Test removal.
del self.module.defaultaction
__warningregistry__ = {}
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 43,
registry=registry)
self.assertEqual(w[-1].message, message)
self.assertEqual(len(w), 1)
self.assertEqual(len(registry), 1)
del w[:]
# Test setting.
self.module.defaultaction = "ignore"
__warningregistry__ = {}
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 44,
registry=registry)
self.assertEqual(len(w), 0)
finally:
self.module.defaultaction = original
def test_showwarning_missing(self):
# Test that showwarning() missing is okay.
text = 'del showwarning test'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with support.captured_output('stderr') as stream:
self.module.warn(text)
result = stream.getvalue()
self.assertIn(text, result)
def test_showwarning_not_callable(self):
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
self.module.showwarning = print
with support.captured_output('stdout'):
self.module.warn('Warning!')
self.module.showwarning = 23
self.assertRaises(TypeError, self.module.warn, "Warning!")
def test_show_warning_output(self):
        # With showwarning() missing, make sure that output is okay.
text = 'test show_warning'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with support.captured_output('stderr') as stream:
warning_tests.inner(text)
result = stream.getvalue()
self.assertEqual(result.count('\n'), 2,
"Too many newlines in %r" % result)
first_line, second_line = result.split('\n', 1)
expected_file = os.path.splitext(warning_tests.__file__)[0] + '.py'
first_line_parts = first_line.rsplit(':', 3)
path, line, warning_class, message = first_line_parts
line = int(line)
self.assertEqual(expected_file, path)
self.assertEqual(warning_class, ' ' + UserWarning.__name__)
self.assertEqual(message, ' ' + text)
expected_line = ' ' + linecache.getline(path, line).strip() + '\n'
assert expected_line
self.assertEqual(second_line, expected_line)
def test_filename_none(self):
# issue #12467: race condition if a warning is emitted at shutdown
globals_dict = globals()
oldfile = globals_dict['__file__']
try:
catch = original_warnings.catch_warnings(record=True,
module=self.module)
with catch as w:
self.module.filterwarnings("always", category=UserWarning)
globals_dict['__file__'] = None
original_warnings.warn('test', UserWarning)
self.assertTrue(len(w))
finally:
globals_dict['__file__'] = oldfile
class WarningsDisplayTests(BaseTest):
"""Test the displaying of warnings and the ability to overload functions
related to displaying warnings."""
def test_formatwarning(self):
message = "msg"
category = Warning
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
file_line = linecache.getline(file_name, line_num).strip()
format = "%s:%s: %s: %s\n %s\n"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.assertEqual(expect, self.module.formatwarning(message,
category, file_name, line_num))
# Test the 'line' argument.
file_line += " for the win!"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.assertEqual(expect, self.module.formatwarning(message,
category, file_name, line_num, file_line))
def test_showwarning(self):
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
expected_file_line = linecache.getline(file_name, line_num).strip()
message = 'msg'
category = Warning
file_object = StringIO()
expect = self.module.formatwarning(message, category, file_name,
line_num)
self.module.showwarning(message, category, file_name, line_num,
file_object)
self.assertEqual(file_object.getvalue(), expect)
# Test 'line' argument.
expected_file_line += "for the win!"
expect = self.module.formatwarning(message, category, file_name,
line_num, expected_file_line)
file_object = StringIO()
self.module.showwarning(message, category, file_name, line_num,
file_object, expected_file_line)
self.assertEqual(expect, file_object.getvalue())
class CWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
module = c_warnings
class PyWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
module = py_warnings
class CatchWarningTests(BaseTest):
"""Test catch_warnings()."""
def test_catch_warnings_restore(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure both showwarning and filters are restored when recording
with wmod.catch_warnings(module=wmod, record=True):
wmod.filters = wmod.showwarning = object()
self.assertTrue(wmod.filters is orig_filters)
self.assertTrue(wmod.showwarning is orig_showwarning)
# Same test, but with recording disabled
with wmod.catch_warnings(module=wmod, record=False):
wmod.filters = wmod.showwarning = object()
self.assertTrue(wmod.filters is orig_filters)
self.assertTrue(wmod.showwarning is orig_showwarning)
def test_catch_warnings_recording(self):
wmod = self.module
# Ensure warnings are recorded when requested
with wmod.catch_warnings(module=wmod, record=True) as w:
self.assertEqual(w, [])
self.assertTrue(type(w) is list)
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w[-1].message), "foo")
wmod.warn("bar")
self.assertEqual(str(w[-1].message), "bar")
self.assertEqual(str(w[0].message), "foo")
self.assertEqual(str(w[1].message), "bar")
del w[:]
self.assertEqual(w, [])
# Ensure warnings are not recorded when not requested
orig_showwarning = wmod.showwarning
with wmod.catch_warnings(module=wmod, record=False) as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
def test_catch_warnings_reentry_guard(self):
wmod = self.module
# Ensure catch_warnings is protected against incorrect usage
x = wmod.catch_warnings(module=wmod, record=True)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
# Same test, but with recording disabled
x = wmod.catch_warnings(module=wmod, record=False)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
def test_catch_warnings_defaults(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure default behaviour is not to record warnings
with wmod.catch_warnings(module=wmod) as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
self.assertTrue(wmod.filters is not orig_filters)
self.assertTrue(wmod.filters is orig_filters)
if wmod is sys.modules['warnings']:
# Ensure the default module is this one
with wmod.catch_warnings() as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
self.assertTrue(wmod.filters is not orig_filters)
self.assertTrue(wmod.filters is orig_filters)
def test_check_warnings(self):
# Explicit tests for the test.support convenience wrapper
wmod = self.module
if wmod is not sys.modules['warnings']:
return
with support.check_warnings(quiet=False) as w:
self.assertEqual(w.warnings, [])
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w.message), "foo")
wmod.warn("bar")
self.assertEqual(str(w.message), "bar")
self.assertEqual(str(w.warnings[0].message), "foo")
self.assertEqual(str(w.warnings[1].message), "bar")
w.reset()
self.assertEqual(w.warnings, [])
with support.check_warnings():
# defaults to quiet=True without argument
pass
with support.check_warnings(('foo', UserWarning)):
wmod.warn("foo")
with self.assertRaises(AssertionError):
with support.check_warnings(('', RuntimeWarning)):
# defaults to quiet=False with argument
pass
with self.assertRaises(AssertionError):
with support.check_warnings(('foo', RuntimeWarning)):
wmod.warn("foo")
class CCatchWarningTests(CatchWarningTests, unittest.TestCase):
module = c_warnings
class PyCatchWarningTests(CatchWarningTests, unittest.TestCase):
module = py_warnings
class EnvironmentVariableTests(BaseTest):
def test_single_warning(self):
newenv = os.environ.copy()
newenv["PYTHONWARNINGS"] = "ignore::DeprecationWarning"
p = subprocess.Popen([sys.executable,
"-c", "import sys; sys.stdout.write(str(sys.warnoptions))"],
stdout=subprocess.PIPE, env=newenv)
self.assertEqual(p.communicate()[0], b"['ignore::DeprecationWarning']")
self.assertEqual(p.wait(), 0)
def test_comma_separated_warnings(self):
newenv = os.environ.copy()
newenv["PYTHONWARNINGS"] = ("ignore::DeprecationWarning,"
"ignore::UnicodeWarning")
p = subprocess.Popen([sys.executable,
"-c", "import sys; sys.stdout.write(str(sys.warnoptions))"],
stdout=subprocess.PIPE, env=newenv)
self.assertEqual(p.communicate()[0],
b"['ignore::DeprecationWarning', 'ignore::UnicodeWarning']")
self.assertEqual(p.wait(), 0)
def test_envvar_and_command_line(self):
newenv = os.environ.copy()
newenv["PYTHONWARNINGS"] = "ignore::DeprecationWarning"
p = subprocess.Popen([sys.executable, "-W" "ignore::UnicodeWarning",
"-c", "import sys; sys.stdout.write(str(sys.warnoptions))"],
stdout=subprocess.PIPE, env=newenv)
self.assertEqual(p.communicate()[0],
b"['ignore::UnicodeWarning', 'ignore::DeprecationWarning']")
self.assertEqual(p.wait(), 0)
@unittest.skipUnless(sys.getfilesystemencoding() != 'ascii',
'requires non-ascii filesystemencoding')
def test_nonascii(self):
newenv = os.environ.copy()
newenv["PYTHONWARNINGS"] = "ignore:DeprecaciónWarning"
newenv["PYTHONIOENCODING"] = "utf-8"
p = subprocess.Popen([sys.executable,
"-c", "import sys; sys.stdout.write(str(sys.warnoptions))"],
stdout=subprocess.PIPE, env=newenv)
self.assertEqual(p.communicate()[0],
"['ignore:DeprecaciónWarning']".encode('utf-8'))
self.assertEqual(p.wait(), 0)
class CEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
module = c_warnings
class PyEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
module = py_warnings
class BootstrapTest(unittest.TestCase):
def test_issue_8766(self):
# "import encodings" emits a warning whereas the warnings is not loaded
# or not completely loaded (warnings imports indirectly encodings by
# importing linecache) yet
with support.temp_cwd() as cwd, support.temp_cwd('encodings'):
env = os.environ.copy()
env['PYTHONPATH'] = cwd
# encodings loaded by initfsencoding()
retcode = subprocess.call([sys.executable, '-c', 'pass'], env=env)
self.assertEqual(retcode, 0)
# Use -W to load warnings module at startup
retcode = subprocess.call(
[sys.executable, '-c', 'pass', '-W', 'always'],
env=env)
self.assertEqual(retcode, 0)
def setUpModule():
py_warnings.onceregistry.clear()
c_warnings.onceregistry.clear()
tearDownModule = setUpModule
if __name__ == "__main__":
unittest.main()
| bsd-2-clause | -3,574,387,733,208,198,000 | 41.473091 | 80 | 0.582597 | false | 4.434339 | true | false | false |
Petr-Kovalev/nupic-win32 | py/nupic/support/FileLock.py | 2 | 3105 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import fcntl
import os
import sys
class FileLockAcquireException(Exception):
pass
class FileLockReleaseException(Exception):
pass
class FileLock(object):
""" This class implements a global file lock that can be used as a
a mutex between cooperating processes.
NOTE: the present implementation's behavior is undefined when multiple
threads may try ackquire a lock on the same file.
"""
def __init__(self, filePath):
"""
Parameters:
------------------------------------------------------------------------
filePath: Path to a file to be used for locking; The file MUST already exist.
"""
assert os.path.isabs(filePath), "not absolute path: %r" % filePath
assert os.path.isfile(filePath), (
"not a file or doesn't exist: %r" % filePath)
self.__filePath = filePath
self.__fp = open(self.__filePath, "r")
self.__fd = self.__fp.fileno()
return
def __enter__(self):
""" Context Manager protocol method. Allows a FileLock instance to be
used in a "with" statement for automatic acquire/release
Parameters:
------------------------------------------------------------------------
retval: self.
"""
self.acquire()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
""" Context Manager protocol method. Allows a FileLock instance to be
used in a "with" statement for automatic acquire/release
"""
self.release()
return False
def acquire(self):
""" Acquire global lock
exception: FileLockAcquireException on failure
"""
try:
fcntl.flock(self.__fd, fcntl.LOCK_EX)
except Exception, e:
e = FileLockAcquireException(
"FileLock acquire failed on %r" % (self.__filePath), e)
raise e, None, sys.exc_info()[2]
return
def release(self):
""" Release global lock
"""
try:
fcntl.flock(self.__fd, fcntl.LOCK_UN)
except Exception, e:
e = FileLockReleaseException(
"FileLock release failed on %r" % (self.__filePath), e)
raise e, None, sys.exc_info()[2]
return | gpl-3.0 | 612,251,764,084,301,400 | 28.028037 | 83 | 0.613205 | false | 4.354839 | false | false | false |
TzuChieh/Photon-v2 | scripts/utility/downloader.py | 1 | 1341 | import urllib.request
import zipfile
import os
import time
def download_file(src_file_url, dst_file_path, max_retry=10):
print("Downloading file %s..." % dst_file_path)
def progress_reporter(num_chunks_read, chunk_size, total_size):
read_so_far = num_chunks_read * chunk_size
print("\r - Downloaded: %d MB -" % (read_so_far / (1 << 20)), flush=True, end="")
num_retry = 0
while True:
try:
urllib.request.urlretrieve(src_file_url, dst_file_path, progress_reporter)
except Exception as e:
print(e)
if num_retry < max_retry:
print("Error downloading file, remaining trials: %d" % (max_retry - num_retry))
print("Download will restart after 30 secs")
time.sleep(30)
num_retry = num_retry + 1
continue
else:
print("Download failed.")
break
else:
print("\nDownload completed.")
break
def download_zipfile_and_extract(src_zipfile_url, dst_directory):
temp_dst_file_path = os.path.join(dst_directory, "__temp__" + str(time.time()))
download_file(src_zipfile_url, temp_dst_file_path)
print("Extracting file...")
# extract zipped resource folder
zip_file = zipfile.ZipFile(temp_dst_file_path, "r")
zip_file.extractall(dst_directory)
zip_file.close()
print("Deleting temporary file %s." % temp_dst_file_path)
# delete zipped resource file
os.remove(temp_dst_file_path)
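# Example call (illustrative only; the URL and destination directory below are
# placeholders, not real resources):
#
#   download_zipfile_and_extract("https://example.com/archive.zip", "./resources")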
| mit | 7,877,643,992,446,901,000 | 25.294118 | 83 | 0.687547 | false | 2.993304 | false | false | false |
sendgridlabs/ddbmock | tests/functional/pyramid/test_update_table.py | 1 | 3138 | import json
import time
import unittest
import mock
NOW = time.time()
NOW2 = time.time() + 42 * 1000
TABLE_NAME = 'Table-1'
TABLE_RT = 45
TABLE_WT = 123
TABLE_RT2 = 10
TABLE_WT2 = 10
TABLE_HK_NAME = u'hash_key'
TABLE_HK_TYPE = u'N'
TABLE_RK_NAME = u'range_key'
TABLE_RK_TYPE = u'S'
HEADERS = {
'x-amz-target': 'dynamodb_20111205.UpdateTable',
'content-type': 'application/x-amz-json-1.0',
}
# Goal here is not to test the full API, this is done by the Boto tests
class TestUpdateTable(unittest.TestCase):
@mock.patch("ddbmock.database.table.time") # Brrr
def setUp(self, m_time):
from ddbmock.database.db import dynamodb
from ddbmock.database.table import Table
from ddbmock.database.key import PrimaryKey
from ddbmock import main
app = main({})
from webtest import TestApp
self.app = TestApp(app)
m_time.time.return_value = NOW
dynamodb.hard_reset()
hash_key = PrimaryKey(TABLE_HK_NAME, TABLE_HK_TYPE)
range_key = PrimaryKey(TABLE_RK_NAME, TABLE_RK_TYPE)
t1 = Table(TABLE_NAME, TABLE_RT, TABLE_WT, hash_key, range_key,
status="ACTIVE")
dynamodb.data[TABLE_NAME] = t1
def tearDown(self):
from ddbmock.database.db import dynamodb
dynamodb.hard_reset()
@mock.patch("ddbmock.database.table.time")
def test_update(self, m_time):
from ddbmock.database.db import dynamodb
m_time.time.return_value = NOW2
self.maxDiff = None
request = {
"TableName": TABLE_NAME,
"ProvisionedThroughput": {
"ReadCapacityUnits": TABLE_RT2,
"WriteCapacityUnits": TABLE_WT2,
},
}
expected = {
u'TableDescription': {
u'CreationDateTime': NOW,
u'ItemCount': 0,
u'KeySchema': {
u'HashKeyElement': {
u'AttributeName': u'hash_key',
u'AttributeType': u'N',
},
u'RangeKeyElement': {
u'AttributeName': u'range_key',
u'AttributeType': u'S',
},
},
u'ProvisionedThroughput': {
u'LastDecreaseDateTime': NOW2,
u'ReadCapacityUnits': TABLE_RT2,
u'WriteCapacityUnits': TABLE_WT2,
},
u'TableName': TABLE_NAME,
u'TableSizeBytes': 0,
u'TableStatus': u'UPDATING'
}
}
# Protocol check
res = self.app.post_json('/', request, headers=HEADERS, status=200)
self.assertEqual(expected, json.loads(res.body))
self.assertEqual('application/x-amz-json-1.0; charset=UTF-8',
res.headers['Content-Type'])
# Live data check
data = dynamodb.data
assert TABLE_NAME in data
table = data[TABLE_NAME]
self.assertEqual(TABLE_RT2, table.rt)
self.assertEqual(TABLE_WT2, table.wt)
| lgpl-3.0 | -6,893,056,448,100,929,000 | 28.603774 | 75 | 0.543658 | false | 3.704841 | true | false | false |
agriffis/urlobject | urlobject/path.py | 4 | 4746 | # -*- coding: utf-8 -*-
import posixpath
import urllib
from .compat import urlparse
from .six import text_type, u
class Root(object):
"""A descriptor which always returns the root path."""
def __get__(self, instance, cls):
return cls('/')
class URLPath(text_type):
root = Root()
def __repr__(self):
return u('URLPath(%r)') % (text_type(self),)
@classmethod
def join_segments(cls, segments, absolute=True):
"""Create a :class:`URLPath` from an iterable of segments."""
if absolute:
path = cls('/')
else:
path = cls('')
for segment in segments:
path = path.add_segment(segment)
return path
@property
def segments(self):
"""
Split this path into (decoded) segments.
>>> URLPath('/a/b/c').segments
('a', 'b', 'c')
Non-leaf nodes will have a trailing empty string, and percent encodes
will be decoded:
>>> URLPath('/a%20b/c%20d/').segments
('a b', 'c d', '')
"""
segments = tuple(map(path_decode, self.split('/')))
if segments[0] == '':
return segments[1:]
return segments
@property
def parent(self):
"""
The parent of this node.
>>> URLPath('/a/b/c').parent
URLPath('/a/b/')
>>> URLPath('/foo/bar/').parent
URLPath('/foo/')
"""
if self.is_leaf:
return self.relative('.')
return self.relative('..')
@property
def is_leaf(self):
"""
Is this path a leaf node?
>>> URLPath('/a/b/c').is_leaf
True
>>> URLPath('/a/b/').is_leaf
False
"""
return self and self.segments[-1] != '' or False
@property
def is_relative(self):
"""
Is this path relative?
>>> URLPath('a/b/c').is_relative
True
>>> URLPath('/a/b/c').is_relative
False
"""
return self[0] != '/'
@property
def is_absolute(self):
"""
Is this path absolute?
>>> URLPath('a/b/c').is_absolute
False
>>> URLPath('/a/b/c').is_absolute
True
"""
return self[0] == '/'
def relative(self, rel_path):
"""
Resolve a relative path against this one.
>>> URLPath('/a/b/c').relative('.')
URLPath('/a/b/')
>>> URLPath('/a/b/c').relative('d')
URLPath('/a/b/d')
>>> URLPath('/a/b/c').relative('../d')
URLPath('/a/d')
"""
return type(self)(urlparse.urljoin(self, rel_path))
def add_segment(self, segment):
"""
Add a segment to this path.
>>> URLPath('/a/b/').add_segment('c')
URLPath('/a/b/c')
Non-ASCII and reserved characters (including slashes) will be encoded:
>>> URLPath('/a/b/').add_segment('dé/f')
URLPath('/a/b/d%C3%A9%2Ff')
"""
return type(self)(posixpath.join(self, path_encode(segment)))
def add(self, path):
"""
Add a partial path to this one.
The only difference between this and :meth:`add_segment` is that slash
characters will not be encoded, making it suitable for adding more than
one path segment at a time:
>>> URLPath('/a/b/').add('dé/f/g')
URLPath('/a/b/d%C3%A9/f/g')
"""
return type(self)(posixpath.join(self, path_encode(path, safe='/')))
def _path_encode_py2(s, safe=''):
"""Quote unicode or str using path rules."""
if isinstance(s, unicode):
s = s.encode('utf-8')
if isinstance(safe, unicode):
safe = safe.encode('utf-8')
return urllib.quote(s, safe=safe).decode('utf-8')
def _path_encode_py3(s, safe=''):
"""Quote str or bytes using path rules."""
# s can be bytes or unicode, urllib.parse.quote() assumes
# utf-8 if encoding is necessary.
return urlparse.quote(s, safe=safe)
def _path_decode_py2(s):
"""Unquote unicode or str using path rules."""
if isinstance(s, unicode):
s = s.encode('utf-8')
return urllib.unquote(s).decode('utf-8')
def _path_decode_py3(s):
"""Unquote str or bytes using path rules."""
if isinstance(s, bytes):
s = s.decode('utf-8')
return urlparse.unquote(s)
if hasattr(urllib, 'quote'):
path_encode = _path_encode_py2
path_decode = _path_decode_py2
del _path_encode_py3
del _path_decode_py3
else:
path_encode = _path_encode_py3
path_decode = _path_decode_py3
del _path_encode_py2
del _path_decode_py2
| unlicense | -1,322,789,359,125,008,000 | 24.643243 | 79 | 0.521079 | false | 3.744278 | false | false | false |
varunasingh/ADL_LRS | oauth_provider/managers.py | 4 | 1707 | from django.db import models
from django.contrib.auth.models import User
from consts import KEY_SIZE, SECRET_SIZE
# lou w - removed any references to Resource
class ConsumerManager(models.Manager):
def create_consumer(self, name, user=None):
"""Shortcut to create a consumer with random key/secret."""
consumer, created = self.get_or_create(name=name)
if user is not None:
consumer.user = user
if created:
consumer.generate_random_codes()
return consumer
_default_consumer = None
def get_default_consumer(self, name):
"""Add cache if you use a default consumer."""
if self._default_consumer is None:
self._default_consumer = self.get(name=name)
return self._default_consumer
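# Usage sketch (illustrative; assumes these managers are installed as the
# default managers of this app's Consumer and Token models):
#
#   consumer = Consumer.objects.create_consumer('my client', user=some_user)
#   token = Token.objects.create_token(consumer, token_type, timestamp, scope)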
# lou w -renamed resource to scope
class TokenManager(models.Manager):
def create_token(self, consumer, token_type, timestamp, scope,
user=None, callback=None, callback_confirmed=False, lrs_auth_id=None):
"""Shortcut to create a token with random key/secret."""
token, created = self.get_or_create(consumer=consumer,
token_type=token_type,
timestamp=timestamp,
scope=scope,
user=user,
callback=callback,
callback_confirmed=callback_confirmed,
lrs_auth_id=lrs_auth_id)
if created:
token.generate_random_codes()
return token
| apache-2.0 | 8,737,774,478,903,112,000 | 41.675 | 82 | 0.546573 | false | 4.822034 | false | false | false |
craws/OpenAtlas-Python | openatlas/models/imports.py | 1 | 6864 | from __future__ import annotations # Needed for Python 4.0 type annotations
from typing import Any, List, Optional
from flask import g
from flask_login import current_user
from psycopg2.extras import NamedTupleCursor
from openatlas import app
from openatlas.util.util import is_float
from openatlas.util.display import sanitize, uc_first
class Project:
def __init__(self, row: NamedTupleCursor.Record) -> None:
self.id = row.id
self.name = row.name
self.count = row.count
self.description = row.description if row.description else ''
self.created = row.created
self.modified = row.modified
class Import:
sql = """
SELECT p.id, p.name, p.description, p.created, p.modified, COUNT(e.id) AS count
FROM import.project p LEFT JOIN import.entity e ON p.id = e.project_id """
@staticmethod
def insert_project(name: str, description: Optional[str] = None) -> NamedTupleCursor.Record:
description = description.strip() if description else None
sql = """
INSERT INTO import.project (name, description) VALUES (%(name)s, %(description)s)
RETURNING id;"""
g.execute(sql, {'name': name, 'description': description})
return g.cursor.fetchone()[0]
@staticmethod
def get_all_projects() -> List[Project]:
g.execute(Import.sql + ' GROUP by p.id ORDER BY name;')
return [Project(row) for row in g.cursor.fetchall()]
@staticmethod
def get_project_by_id(id_: int) -> Project:
g.execute(Import.sql + ' WHERE p.id = %(id)s GROUP by p.id;', {'id': id_})
return Project(g.cursor.fetchone())
@staticmethod
def get_project_by_name(name: str) -> Optional[Project]:
g.execute(Import.sql + ' WHERE p.name = %(name)s GROUP by p.id;', {'name': name})
return Project(g.cursor.fetchone()) if g.cursor.rowcount == 1 else None
@staticmethod
def delete_project(id_: int) -> None:
g.execute('DELETE FROM import.project WHERE id = %(id)s;', {'id': id_})
@staticmethod
def check_origin_ids(project: Project, origin_ids: List[str]) -> List[str]:
""" Check if origin ids already in database"""
sql = """
SELECT origin_id FROM import.entity
WHERE project_id = %(project_id)s AND origin_id IN %(ids)s;"""
g.execute(sql, {'project_id': project.id, 'ids': tuple(set(origin_ids))})
return [row.origin_id for row in g.cursor.fetchall()]
@staticmethod
def check_duplicates(class_code: str, names: List[str]) -> List[str]:
sql = """
SELECT DISTINCT name FROM model.entity
WHERE class_code = %(class_code)s AND LOWER(name) IN %(names)s;"""
g.execute(sql, {'class_code': class_code, 'names': tuple(names)})
return [row.name for row in g.cursor.fetchall()]
@staticmethod
def update_project(project: Project) -> None:
sql = """
UPDATE import.project SET (name, description) = (%(name)s, %(description)s)
WHERE id = %(id)s;"""
g.execute(sql, {'id': project.id,
'name': project.name,
'description': sanitize(project.description, 'text')})
@staticmethod
def check_type_id(type_id: str, class_code: str) -> bool: # pragma: no cover
if not type_id.isdigit():
return False
elif int(type_id) not in g.nodes:
return False
else:
# Check if type is allowed (for corresponding form)
valid_type = False
root = g.nodes[g.nodes[int(type_id)].root[0]]
for form_id, form_object in root.forms.items():
if form_object['name'] == uc_first(app.config['CODE_CLASS'][class_code]):
valid_type = True
if not valid_type:
return False
return True
@staticmethod
def import_data(project: 'Project', class_code: str, data: List[Any]) -> None:
from openatlas.models.entity import Entity
from openatlas.models.gis import Gis
for row in data:
system_type = None
if class_code == 'E33': # pragma: no cover
system_type = 'source content'
elif class_code == 'E18':
system_type = 'place'
desc = row['description'] if 'description' in row and row['description'] else None
entity = Entity.insert(class_code, row['name'], system_type, desc)
sql = """
INSERT INTO import.entity (project_id, origin_id, entity_id, user_id)
VALUES (%(project_id)s, %(origin_id)s, %(entity_id)s, %(user_id)s);"""
g.execute(sql, {'project_id': project.id,
'entity_id': entity.id,
'user_id': current_user.id,
'origin_id': row['id'] if 'id' in row and row['id'] else None})
# Dates
if 'begin_from' in row and row['begin_from']:
entity.begin_from = row['begin_from']
if 'begin_to' in row and row['begin_to']:
entity.begin_to = row['begin_to']
if 'begin_comment' in row and row['begin_comment']:
entity.begin_comment = row['begin_comment']
if 'end_from' in row and row['end_from']:
entity.end_from = row['end_from']
if 'end_to' in row and row['end_to']:
entity.end_to = row['end_to']
if 'end_comment' in row and row['end_comment']:
entity.end_comment = row['end_comment']
entity.update()
# Types
if 'type_ids' in row and row['type_ids']: # pragma: no cover
for type_id in row['type_ids'].split():
if not Import.check_type_id(type_id, class_code):
continue
sql = """
INSERT INTO model.link (property_code, domain_id, range_id)
VALUES ('P2', %(domain_id)s, %(type_id)s);"""
g.execute(sql, {'domain_id': entity.id, 'type_id': int(type_id)})
# GIS
if class_code == 'E18':
location = Entity.insert('E53', 'Location of ' + row['name'], 'place location')
entity.link('P53', location)
if 'easting' in row and is_float(row['easting']):
if 'northing' in row and is_float(row['northing']):
Gis.insert_import(entity=entity,
location=location,
project=project,
easting=row['easting'],
northing=row['northing'])
| gpl-2.0 | 3,230,191,295,386,054,700 | 43 | 96 | 0.543998 | false | 3.981439 | false | false | false |
vietdh85/vh-utility | script/invest_tracing.py | 1 | 1487 | import sys
import os.path
import urllib2
import re
from pyquery import PyQuery as pq
import common
def getId(url):
print("=====> ", url)
arr = url.split("-")
arr1 = arr[1].split(".")
id = arr1[0]
return id
def getSiteUrl(id, monitor, rcbUrl):
result = ""
urlRequest = "http://www.{0}/{1}".format(monitor, id)
print("REQUEST: {0}".format(urlRequest))
try:
req = urllib2.urlopen(urlRequest, timeout=30)
url = req.geturl()
arr = url.split("/?")
arr1 = arr[0].split("//")
result = arr1[1].replace("www.", "")
result = result.split("/")[0]
except :
print("========== ERROR ===========")
#common.insertUnknowSite(rcbUrl, monitor)
return result
def getRcb(monitor):
print("invest_tracing.getRcb()")
rcb_url = "http://{0}/newadd.html".format(monitor)
d = pq(url=rcb_url)
tables = d(".listbody tr td[width='28%'] .pro")
siteList = []
for index, item in enumerate(tables):
try:
obj = {}
obj['id'] = getId(item.get("href"))
obj['siteRCBUrl'] = "http://{0}/rcb-{1}.html".format(monitor, obj['id'])
obj['url'] = getSiteUrl(item.get("href"), monitor, obj['siteRCBUrl'])
obj['siteId'] = ""
if obj['url'] != '':
siteId = common.insertSite(obj)
obj['siteId'] = siteId
siteList.append(obj)
print("{0} - {1} - {2}".format(obj['id'], obj['url'], obj['siteId']))
except:
pass
for item in siteList:
common.insertSiteMonitor(item, monitor)
def run():
MONITOR = "invest-tracing.com"
getRcb(MONITOR)
| gpl-3.0 | -6,423,259,681,422,139,000 | 21.530303 | 75 | 0.601883 | false | 2.718464 | false | false | false |
ownport/pywsinfo | tests/test_utils.py | 1 | 2581 | import sys
if '' not in sys.path:
sys.path.append('')
import pywsinfo
import unittest
class WebSiteUtilsTests(unittest.TestCase):
def test_parse_url(self):
url = 'http://www.example.com'
self.assertEqual(
pywsinfo.parse_url(url),
{'source_url': 'http://www.example.com', 'host': 'www.example.com'}
)
url = 'http://www.example.com/path'
self.assertEqual(
pywsinfo.parse_url(url),
{'source_url': 'http://www.example.com', 'host': 'www.example.com'}
)
url = 'http://www.example.com/path?12&12'
self.assertEqual(
pywsinfo.parse_url(url),
{'source_url': 'http://www.example.com', 'host': 'www.example.com'}
)
url = 'https://www.example.com'
self.assertEqual(
pywsinfo.parse_url(url),
{'source_url': 'https://www.example.com', 'host': 'www.example.com'}
)
url = 'http://localhost:8080'
self.assertEqual(
pywsinfo.parse_url(url),
{'source_url': 'http://localhost:8080', 'host': 'localhost'}
)
def test_nsloopup(self):
self.assertGreater(len(pywsinfo.nslookup('google.com')), 1)
self.assertGreater(len(pywsinfo.nslookup('www.google.com')), 1)
self.assertEqual(pywsinfo.nslookup('www.google.com2'), [])
def test_parse_html_head(self):
html = '''<head>
<meta name="Keywords" content="keyword1,keyword2">
<meta name="Description" content="description">
</head>'''
self.assertEqual(pywsinfo.parse_html_head(html),
{'keywords':['keyword1','keyword2'], 'description': 'description'})
html = '''<head>
<meta name="keywords" content="keyword1,keyword2">
<meta name="description" content="description">
</head>'''
self.assertEqual(pywsinfo.parse_html_head(html),
{'keywords':['keyword1','keyword2'], 'description': 'description'})
html = '''<head>
<meta name="keywords" content="">
<meta name="description" content="">
</head>'''
self.assertEqual(pywsinfo.parse_html_head(html), {})
html = '''<head></head>'''
self.assertEqual(pywsinfo.parse_html_head(html), {})
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | 6,211,147,958,173,613,000 | 34.847222 | 91 | 0.511817 | false | 4.020249 | true | false | false |
avihad/ARP-Storm | src/arp_open_flow/of_sw_tutorial_oo.py | 1 | 12747 | #!/usr/bin/python
# Copyright 2012 James McCauley, William Yu
# wyu@ateneo.edu
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is a demonstration file that has various switch implementations.
The first example is a basic "all match" switch followed by a
destination match, pair match then finally a more ideal pair match
switch.
Mininet Command: sudo mn --topo single,3 --mac
--switch ovsk
--controller remote
Command Line: ./pox.py py --completion
log.level --DEBUG
samples.of_sw_tutorial_oo
THIS VERSION SUPPORT resend() functionality in the betta branch POX.
Object-oriented version that allows user to switch switches via the
command line interface.
"""
# These next two imports are common POX convention
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpidToStr
# Even a simple usage of the logger is much nicer than print!
log = core.getLogger()
# Create the class to hold the switch tutorial implementations
class SwitchTutorial (object):
# This table maps (switch,MAC-addr) pairs to the port on 'switch' at
# which we last saw a packet *from* 'MAC-addr'.
# (In this case, we use a Connection object for the switch.)
table = {}
# Holds the object with the default switch
handlerName = 'SW_IDEALPAIRSWITCH'
# Holds the current active PacketIn listener object
listeners = None
# Constructor and sets default handler to Ideal Pair Switch
def __init__(self, handlerName='SW_IDEALPAIRSWITCH'):
log.debug("Initializing switch %s." % handlerName)
# Method for just sending a packet to any port (broadcast by default)
def send_packet(self, event, dst_port=of.OFPP_ALL):
msg = of.ofp_packet_out(in_port=event.ofp.in_port)
if event.ofp.buffer_id != -1 and event.ofp.buffer_id is not None:
msg.buffer_id = event.ofp.buffer_id
else:
      if event.ofp.data is None:
        return
msg.data = event.ofp.data
msg.actions.append(of.ofp_action_output(port=dst_port))
event.connection.send(msg)
# Optimal method for resending a packet
def resend_packet(self, event, dst_port=of.OFPP_ALL):
msg = of.ofp_packet_out(data=event.ofp)
msg.actions.append(of.ofp_action_output(port=dst_port))
event.connection.send(msg)
# DUMB HUB Implementation
# This is an implementation of a broadcast hub but all packets go
# to the controller since no flows are installed.
def _handle_dumbhub_packetin(self, event):
# Just send an instruction to the switch to send packet to all ports
packet = event.parsed
self.resend_packet(event, of.OFPP_ALL)
log.debug("Broadcasting %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
# PAIR-WISE MATCHING HUB Implementation
# This is an implementation of a broadcast hub with flows installed.
def _handle_pairhub_packetin(self, event):
packet = event.parsed
# Create flow that simply broadcasts any packet received
msg = of.ofp_flow_mod()
msg.data = event.ofp
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_src = packet.src
msg.match.dl_dst = packet.dst
msg.actions.append(of.ofp_action_output(port=of.OFPP_ALL))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
# LAZY HUB Implementation (How hubs typically are)
# This is an implementation of a broadcast hub with flows installed.
def _handle_lazyhub_packetin(self, event):
packet = event.parsed
# Create flow that simply broadcasts any packet received
msg = of.ofp_flow_mod()
msg.data = event.ofp
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.actions.append(of.ofp_action_output(port=of.OFPP_ALL))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i" %
("ff:ff:ff:ff:ff:ff", event.ofp.in_port, "ff:ff:ff:ff:ff:ff",
of.OFPP_ALL))
# BAD SWITCH Implementation
# This is an obvious but problematic implementation of switch that
# routes based on destination MAC addresses.
def _handle_badswitch_packetin(self, event):
packet = event.parsed
# Learn the source and fill up routing table
self.table[(event.connection, packet.src)] = event.port
# install appropriate flow rule when learned
msg = of.ofp_flow_mod()
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_dst = packet.src
msg.actions.append(of.ofp_action_output(port=event.port))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i" %
("ff:ff:ff:ff:ff:ff", event.ofp.in_port, packet.src, event.port))
# determine if appropriate destination route is available
dst_port = self.table.get((event.connection, packet.dst))
if dst_port is None:
# We don't know where the destination is yet. So, we'll just
# send the packet out all ports (except the one it came in on!)
# and hope the destination is out there somewhere. :)
# To send out all ports, we can use either of the special ports
# OFPP_FLOOD or OFPP_ALL. We'd like to just use OFPP_FLOOD,
# but it's not clear if all switches support this. :(
self.resend_packet(event, of.OFPP_ALL)
log.debug("Broadcasting %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
else:
# This is the packet that just came in -- we want send the packet
# if we know the destination.
self.resend_packet(event, dst_port)
log.debug("Sending %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, dst_port))
# PAIR-WISE MATCH SWITCH Implementation
  # This is an implementation of a pair match switch. This only matches
# source and destination MAC addresses. Whenever a new source
# destination MAC address is detected it then add a new flow
# identifying the source destination pair. The routing table is updated
# using the detected destination MAC address to the destination port.
def _handle_pairswitch_packetin (self, event):
packet = event.parsed
# Learn the source and fill up routing table
self.table[(event.connection, packet.src)] = event.port
dst_port = self.table.get((event.connection, packet.dst))
if dst_port is None:
# We don't know where the destination is yet. So, we'll just
# send the packet out all ports (except the one it came in on!)
# and hope the destination is out there somewhere. :)
# To send out all ports, we can use either of the special ports
# OFPP_FLOOD or OFPP_ALL. We'd like to just use OFPP_FLOOD,
# but it's not clear if all switches support this. :(
self.resend_packet(event, of.OFPP_ALL)
log.debug("Broadcasting %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
else:
# This is the packet that just came in -- we want to
# install the rule and also resend the packet.
msg = of.ofp_flow_mod()
msg.data = event.ofp
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_src = packet.src
msg.match.dl_dst = packet.dst
msg.actions.append(of.ofp_action_output(port=dst_port))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, dst_port))
# SMARTER PAIR-WISE MATCH SWITCH Implementation
# This is an implementation of an ideal pair switch. This optimizes the
# previous example by adding both direction in one entry.
def _handle_idealpairswitch_packetin(self, event):
packet = event.parsed
# Learn the source and fill up routing table
self.table[(event.connection, packet.src)] = event.port
dst_port = self.table.get((event.connection, packet.dst))
if dst_port is None:
# We don't know where the destination is yet. So, we'll just
# send the packet out all ports (except the one it came in on!)
# and hope the destination is out there somewhere. :)
# To send out all ports, we can use either of the special ports
# OFPP_FLOOD or OFPP_ALL. We'd like to just use OFPP_FLOOD,
# but it's not clear if all switches support this. :(
self.resend_packet(event, of.OFPP_ALL)
log.debug("Broadcasting %s.%i -> %s.%i" %
(packet.src, event.ofp.in_port, packet.dst, of.OFPP_ALL))
else:
# Since we know the switch ports for both the source and dest
# MACs, we can install rules for both directions.
msg = of.ofp_flow_mod()
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_dst = packet.src
msg.match.dl_src = packet.dst
msg.actions.append(of.ofp_action_output(port=event.port))
event.connection.send(msg)
# This is the packet that just came in -- we want to
# install the rule and also resend the packet.
msg = of.ofp_flow_mod()
msg.data = event.ofp
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.match.dl_src = packet.src
msg.match.dl_dst = packet.dst
msg.actions.append(of.ofp_action_output(port=dst_port))
event.connection.send(msg)
log.debug("Installing %s.%i -> %s.%i AND %s.%i -> %s.%i" %
(packet.dst, dst_port, packet.src, event.ofp.in_port,
packet.src, event.ofp.in_port, packet.dst, dst_port))
# Define the proper handler
def _set_handler_name (self, handlerName='SW_IDEALPAIRSWITCH'):
self.handlerName = handlerName
# Function to grab the appropriate handler
def _get_handler (self, event):
return self.swMap[self.handlerName](self, event)
""" Here are functions that are meant to be called directly """
# Here is a function to list all possible switches
def list_available_listeners(self):
for key in self.swMap.iterkeys():
log.info("%s" % key)
# Here is a function to displaying possible methods
def help(self):
log.info("Methods available: %s %s %s %s %s" %
('list_available_listeners()',
             'attach_packetin_listener(handlerName = \'SW_IDEALPAIRSWITCH\')',
'detach_packetin_listener()',
'clear_all_flows()',
'clear_flows(connection)'))
# Here is a function to attach the listener give the default handerName
def attach_packetin_listener (self, handlerName='SW_IDEALPAIRSWITCH'):
self._set_handler_name(handlerName)
self.listeners = core.openflow.addListenerByName("PacketIn",
self._get_handler)
log.debug("Attach switch %s." % handlerName)
# Here is a function to remove the listener
def detach_packetin_listener (self):
core.openflow.removeListener(self.listeners)
log.debug("Detaching switch %s." % self.handlerName)
# Function to clear all flows from a specified switch given
# a connection object
def clear_flows (self, connection):
msg = of.ofp_flow_mod(match=of.ofp_match(), command=of.OFPFC_DELETE)
connection.send(msg)
log.debug("Clearing all flows from %s." %
dpidToStr(connection.dpid))
# Function to clear all flows from all switches
def clear_all_flows (self):
msg = of.ofp_flow_mod(match=of.ofp_match(), command=of.OFPFC_DELETE)
for connection in core.openflow._connections.values():
connection.send(msg)
log.debug("Clearing all flows from %s." %
dpidToStr(connection.dpid))
# Define various switch handlers
swMap = {
'SW_DUMBHUB' : _handle_dumbhub_packetin,
'SW_PAIRHUB' : _handle_pairhub_packetin,
'SW_LAZYHUB' : _handle_lazyhub_packetin,
'SW_BADSWITCH' : _handle_badswitch_packetin,
'SW_PAIRSWITCH' : _handle_pairswitch_packetin,
'SW_IDEALPAIRSWITCH' : _handle_idealpairswitch_packetin,
}
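  # To experiment with another forwarding behaviour, add a matching
  # _handle_*_packetin method above and register it in this map. The entry
  # below is only a hypothetical sketch, not part of the tutorial:
  #   'SW_MYSWITCH' : _handle_myswitch_packetin,
  # It could then be attached at runtime with
  # attach_packetin_listener('SW_MYSWITCH').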
# Function that is invoked upon load to ensure that listeners are
# registered appropriately. Select the hub/switch you would like to
# test below. Only one at a time, please.
def launch ():
# create new tutorial class object using the IDEAL PAIR SWITCH as default
MySwitch = SwitchTutorial('SW_IDEALPAIRSWITCH')
# add this class into core.Interactive.variables to ensure we can access
# it in the CLI.
core.Interactive.variables['MySwitch'] = MySwitch
# attach the corresponding default listener
MySwitch.attach_packetin_listener()
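# Example interactive session (a sketch: it assumes this module is launched
# together with POX's "py" component so that core.Interactive provides a
# prompt, and <this_module> stands for whatever name this file is saved
# under):
#   ./pox.py py <this_module>
#   POX> MySwitch.help()
#   POX> MySwitch.detach_packetin_listener()
#   POX> MySwitch.attach_packetin_listener('SW_LAZYHUB')
#   POX> MySwitch.clear_all_flows()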
| apache-2.0 | 2,650,485,851,227,177,500 | 37.627273 | 75 | 0.688633 | false | 3.508671 | false | false | false |
quantopian/zipline | tests/data/test_fx.py | 1 | 10952 | import itertools
import pandas as pd
import numpy as np
from zipline.data.fx import DEFAULT_FX_RATE
from zipline.testing.predicates import assert_equal
import zipline.testing.fixtures as zp_fixtures
class _FXReaderTestCase(zp_fixtures.WithFXRates,
zp_fixtures.ZiplineTestCase):
"""
Base class for testing FXRateReader implementations.
To test a new FXRateReader implementation, subclass from this base class
and implement the ``reader`` property, returning an FXRateReader that uses
the data stored in ``cls.fx_rates``.
"""
FX_RATES_START_DATE = pd.Timestamp('2014-01-01', tz='UTC')
FX_RATES_END_DATE = pd.Timestamp('2014-01-31', tz='UTC')
# Calendar to which exchange rates data is aligned.
FX_RATES_CALENDAR = '24/5'
# Currencies between which exchange rates can be calculated.
FX_RATES_CURRENCIES = ["USD", "CAD", "GBP", "EUR"]
# Fields for which exchange rate data is present.
FX_RATES_RATE_NAMES = ["london_mid", "tokyo_mid"]
# Field to be used on a lookup of `'default'`.
FX_RATES_DEFAULT_RATE = 'london_mid'
# Used by WithFXRates.
@classmethod
def make_fx_rates(cls, fields, currencies, sessions):
ndays = len(sessions)
# Give each currency a timeseries of "true" values, and compute fx
# rates as ratios between true values.
reference = pd.DataFrame({
'USD': np.linspace(1.0, 2.0, num=ndays),
'CAD': np.linspace(2.0, 3.0, num=ndays),
'GBP': np.linspace(3.0, 4.0, num=ndays),
'EUR': np.linspace(4.0, 5.0, num=ndays),
}, index=sessions, columns=currencies)
cls.tokyo_mid_rates = cls.make_fx_rates_from_reference(reference)
# Make london_mid different by adding +1 to reference values.
cls.london_mid_rates = cls.make_fx_rates_from_reference(reference + 1)
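        # Worked example (hedged, since make_fx_rates_from_reference is
        # defined in the WithFXRates fixture rather than here and its exact
        # convention is assumed): with the reference values above, a
        # tokyo_mid rate for a CAD base quoted in USD on the first session
        # would come from the ratio 2.0 / 1.0 == 2.0, and the london_mid
        # equivalent from (2.0 + 1) / (1.0 + 1) == 1.5.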
# This will be set as cls.fx_rates by WithFXRates.
return {
'london_mid': cls.london_mid_rates,
'tokyo_mid': cls.tokyo_mid_rates,
}
@property
def reader(self):
raise NotImplementedError("Must be implemented by test suite.")
def test_scalar_lookup(self):
reader = self.reader
rates = self.FX_RATES_RATE_NAMES
quotes = self.FX_RATES_CURRENCIES
bases = self.FX_RATES_CURRENCIES + [None]
dates = pd.date_range(
self.FX_RATES_START_DATE - pd.Timedelta('1 day'),
self.FX_RATES_END_DATE + pd.Timedelta('1 day'),
)
cases = itertools.product(rates, quotes, bases, dates)
for rate, quote, base, dt in cases:
dts = pd.DatetimeIndex([dt], tz='UTC')
bases = np.array([base], dtype=object)
result = reader.get_rates(rate, quote, bases, dts)
assert_equal(result.shape, (1, 1))
result_scalar = result[0, 0]
if dt >= self.FX_RATES_START_DATE and quote == base:
assert_equal(result_scalar, 1.0)
expected = self.get_expected_fx_rate_scalar(rate, quote, base, dt)
assert_equal(result_scalar, expected)
col_result = reader.get_rates_columnar(rate, quote, bases, dts)
assert_equal(col_result, result.ravel())
alt_result_scalar = reader.get_rate_scalar(rate, quote, base, dt)
assert_equal(result_scalar, alt_result_scalar)
def test_2d_lookup(self):
rand = np.random.RandomState(42)
dates = pd.date_range(
self.FX_RATES_START_DATE - pd.Timedelta('2 days'),
self.FX_RATES_END_DATE + pd.Timedelta('2 days'),
)
rates = self.FX_RATES_RATE_NAMES + [DEFAULT_FX_RATE]
possible_quotes = self.FX_RATES_CURRENCIES
possible_bases = self.FX_RATES_CURRENCIES + [None]
# For every combination of rate name and quote currency...
for rate, quote in itertools.product(rates, possible_quotes):
# Choose N random distinct days...
for ndays in 1, 2, 7, 20:
dts_raw = rand.choice(dates, ndays, replace=False)
dts = pd.DatetimeIndex(dts_raw, tz='utc').sort_values()
# Choose M random possibly-non-distinct currencies...
for nbases in 1, 2, 10, 200:
bases = (
rand.choice(possible_bases, nbases, replace=True)
.astype(object)
)
# ...And check that we get the expected result when querying
# for those dates/currencies.
result = self.reader.get_rates(rate, quote, bases, dts)
expected = self.get_expected_fx_rates(rate, quote, bases, dts)
assert_equal(result, expected)
def test_columnar_lookup(self):
rand = np.random.RandomState(42)
dates = pd.date_range(
self.FX_RATES_START_DATE - pd.Timedelta('2 days'),
self.FX_RATES_END_DATE + pd.Timedelta('2 days'),
)
rates = self.FX_RATES_RATE_NAMES + [DEFAULT_FX_RATE]
possible_quotes = self.FX_RATES_CURRENCIES
possible_bases = self.FX_RATES_CURRENCIES + [None]
reader = self.reader
# For every combination of rate name and quote currency...
for rate, quote in itertools.product(rates, possible_quotes):
for N in 1, 2, 10, 200:
# Choose N (date, base) pairs randomly with replacement.
dts_raw = rand.choice(dates, N, replace=True)
dts = pd.DatetimeIndex(dts_raw, tz='utc')
bases = (
rand.choice(possible_bases, N, replace=True)
.astype(object)
)
# ... And check that we get the expected result when querying
# for those dates/currencies.
result = reader.get_rates_columnar(rate, quote, bases, dts)
expected = self.get_expected_fx_rates_columnar(
rate,
quote,
bases,
dts,
)
assert_equal(result, expected)
def test_load_everything(self):
# Sanity check for the randomized tests above: check that we get
# exactly the rates we set up in make_fx_rates if we query for their
# indices.
for currency in self.FX_RATES_CURRENCIES:
tokyo_rates = self.tokyo_mid_rates[currency]
tokyo_result = self.reader.get_rates(
'tokyo_mid',
currency,
tokyo_rates.columns,
tokyo_rates.index,
)
assert_equal(tokyo_result, tokyo_rates.values)
london_rates = self.london_mid_rates[currency]
london_result = self.reader.get_rates(
'london_mid',
currency,
london_rates.columns,
london_rates.index,
)
default_result = self.reader.get_rates(
DEFAULT_FX_RATE,
currency,
london_rates.columns,
london_rates.index,
)
assert_equal(london_result, default_result)
assert_equal(london_result, london_rates.values)
def test_read_before_start_date(self):
# Reads from before the start of our data should emit NaN. We do this
# because, for some Pipeline loaders, it's hard to put a lower bound on
# input asof dates, so we end up making queries for asof_dates that
# might be before the start of FX data. When that happens, we want to
# emit NaN, but we don't want to fail.
for bad_date in (self.FX_RATES_START_DATE - pd.Timedelta('1 day'),
self.FX_RATES_START_DATE - pd.Timedelta('1000 days')):
for rate in self.FX_RATES_RATE_NAMES:
quote = 'USD'
bases = np.array(['CAD'], dtype=object)
dts = pd.DatetimeIndex([bad_date])
result = self.reader.get_rates(rate, quote, bases, dts)
assert_equal(result.shape, (1, 1))
assert_equal(np.nan, result[0, 0])
def test_read_after_end_date(self):
# Reads from **after** the end of our data, on the other hand, should
# fail. We can always upper bound the relevant asofs that we're
# interested in, and having fx rates forward-fill past the end of data
# is confusing and takes a while to debug.
for bad_date in (self.FX_RATES_END_DATE + pd.Timedelta('1 day'),
self.FX_RATES_END_DATE + pd.Timedelta('1000 days')):
for rate in self.FX_RATES_RATE_NAMES:
quote = 'USD'
bases = np.array(['CAD'], dtype=object)
dts = pd.DatetimeIndex([bad_date])
result = self.reader.get_rates(rate, quote, bases, dts)
assert_equal(result.shape, (1, 1))
expected = self.get_expected_fx_rate_scalar(
rate,
quote,
'CAD',
self.FX_RATES_END_DATE,
)
assert_equal(expected, result[0, 0])
def test_read_unknown_base(self):
for rate in self.FX_RATES_RATE_NAMES:
quote = 'USD'
for unknown_base in 'XXX', None:
bases = np.array([unknown_base], dtype=object)
dts = pd.DatetimeIndex([self.FX_RATES_START_DATE])
result = self.reader.get_rates(rate, quote, bases, dts)[0, 0]
assert_equal(result, np.nan)
class InMemoryFXReaderTestCase(_FXReaderTestCase):
@property
def reader(self):
return self.in_memory_fx_rate_reader
class HDF5FXReaderTestCase(zp_fixtures.WithTmpDir,
_FXReaderTestCase):
@classmethod
def init_class_fixtures(cls):
super(HDF5FXReaderTestCase, cls).init_class_fixtures()
path = cls.tmpdir.getpath('fx_rates.h5')
cls.h5_fx_reader = cls.write_h5_fx_rates(path)
@property
def reader(self):
return self.h5_fx_reader
class FastGetLocTestCase(zp_fixtures.ZiplineTestCase):
def test_fast_get_loc_ffilled(self):
dts = pd.to_datetime([
'2014-01-02',
'2014-01-03',
# Skip 2014-01-04
'2014-01-05',
'2014-01-06',
])
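        # fast_get_loc_ffilled is expected to match DatetimeIndex.get_loc
        # with method='ffill': for example, looking up 2014-01-04 (absent
        # from dts) should resolve to the position of 2014-01-03.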
for dt in pd.date_range('2014-01-02', '2014-01-08'):
result = zp_fixtures.fast_get_loc_ffilled(dts.values, dt.asm8)
expected = dts.get_loc(dt, method='ffill')
assert_equal(result, expected)
with self.assertRaises(KeyError):
dts.get_loc(pd.Timestamp('2014-01-01'), method='ffill')
with self.assertRaises(KeyError):
zp_fixtures.fast_get_loc_ffilled(dts, pd.Timestamp('2014-01-01'))
| apache-2.0 | 8,554,352,786,142,717,000 | 37.293706 | 79 | 0.56985 | false | 3.726438 | true | false | false |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/network/ipset_binding.py | 1 | 3601 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class ipset_binding(base_resource):
""" Binding class showing the resources that can be bound to ipset_binding.
"""
def __init__(self) :
self._name = ""
self.ipset_nsip_binding = []
self.ipset_nsip6_binding = []
@property
def name(self) :
"""Name of the IP set whose details you want to display.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the IP set whose details you want to display.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def ipset_nsip_bindings(self) :
"""nsip that can be bound to ipset.
"""
try :
return self._ipset_nsip_binding
except Exception as e:
raise e
@property
def ipset_nsip6_bindings(self) :
"""nsip6 that can be bound to ipset.
"""
try :
return self._ipset_nsip6_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(ipset_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.ipset_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name) :
""" Use this API to fetch ipset_binding resource.
"""
try :
if type(name) is not list :
obj = ipset_binding()
obj.name = name
response = obj.get_resource(service)
else :
			if name and len(name) > 0 :
				# Pre-allocate the response list before filling it in per name.
				response = [None] * len(name)
				obj = [ipset_binding() for _ in range(len(name))]
				for i in range(len(name)) :
					obj[i].name = name[i]
					response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
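# Example usage (a sketch, not part of the generated SDK): assuming an
# already logged-in nitro_service session named 'client' and an existing IP
# set hypothetically named "my_ipset", its bindings could be fetched with
#   result = ipset_binding.get(client, "my_ipset")
# and then inspected through result.ipset_nsip_bindings and
# result.ipset_nsip6_bindings (depending on the service response the result
# may come back wrapped in a list).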
class ipset_binding_response(base_response) :
def __init__(self, length=1) :
self.ipset_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.ipset_binding = [ipset_binding() for _ in range(length)]
| apache-2.0 | -326,166,921,763,444,100 | 28.040323 | 115 | 0.687587 | false | 3.229596 | false | false | false |