odicraig/kodi2odi | addons/plugin.video.OblivionStreams/genesisresolvers.py | gpl-3.0
# -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib,urllib2,urlparse,re,os,sys,xbmcaddon
try:
import CommonFunctions as common
except:
import commonfunctionsdummy as common
try:
import json
except:
import simplejson as json
class get(object):
def __init__(self, url):
self.result = self.worker(url)
def worker(self, url):
try:
pz = premiumize().resolve(url)
if not pz == None: return pz
rd = realdebrid().resolve(url)
if not rd == None: return rd
if url.startswith('rtmp'):
if len(re.compile('\s*timeout=(\d*)').findall(url)) == 0: url += ' timeout=10'
return url
u = urlparse.urlparse(url).netloc
u = u.replace('www.', '').replace('embed.', '')
u = u.lower()
import sys, inspect
r = inspect.getmembers(sys.modules[__name__], inspect.isclass)
r = [i for i in r if hasattr(i[1], 'info') and u in eval(i[0])().info()['netloc']][0][0]
r = eval(r)().resolve(url)
if r == None: return r
elif type(r) == list: return r
elif not r.startswith('http'): return r
try: h = dict(urlparse.parse_qsl(r.rsplit('|', 1)[1]))
            except: h = {}
h.update({'Referer': url, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0'})
r = '%s|%s' % (r.split('|')[0], urllib.urlencode(h))
return r
except:
return url
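# How resolution flows (summary of get.worker above): premium services
# (premiumize, realdebrid) get first shot; rtmp URLs pass through with a
# timeout appended; otherwise inspect.getmembers() scans this module for
# the resolver class whose info()['netloc'] matches the URL's host. The
# final '<url>|<urlencoded headers>' form is the Kodi/XBMC convention for
# attaching HTTP headers to a playable URL.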
class getUrl(object):
def __init__(self, url, close=True, proxy=None, post=None, headers=None, mobile=False, referer=None, cookie=None, output='', timeout='15'):
handlers = []
if not proxy == None:
handlers += [urllib2.ProxyHandler({'http':'%s' % (proxy)}), urllib2.HTTPHandler]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
if output == 'cookie' or not close == True:
import cookielib
cookies = cookielib.LWPCookieJar()
handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
try:
if sys.version_info < (2, 7, 9): raise Exception()
import ssl; ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
handlers += [urllib2.HTTPSHandler(context=ssl_context)]
opener = urllib2.build_opener(*handlers)
opener = urllib2.install_opener(opener)
except:
pass
        if headers is None: headers = {}
if 'User-Agent' in headers:
pass
elif not mobile == True:
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0'
else:
headers['User-Agent'] = 'Apple-iPhone/701.341'
if 'referer' in headers:
pass
elif referer == None:
headers['referer'] = url
else:
headers['referer'] = referer
if not 'Accept-Language' in headers:
headers['Accept-Language'] = 'en-US'
if 'cookie' in headers:
pass
elif not cookie == None:
headers['cookie'] = cookie
request = urllib2.Request(url, data=post, headers=headers)
response = urllib2.urlopen(request, timeout=int(timeout))
if output == 'cookie':
result = []
for c in cookies: result.append('%s=%s' % (c.name, c.value))
result = "; ".join(result)
elif output == 'geturl':
result = response.geturl()
else:
result = response.read()
if close == True:
response.close()
self.result = result
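# Usage sketch (hypothetical URL): getUrl is a thin urllib2 wrapper whose
# constructor performs the fetch and stores the body in .result. Note that
# urllib2.install_opener() mutates process-global state, so any proxy or
# cookie handler built above affects every later urllib2 request too.
#   html = getUrl('http://example.com/', timeout='10').result
#   cookie = getUrl('http://example.com/', output='cookie').result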
class captcha:
def worker(self, data):
self.captcha = {}
self.solvemedia(data)
if not self.type == None: return self.captcha
self.recaptcha(data)
if not self.type == None: return self.captcha
self.capimage(data)
if not self.type == None: return self.captcha
self.numeric(data)
if not self.type == None: return self.captcha
def solvemedia(self, data):
try:
url = common.parseDOM(data, "iframe", ret="src")
url = [i for i in url if 'api.solvemedia.com' in i]
if len(url) > 0: self.type = 'solvemedia'
else: self.type = None ; return
result = getUrl(url[0], referer='').result
response = common.parseDOM(result, "iframe", ret="src")
response += common.parseDOM(result, "img", ret="src")
response = [i for i in response if '/papi/media' in i][0]
response = 'http://api.solvemedia.com' + response
response = self.keyboard(response)
post = {}
f = common.parseDOM(result, "form", attrs = { "action": "verify.noscript" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'adcopy_response': response})
getUrl('http://api.solvemedia.com/papi/verify.noscript', post=urllib.urlencode(post)).result
self.captcha.update({'adcopy_challenge': post['adcopy_challenge'], 'adcopy_response': 'manual_challenge'})
except:
pass
def recaptcha(self, data):
try:
url = []
if data.startswith('http://www.google.com'): url += [data]
url += common.parseDOM(data, "script", ret="src", attrs = { "type": "text/javascript" })
url = [i for i in url if 'http://www.google.com' in i]
if len(url) > 0: self.type = 'recaptcha'
else: self.type = None ; return
result = getUrl(url[0]).result
challenge = re.compile("challenge\s+:\s+'(.+?)'").findall(result)[0]
response = 'http://www.google.com/recaptcha/api/image?c=' + challenge
response = self.keyboard(response)
self.captcha.update({'recaptcha_challenge_field': challenge, 'recaptcha_challenge': challenge, 'recaptcha_response_field': response, 'recaptcha_response': response})
except:
pass
def capimage(self, data):
try:
url = common.parseDOM(data, "img", ret="src")
url = [i for i in url if 'captcha' in i]
if len(url) > 0: self.type = 'capimage'
else: self.type = None ; return
response = self.keyboard(url[0])
self.captcha.update({'code': response})
except:
pass
def numeric(self, data):
try:
url = re.compile("left:(\d+)px;padding-top:\d+px;'>&#(.+?);<").findall(data)
if len(url) > 0: self.type = 'numeric'
else: self.type = None ; return
result = sorted(url, key=lambda ltr: int(ltr[0]))
response = ''.join(str(int(num[1])-48) for num in result)
self.captcha.update({'code': response})
except:
pass
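    # Worked example for numeric() above: the page positions digit glyphs
    # with CSS and encodes them as HTML entities, e.g. "left:10px...&#53;"
    # and "left:2px...&#52;". Sorting by the left offset and subtracting 48
    # from each entity code (ord('0') == 48) yields the code "45".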
def keyboard(self, response):
try:
import os,xbmc,xbmcgui,xbmcaddon,xbmcvfs
dataPath = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo("profile"))
i = os.path.join(dataPath.decode("utf-8"),'img')
f = xbmcvfs.File(i, 'w')
f.write(getUrl(response).result)
f.close()
f = xbmcgui.ControlImage(450,5,375,115, i)
d = xbmcgui.WindowDialog()
d.addControl(f)
xbmcvfs.delete(i)
d.show()
xbmc.sleep(3000)
t = 'Type the letters in the image'
c = common.getUserInput(t, '')
d.close()
return c
except:
return
class unwise:
def worker(self, str_eval):
page_value=""
try:
ss="w,i,s,e=("+str_eval+')'
exec (ss)
page_value=self.__unwise(w,i,s,e)
except: return
return page_value
def __unwise(self, w, i, s, e):
lIll = 0;
ll1I = 0;
Il1l = 0;
ll1l = [];
l1lI = [];
while True:
if (lIll < 5):
l1lI.append(w[lIll])
elif (lIll < len(w)):
ll1l.append(w[lIll]);
lIll+=1;
if (ll1I < 5):
l1lI.append(i[ll1I])
elif (ll1I < len(i)):
ll1l.append(i[ll1I])
ll1I+=1;
if (Il1l < 5):
l1lI.append(s[Il1l])
elif (Il1l < len(s)):
ll1l.append(s[Il1l]);
Il1l+=1;
if (len(w) + len(i) + len(s) + len(e) == len(ll1l) + len(l1lI) + len(e)):
break;
lI1l = ''.join(ll1l)
I1lI = ''.join(l1lI)
ll1I = 0;
l1ll = [];
for lIll in range(0,len(ll1l),2):
ll11 = -1;
if ( ord(I1lI[ll1I]) % 2):
ll11 = 1;
l1ll.append(chr( int(lI1l[lIll: lIll+2], 36) - ll11));
ll1I+=1;
if (ll1I >= len(l1lI)):
ll1I = 0;
ret=''.join(l1ll)
if 'eval(function(w,i,s,e)' in ret:
ret=re.compile('eval\(function\(w,i,s,e\).*}\((.*?)\)').findall(ret)[0]
return self.worker(ret)
else:
return ret
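# How unwise() above decodes: w, i and s are consumed in lockstep -- the
# first five characters of each feed a short key string, while the rest is
# concatenated into a base-36 digit stream. Each two-digit group decodes
# to one character, nudged by +/-1 depending on the parity of the matching
# key character, and the routine recurses while the output still contains
# another packed eval(function(w,i,s,e)...) payload.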
class js:
def worker(self, script):
aSplit = script.split(";',")
p = str(aSplit[0])
aSplit = aSplit[1].split(",")
a = int(aSplit[0])
c = int(aSplit[1])
k = aSplit[2].split(".")[0].replace("'", '').split('|')
e = ''
d = ''
sUnpacked = str(self.__unpack(p, a, c, k, e, d))
sUnpacked = sUnpacked.replace('\\', '')
url = self.__parse(sUnpacked)
return url
def __unpack(self, p, a, c, k, e, d):
while (c > 1):
c = c -1
if (k[c]):
p = re.sub('\\b' + str(self.__itoa(c, a)) +'\\b', k[c], p)
return p
def __itoa(self, num, radix):
result = ""
while num > 0:
result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
num /= radix
return result
def __parse(self, sUnpacked):
url = re.compile("'file' *, *'(.+?)'").findall(sUnpacked)
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(sUnpacked)
url += re.compile("playlist=(.+?)&").findall(sUnpacked)
url += common.parseDOM(sUnpacked, "embed", ret="src")
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[-1].split('://', 1)[-1]
return url
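# How js() above works: it is a minimal unpacker for Dean Edwards style
# "eval(function(p,a,c,k,e,d)...)" packed JavaScript. p is the packed
# source with placeholder tokens, a the radix, c the word count and k the
# dictionary; __unpack() substitutes each base-a token back with k[c], and
# __parse() then greps the restored source for file/playlist/embed URLs.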
class premiumize:
def __init__(self):
self.user = xbmcaddon.Addon().getSetting("premiumize_user")
self.password = xbmcaddon.Addon().getSetting("premiumize_password")
def info(self):
return {
'netloc': ['bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net'],
'host': ['Bitshare', 'Filefactory', 'K2S', 'Oboom', 'Rapidgator', 'Uploaded'],
'quality': 'High',
'captcha': False,
'a/c': True
}
def status(self):
if (self.user == '' or self.password == ''): return False
else: return True
def hosts(self):
try:
if self.status() == False: raise Exception()
            url = 'http://api.premiumize.me/pm-api/v1.php?method=hosterlist&params[login]=%s&params[pass]=%s' % (self.user, self.password)
result = getUrl(url).result
pz = json.loads(result)['result']['hosterlist']
pz = [i.rsplit('.' ,1)[0].lower() for i in pz]
return pz
except:
return
def resolve(self, url):
try:
if self.status() == False: raise Exception()
            url = 'http://api.premiumize.me/pm-api/v1.php?method=directdownloadlink&params[login]=%s&params[pass]=%s&params[link]=%s' % (self.user, self.password, urllib.quote_plus(url))
result = getUrl(url, close=False).result
url = json.loads(result)['result']['location']
return url
except:
return
class realdebrid:
def __init__(self):
self.user = xbmcaddon.Addon().getSetting("realdedrid_user")
self.password = xbmcaddon.Addon().getSetting("realdedrid_password")
def info(self):
return {
'netloc': ['bitshare.com', 'filefactory.com', 'k2s.cc', 'oboom.com', 'rapidgator.net', 'uploaded.net'],
'host': ['Bitshare', 'Filefactory', 'K2S', 'Oboom', 'Rapidgator', 'Uploaded'],
'quality': 'High',
'captcha': False,
'a/c': True
}
def status(self):
if (self.user == '' or self.password == ''): return False
else: return True
def hosts(self):
try:
if self.status() == False: raise Exception()
url = 'http://real-debrid.com/api/hosters.php'
result = getUrl(url).result
rd = json.loads('[%s]' % result)
rd = [i.rsplit('.' ,1)[0].lower() for i in rd]
return rd
except:
return
def resolve(self, url):
try:
if self.status() == False: raise Exception()
login_data = urllib.urlencode({'user' : self.user, 'pass' : self.password})
login_link = 'http://real-debrid.com/ajax/login.php?%s' % login_data
result = getUrl(login_link, close=False).result
result = json.loads(result)
error = result['error']
if not error == 0: raise Exception()
url = 'http://real-debrid.com/ajax/unrestrict.php?link=%s' % url
url = url.replace('filefactory.com/stream/', 'filefactory.com/file/')
result = getUrl(url).result
result = json.loads(result)
url = result['generated_links'][0][-1]
return url
except:
return
class _180upload:
def info(self):
return {
'netloc': ['180upload.com'],
'host': ['180upload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://180upload.com/embed-%s.html' % url
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "id": "captchaForm" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class allmyvideos:
def info(self):
return {
'netloc': ['allmyvideos.net'],
'host': ['Allmyvideos'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://allmyvideos.net/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('"file" *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class bestreams:
def info(self):
return {
'netloc': ['bestreams.net'],
'host': ['Bestreams'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://bestreams.net/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class clicknupload:
def info(self):
return {
'netloc': ['clicknupload.com'],
'host': ['Clicknupload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
try: post.update(captcha().worker(result))
except: pass
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = common.parseDOM(result, "a", ret="onClick")
url = [i for i in url if i.startswith('window.open')][0]
url = re.compile('[\'|\"](.+?)[\'|\"]').findall(url)[0]
return url
except:
return
class cloudzilla:
def info(self):
return {
'netloc': ['cloudzilla.to'],
'host': ['Cloudzilla'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/share/file/', '/embed/')
result = getUrl(url).result
url = re.compile('var\s+vurl *= *"(http.+?)"').findall(result)[0]
return url
except:
return
class coolcdn:
def info(self):
return {
'netloc': ['movshare.net', 'novamov.com', 'nowvideo.eu', 'nowvideo.sx', 'videoweed.es'],
'host': ['Movshare', 'Novamov', 'Nowvideo', 'Videoweed'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
netloc = urlparse.urlparse(url).netloc
netloc = netloc.replace('www.', '').replace('embed.', '')
netloc = netloc.lower()
id = re.compile('//.+?/.+?/([\w]+)').findall(url)
id += re.compile('//.+?/.+?v=([\w]+)').findall(url)
id = id[0]
url = 'http://embed.%s/embed.php?v=%s' % (netloc, id)
result = getUrl(url).result
key = re.compile('flashvars.filekey=(.+?);').findall(result)[-1]
try: key = re.compile('\s+%s="(.+?)"' % key).findall(result)[-1]
except: pass
url = 'http://www.%s/api/player.api.php?key=%s&file=%s' % (netloc, key, id)
result = getUrl(url).result
url = re.compile('url=(.+?)&').findall(result)[0]
return url
except:
return
class daclips:
def info(self):
return {
'netloc': ['daclips.in'],
'host': ['Daclips'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class dailymotion:
def info(self):
return {
'netloc': ['dailymotion.com']
}
def resolve(self, url):
try:
id = re.compile('/video/([\w]+)').findall(url)[0]
u = 'http://www.dailymotion.com/sequence/full/%s' % id
result = getUrl(u).result
result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
content = re.compile('"content_type":"(.+?)"').findall(result)[0]
if content == 'live':
url = re.compile('"autoURL":"(.+?)"').findall(result)[0]
protocol = urlparse.parse_qs(urlparse.urlparse(url).query)['protocol'][0]
url = url.replace('protocol=%s' % protocol, 'protocol=hls')
url += '&redirect=0'
url = getUrl(url).result
return url
else:
u = 'http://www.dailymotion.com/embed/video/%s' % id
result = getUrl(u).result
result = urllib.unquote(result).replace('\\/', '/').replace('\n', '').replace('\'', '"').replace(' ', '')
url = []
try: url += [{'quality': 'HD', 'url': getUrl(re.compile('"stream_h264_ld_url":"(.+?)"').findall(result)[0], output='geturl').result}]
except: pass
try: url += [{'quality': 'SD', 'url': getUrl(re.compile('"stream_h264_hq_url":"(.+?)"').findall(result)[0], output='geturl').result}]
except: pass
if not url == []: return url
try: url += [{'quality': 'SD', 'url': getUrl(re.compile('"stream_h264_url":"(.+?)"').findall(result)[0], output='geturl').result}]
except: pass
if not url == []: return url
try: url += [{'quality': 'SD', 'url': getUrl(re.compile('"stream_h264_ld_url":"(.+?)"').findall(result)[0], output='geturl').result}]
except: pass
if url == []: return
return url
except:
return
class datemule:
def info(self):
return {
'netloc': ['datemule.com']
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[0]
return url
except:
return
class fastvideo:
def info(self):
return {
'netloc': ['fastvideo.in', 'faststream.in'],
'host': ['Fastvideo', 'Faststream'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://fastvideo.in/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class filehoot:
def info(self):
return {
'netloc': ['filehoot.com'],
'host': ['Filehoot'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://filehoot.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[0]
return url
except:
return
class filenuke:
def info(self):
return {
'netloc': ['filenuke.com', 'sharesix.com'],
'host': ['Filenuke', 'Sharesix'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
try: f = common.parseDOM(result, "form", attrs = { "method": "POST" })[0]
except: f = ''
k = common.parseDOM(f, "input", ret="name")
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile("var\s+lnk\d* *= *'(http.+?)'").findall(result)[0]
return url
except:
return
class googledocs:
def info(self):
return {
'netloc': ['docs.google.com', 'drive.google.com']
}
def resolve(self, url):
try:
url = url.split('/preview', 1)[0]
url = url.replace('drive.google.com', 'docs.google.com')
result = getUrl(url).result
result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
u = json.loads(result)
u = [i.split('|')[-1] for i in u.split(',')]
u = sum([self.tag(i) for i in u], [])
url = []
try: url += [[i for i in u if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(self, url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
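# Note on tag() above: the itag numbers are Google's video format IDs
# (e.g. 18 = 360p MP4, 22 = 720p MP4, 37 = 1080p MP4); the buckets simply
# collapse them into the coarse 1080p/HD/SD labels used elsewhere.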
class googleplus:
def info(self):
return {
'netloc': ['plus.google.com', 'picasaweb.google.com']
}
def resolve(self, url):
try:
result = getUrl(url).result
u = re.compile('"url"\s*:\s*"([^"]+)"\s*,\s*"height"\s*:\s*\d+\s*,\s*"width"\s*:\s*\d+\s*,\s*"type"\s*:\s*"video/').findall(result)
u += re.compile('"(http[s]*://.+?videoplayback[?].+?)"').findall(result)
u += re.compile('\d*,\d*,\d*,"(.+?)"').findall(result)
u = [i.replace('\\u003d','=').replace('\\u0026','&') for i in u][::-1]
u = sum([self.tag(i) for i in u], [])
url = []
try: url += [[i for i in u if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(self, url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
class gorillavid:
def info(self):
return {
'netloc': ['gorillavid.com', 'gorillavid.in'],
'host': ['Gorillavid'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://gorillavid.in/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=30)
response.close()
type = str(response.info()["Content-Type"])
if type == 'text/html': raise Exception()
return url
except:
return
class grifthost:
def info(self):
return {
'netloc': ['grifthost.com'],
'host': ['Grifthost'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://grifthost.com/embed-%s.html' % url
result = getUrl(url).result
try:
post = {}
f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
f = f.replace('"submit"', '"hidden"')
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
except:
pass
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class hugefiles:
def info(self):
return {
'netloc': ['hugefiles.net'],
'host': ['Hugefiles'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
f += common.parseDOM(result, "form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile('fileUrl\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
return url
except:
return
class ipithos:
def info(self):
return {
'netloc': ['ipithos.to'],
'host': ['Ipithos'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://ipithos.to/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class ishared:
def info(self):
return {
'netloc': ['ishared.eu'],
'host': ['iShared'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = re.compile('path *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class kingfiles:
def info(self):
return {
'netloc': ['kingfiles.net'],
'host': ['Kingfiles'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': ' '})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': ' '})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile("var\s+download_url *= *'(.+?)'").findall(result)[0]
return url
except:
return
class mailru:
def info(self):
return {
'netloc': ['mail.ru', 'my.mail.ru', 'videoapi.my.mail.ru']
}
def resolve(self, url):
try:
usr = re.compile('/mail/(.+?)/').findall(url)[0]
vid = re.compile('(\d*)[.]html').findall(url)[0]
url = 'http://videoapi.my.mail.ru/videos/mail/%s/_myvideo/%s.json?ver=0.2.60' % (usr, vid)
import requests
result = requests.get(url).content
cookie = requests.get(url).headers['Set-Cookie']
u = json.loads(result)['videos']
h = "|Cookie=%s" % urllib.quote(cookie)
url = []
try: url += [[{'quality': '1080p', 'url': i['url'] + h} for i in u if i['key'] == '1080p'][0]]
except: pass
try: url += [[{'quality': 'HD', 'url': i['url'] + h} for i in u if i['key'] == '720p'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i['url'] + h} for i in u if not (i['key'] == '1080p' or i ['key'] == '720p')][0]]
except: pass
if url == []: return
return url
except:
return
class mightyupload:
def info(self):
return {
'netloc': ['mightyupload.com'],
'host': ['Mightyupload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://www.mightyupload.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile("file *: *'(.+?)'").findall(result)
if len(url) > 0: return url[0]
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class mooshare:
def info(self):
return {
'netloc': ['mooshare.biz'],
'host': ['Mooshare'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://mooshare.biz/embed-%s.html?play=1&confirm=Close+Ad+and+Watch+as+Free+User' % url
result = getUrl(url).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class movdivx:
def info(self):
return {
'netloc': ['movdivx.com'],
'host': ['Movdivx'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://www.movdivx.com/%s' % url
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class movpod:
def info(self):
return {
'netloc': ['movpod.net', 'movpod.in'],
'host': ['Movpod'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = url.replace('/vid/', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://movpod.in/embed-%s.html' % url
result = getUrl(url).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
request = urllib2.Request(url)
response = urllib2.urlopen(request, timeout=30)
response.close()
type = str(response.info()["Content-Type"])
if type == 'text/html': raise Exception()
return url
except:
return
class movreel:
def info(self):
return {
'netloc': ['movreel.com'],
'host': ['Movreel'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
user = xbmcaddon.Addon().getSetting("movreel_user")
password = xbmcaddon.Addon().getSetting("movreel_password")
login = 'http://movreel.com/login.html'
post = {'op': 'login', 'login': user, 'password': password, 'redirect': url}
post = urllib.urlencode(post)
result = getUrl(url, close=False).result
result += getUrl(login, post=post, close=False).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
import time
request = urllib2.Request(url, post)
for i in range(0, 3):
try:
response = urllib2.urlopen(request, timeout=10)
result = response.read()
response.close()
url = re.compile('(<a .+?</a>)').findall(result)
url = [i for i in url if 'Download Link' in i][-1]
url = common.parseDOM(url, "a", ret="href")[0]
return url
except:
time.sleep(1)
except:
return
class mrfile:
def info(self):
return {
'netloc': ['mrfile.me'],
'host': ['Mrfile'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[-1]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = re.compile('(<a\s+href=.+?>Download\s+.+?</a>)').findall(result)[-1]
url = common.parseDOM(url, "a", ret="href")[0]
return url
except:
return
class mybeststream:
def info(self):
return {
'netloc': ['mybeststream.xyz']
}
def resolve(self, url):
try:
referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
page = url.replace(referer, '').replace('&referer=', '').replace('referer=', '')
result = getUrl(url, referer=referer).result
result = re.compile("}[(]('.+?' *, *'.+?' *, *'.+?' *, *'.+?')[)]").findall(result)[-1]
result = unwise().worker(result)
strm = re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
strm = [i for i in strm if i.startswith('rtmp')][0]
url = '%s pageUrl=%s live=1 timeout=10' % (strm, page)
return url
except:
return
class nosvideo:
def info(self):
return {
'netloc': ['nosvideo.com'],
'host': ['Nosvideo'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "method": "POST" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Free Download'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = re.compile('(eval.*?\)\)\))').findall(result)[0]
url = js().worker(result)
result = getUrl(url).result
url = common.parseDOM(result, "file")[0]
return url
except:
return
class openload:
def info(self):
return {
'netloc': ['openload.io'],
'host': ['Openload'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = common.parseDOM(result, "span", attrs = { "id": "realdownload" })[0]
url = common.parseDOM(url, "a", ret="href")[0]
return url
except:
return
class played:
def info(self):
return {
'netloc': ['played.to'],
'host': ['Played'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = url.replace('//', '/')
url = re.compile('/.+?/([\w]+)').findall(url)[0]
url = 'http://played.to/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class primeshare:
def info(self):
return {
'netloc': ['primeshare.tv'],
'host': ['Primeshare'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "video")[0]
url = common.parseDOM(url, "source", ret="src", attrs = { "type": ".+?" })[0]
return url
except:
return
class promptfile:
def info(self):
return {
'netloc': ['promptfile.com'],
'host': ['Promptfile'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "method": "post" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = common.parseDOM(result, "a", ret="href", attrs = { "class": "view_dl_link" })[0]
url = getUrl(url, output='geturl', post=post).result
return url
except:
return
class sharerepo:
def info(self):
return {
'netloc': ['sharerepo.com'],
'host': ['Sharerepo'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = re.compile("file *: *'(http.+?)'").findall(result)[-1]
return url
except:
return
class speedvideo:
def info(self):
return {
'netloc': ['speedvideo.net']
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://speedvideo.net/embed-%s.html' % url
result = getUrl(url).result
a = re.compile('var\s+linkfile *= *"(.+?)"').findall(result)[0]
b = re.compile('var\s+linkfile *= *base64_decode\(.+?\s+(.+?)\)').findall(result)[0]
c = re.compile('var\s+%s *= *(\d*)' % b).findall(result)[0]
import base64
url = a[:int(c)] + a[(int(c) + 10):]
url = base64.urlsafe_b64decode(url)
return url
except:
return
class stagevu:
def info(self):
return {
'netloc': ['stagevu.com'],
'host': ['StageVu'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
url = common.parseDOM(result, "embed", ret="src", attrs = { "type": "video.+?" })[0]
return url
except:
return
class streamcloud:
def info(self):
return {
'netloc': ['streamcloud.eu'],
'host': ['Streamcloud'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://streamcloud.eu/%s' % url
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "class": "proform" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
post = post.replace('op=download1', 'op=download2')
result = getUrl(url, post=post).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class streamin:
def info(self):
return {
'netloc': ['streamin.to'],
'host': ['Streamin'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://streamin.to/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile("file *: *[\'|\"](http.+?)[\'|\"]").findall(result)[-1]
return url
except:
return
class thefile:
def info(self):
return {
'netloc': ['thefile.me'],
'host': ['Thefile'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://thefile.me/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('file *: *"(http.+?)"').findall(result)[-1]
return url
except:
return
class thevideo:
def info(self):
return {
'netloc': ['thevideo.me'],
'host': ['Thevideo'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://thevideo.me/embed-%s.html' % url
result = getUrl(url).result
result = result.replace('\n','')
import ast
url = re.compile("'sources' *: *(\[.+?\])").findall(result)[-1]
url = ast.literal_eval(url)
url = url[-1]['file']
return url
except:
return
class turbovideos:
def info(self):
return {
'netloc': ['turbovideos.net'],
'host': ['Turbovideos'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://turbovideos.net/embed-%s.html' % url
result = getUrl(url).result
url = re.compile('file *: *"(.+?)"').findall(result)
if len(url) > 0: return url[0]
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = re.sub(r'(\',\d*,\d*,)', r';\1', result)
url = js().worker(result)
return url
except:
return
class tusfiles:
def info(self):
return {
'netloc': ['tusfiles.net'],
'host': ['Tusfiles'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class uploadc:
def info(self):
return {
'netloc': ['uploadc.com', 'zalaa.com'],
'host': ['Uploadc', 'Zalaa'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://uploadc.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile("'file' *, *'(.+?)'").findall(result)
if len(url) > 0: return url[0]
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class uptobox:
def info(self):
return {
'netloc': ['uptobox.com'],
'host': ['Uptobox'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "form", attrs = { "name": "F1" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post = urllib.urlencode(post)
import time
for i in range(0, 3):
try:
result = getUrl(url, post=post).result
url = common.parseDOM(result, "div", attrs = { "align": ".+?" })
url = [i for i in url if 'button_upload' in i][0]
url = common.parseDOM(url, "a", ret="href")[0]
url = ['http' + i for i in url.split('http') if 'uptobox.com' in i][0]
return url
except:
time.sleep(1)
except:
return
class v_vids:
def info(self):
return {
'netloc': ['v-vids.com'],
'host': ['V-vids'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "name": "F1" })[0]
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': '', 'method_premium': ''})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
url = common.parseDOM(result, "a", ret="href", attrs = { "id": "downloadbutton" })[0]
return url
except:
return
class veehd:
def info(self):
return {
'netloc': ['veehd.com'],
}
def resolve(self, url):
try:
result = getUrl(url, close=False).result
result = result.replace('\n','')
url = re.compile('function\s*load_download.+?src\s*:\s*"(.+?)"').findall(result)[0]
url = urlparse.urljoin('http://veehd.com', url)
result = getUrl(url, close=False).result
i = common.parseDOM(result, "iframe", ret="src")
if len(i) > 0:
i = urlparse.urljoin('http://veehd.com', i[0])
getUrl(i, close=False).result
result = getUrl(url).result
url = re.compile('href *= *"([^"]+(?:mkv|mp4|avi))"').findall(result)
url += re.compile('src *= *"([^"]+(?:divx|avi))"').findall(result)
url += re.compile('"url" *: *"(.+?)"').findall(result)
url = urllib.unquote(url[0])
return url
except:
return
class vidbull:
def info(self):
return {
'netloc': ['vidbull.com'],
'host': ['Vidbull'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "source", ret="src", attrs = { "type": "video.+?" })[0]
return url
except:
return
class videomega:
def info(self):
return {
'netloc': ['videomega.tv']
}
def resolve(self, url):
try:
url = urlparse.urlparse(url).query
url = urlparse.parse_qsl(url)[0][1]
url = 'http://videomega.tv/cdn.php?ref=%s' % url
result = getUrl(url, mobile=True).result
url = common.parseDOM(result, "source", ret="src", attrs = { "type": "video.+?" })[0]
return url
except:
return
class videopremium:
def info(self):
return {
'netloc': ['videopremium.tv', 'videopremium.me']
}
def resolve(self, url):
try:
result = getUrl(url).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update({'method_free': 'Watch Free!'})
post = urllib.urlencode(post)
result = getUrl(url, post=post).result
result = common.parseDOM(result, "script", attrs = { "type": ".+?" })
result = (''.join(result)).replace(' ','').replace('\'','"')
result = re.compile('file:"(http.+?m3u8)"').findall(result)
for url in result:
try: return getUrl(url, output='geturl').result
except: pass
except:
return
class vidplay:
def info(self):
return {
'netloc': ['vidplay.net'],
'host': ['Vidplay'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
u = 'http://vidplay.net/vidembed-%s' % url
url = getUrl(u, output='geturl').result
if u == url: raise Exception()
return url
except:
return
class vidspot:
def info(self):
return {
'netloc': ['vidspot.net'],
'host': ['Vidspot'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vidspot.net/embed-%s.html' % url
result = getUrl(url, mobile=True).result
url = re.compile('"file" *: *"(http.+?)"').findall(result)[-1]
query = urlparse.urlparse(url).query
url = url[:url.find('?')]
url = '%s?%s&direct=false' % (url, query)
return url
except:
return
class vidto:
def info(self):
return {
'netloc': ['vidto.me'],
'host': ['Vidto'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vidto.me/embed-%s.html' % url
result = getUrl(url).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = re.sub(r'(\',\d*,\d*,)', r';\1', result)
url = js().worker(result)
return url
except:
return
class vidzi:
def info(self):
return {
'netloc': ['vidzi.tv'],
'host': ['Vidzi'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, mobile=True).result
result = result.replace('\n','')
result = re.compile('sources *: *\[.+?\]').findall(result)[-1]
result = re.compile('file *: *"(http.+?)"').findall(result)
url = [i for i in result if '.m3u8' in i]
if len(url) > 0: return url[0]
url = [i for i in result if not '.m3u8' in i]
if len(url) > 0: return url[0]
except:
return
class vimeo:
def info(self):
return {
'netloc': ['vimeo.com']
}
def resolve(self, url):
try:
url = [i for i in url.split('/') if i.isdigit()][-1]
url = 'http://player.vimeo.com/video/%s/config' % url
result = getUrl(url).result
result = json.loads(result)
u = result['request']['files']['h264']
url = None
try: url = u['hd']['url']
except: pass
try: url = u['sd']['url']
except: pass
return url
except:
return
class vk:
def info(self):
return {
'netloc': ['vk.com']
}
def resolve(self, url):
try:
url = url.replace('https://', 'http://')
result = getUrl(url).result
u = re.compile('url(720|540|480|360|240)=(.+?)&').findall(result)
url = []
try: url += [[{'quality': 'HD', 'url': i[1]} for i in u if i[0] == '720'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '540'][0]]
except: pass
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '480'][0]]
except: pass
if not url == []: return url
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '360'][0]]
except: pass
if not url == []: return url
try: url += [[{'quality': 'SD', 'url': i[1]} for i in u if i[0] == '240'][0]]
except: pass
if url == []: return
return url
except:
return
class vodlocker:
def info(self):
return {
'netloc': ['vodlocker.com'],
'host': ['Vodlocker'],
'quality': 'Low',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://vodlocker.com/embed-%s.html' % url
result = getUrl(url).result
url = re.compile('[\'|\"](http.+?[\w]+)[\'|\"]').findall(result)
url = [i for i in url if i.endswith(('.mp4', '.mkv', '.flv', '.avi'))][0]
return url
except:
return
class xfileload:
def info(self):
return {
'netloc': ['xfileload.com'],
'host': ['Xfileload'],
'quality': 'High',
'captcha': True,
'a/c': False
}
def resolve(self, url):
try:
result = getUrl(url, close=False).result
post = {}
f = common.parseDOM(result, "Form", attrs = { "action": "" })
k = common.parseDOM(f, "input", ret="name", attrs = { "type": "hidden" })
for i in k: post.update({i: common.parseDOM(f, "input", ret="value", attrs = { "name": i })[0]})
post.update(captcha().worker(result))
post = urllib.urlencode(post)
import time
request = urllib2.Request(url, post)
for i in range(0, 5):
try:
response = urllib2.urlopen(request, timeout=10)
result = response.read()
response.close()
if 'download2' in result: raise Exception()
url = common.parseDOM(result, "a", ret="href", attrs = { "target": "" })[0]
return url
except:
time.sleep(1)
except:
return
class xvidstage:
def info(self):
return {
'netloc': ['xvidstage.com'],
'host': ['Xvidstage'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://xvidstage.com/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
return
class youtube:
def info(self):
return {
'netloc': ['youtube.com'],
'host': ['Youtube'],
'quality': 'Medium',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
id = url.split("?v=")[-1].split("/")[-1].split("?")[0].split("&")[0]
result = getUrl('http://www.youtube.com/watch?v=%s' % id).result
message = common.parseDOM(result, "div", attrs = { "id": "unavailable-submessage" })
message = ''.join(message)
alert = common.parseDOM(result, "div", attrs = { "id": "watch7-notification-area" })
if len(alert) > 0: raise Exception()
if re.search('[a-zA-Z]', message): raise Exception()
url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
return url
except:
return
class zettahost:
def info(self):
return {
'netloc': ['zettahost.tv'],
'host': ['Zettahost'],
'quality': 'High',
'captcha': False,
'a/c': False
}
def resolve(self, url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://zettahost.tv/embed-%s.html' % url
result = getUrl(url, mobile=True).result
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
url = js().worker(result)
return url
except:
            return
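# Usage sketch (hypothetical URL; the xbmcaddon import means this module
# only runs inside a Kodi/XBMC runtime):
#   print(get('http://vodlocker.com/embed-abc123.html').result)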
PhloxAR/phloxar | PhloxAR/core/stream.py | apache-2.0
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from ..compat import socketserver
from ..compat import SimpleHTTPServer
from ..base import cv2
import time
import socket
import re
import threading
from io import BytesIO
__all__ = [
'JpegStreamHandler', 'JpegStreamer', 'JpegTCPServer', 'VideoStream'
]
_jpeg_streamers = {}
class JpegStreamHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""
Handles requests to the threaded HTTP server.
    Once initialized, any request to this port will receive
    a multipart/x-mixed-replace jpeg stream.
"""
    def do_GET(self):  # BaseHTTPRequestHandler dispatches GET requests to do_GET
global _jpeg_streamers
if self.path == '/' or not self.path:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write("""
<html>
<head>
<style type=text/css>
body {
background-image: url(/stream);
background-repeat: no-repeat;
background-position: center top;
background-attachment: fixed;
height: 100%;
}
</style>
</head>
<body>
 
</body>
</html>
""")
return
elif self.path == '/stream':
self.send_response(200)
self.send_header('Connection', 'close')
self.send_header('Max-Age', '0')
self.send_header('Expires', '0')
self.send_header('Cache-Control', 'no-cache, private')
self.send_header('Pragma', 'no-cache')
self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=--BOUNDARYSTRING')
self.end_headers()
host, port = self.server.socket.getsockname()[:2]
count = 0
timeout = 0.75
last_time_served = 0
while True:
                if (_jpeg_streamers[port].refresh_time > last_time_served or
                        time.time() - timeout > last_time_served):
try:
self.wfile.write('--BOUNDARYSTRING\r\n')
self.send_header('Content-type', 'image/jpeg')
self.send_header('Content-Length', str(len(
_jpeg_streamers[port].jpgdata.getvalue()
)))
self.end_headers()
self.wfile.write(_jpeg_streamers[port].jpgdata.getvalue() + '\r\n')
last_time_served = time.time()
except socket.error:
return
except IOError:
return
count += 1
                time.sleep(_jpeg_streamers[port].sleep_time)
class JpegTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
allow_reuse_address = True
daemon_threads = True
# factory class for jpeg tcp server.
class JpegStreamer(object):
"""
    Allows the user to stream a jpeg-encoded image to an HTTP port. Any
    update to the jpeg data will automatically be pushed to the
    browser via the multipart/x-mixed-replace content type.
initialization:
js = JpegStreamer()
update:
img.save(js)
open a browser and display:
import webbrowser
webbrowser.open(js.url)
    Optional constructor parameters:
    - host_port (default 8080): the TCP port to serve on, given as an int,
      a "host:port" string, or a (host, port) tuple
    - sleeptime (default 0.1): how often to push updates; above 1 second
      seems to cause dropped connections in Google Chrome.
    Once initialized, the buffer and sleep time can be modified and will
    function properly -- the port will not.
"""
server = ''
host = ''
port = ''
sleep_time = ''
frame_buffer = ''
counter = 0
refresh_time = 0
def __init__(self, host_port=8080, sleeptime=0.1):
global _jpeg_streamers
if isinstance(host_port, int):
self.port = host_port
self.host = 'localhost'
elif isinstance(host_port, str) and re.search(':', host_port):
self.host, self.port = host_port.split(':')
self.port = int(self.port)
elif isinstance(host_port, tuple):
self.host, self.port = host_port
else:
self.port = 8080
self.host = 'localhost'
self.sleep_time = sleeptime
        self.server = JpegTCPServer((self.host, self.port), JpegStreamHandler)
self.server_thread = threading.Thread(target=self.server.serve_forever)
_jpeg_streamers[self.port] = self
self.server_thread.daemon = True
self.server_thread.start()
        self.frame_buffer = self
        # jpeg byte buffer that producers (e.g. Image.save(streamer)) are
        # assumed to fill; the request handler streams jpgdata.getvalue()
        self.jpgdata = BytesIO()
def url(self):
"""
Returns the JpegStreams Webbrowser-appropriate URL, if not provided
in the constructor, it defaults to "http://localhost:8080"
:return: url
"""
return 'http://' + self.host + ':' + str(self.port) + '/'
def stream_url(self):
"""
Returns the URL of the MJPEG stream. If host and port are not set in
the constructor, defaults to "http://localhost:8080/stream/"
:return: url
"""
return self.url() + 'stream'
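# Usage sketch (assumptions: a producer such as Image.save(js) fills
# js.jpgdata with encoded jpeg bytes; port 8080 is free):
#   js = JpegStreamer(8080)
#   print(js.url())         # open this in a browser
#   print(js.stream_url())  # raw MJPEG endpoint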
class VideoStream(object):
"""
    Allows the user to save video files in different formats.
You can initialize it by specifying the file you want to output::
vs = VideoStream("hello.avi")
You can also specify a framerate, and if you want to "fill" in
missed frames. So if you want to record a real time video you may
want to do this::
# note these are default values
vs = VideoStream("myvideo.avi", 25, True)
Where if you want to do a stop-motion animation, you would want to
turn fill off::
vs_animation = VideoStream("cartoon.avi", 15, False)
If you select a fill, the VideoStream will do its best to stay
close to "real time" by duplicating frames or dropping frames
when the clock doesn't sync up with the file writes.
You can save a frame to the video by using the Image.save() function::
my_camera.getImage().save(vs)
"""
fps = 25
filename = ''
writer = ''
fourcc = ''
frame_fill = True
video_time = 0.0
start_time = 0.0
frame_count = 0
last_frame = None
def __init__(self, filename, fps=25, frame_fill=True):
"""
TODO: details
:param filename:
:param fps:
:param frame_fill:
"""
self.filename = filename
self.fps = fps
self.frame_fill = frame_fill
self.fourcc = cv2.VideoWriter_fourcc('I', 'Y', 'U', 'V')
def init_writer(self, size):
"""
TODO: details
:param size:
:return:
"""
self.writer = cv2.VideoWriter(self.filename, self.fourcc, self.fps,
size, 1)
self.video_time = 0.0
self.start_time = time.time()
def write_frame(self, img):
"""
Write a frame to the display object. this is automatically called
by image.save() but you can use this function to save just the
bitmap as well so image markup is not implicit,typically you use
image.save() but this allows for more finer control
Args:
img (Image, array like): the image to be write
Returns:
None
"""
if not self.writer:
self.init_writer(img.size)
self.last_frame = img
frame_time = 1.0 / float(self.fps)
target_time = self.start_time + frame_time * self.frame_count
real_time = time.time()
if self.frame_fill:
# see if we need to do anything to adjust to real time
if target_time > real_time + frame_time:
# if we're more than one frame ahead,
# save the last_frame, but don't write to video out
self.last_frame = img
return
elif target_time < real_time - frame_time:
# we're at least one frame behind
frames_behind = int((real_time - target_time) * self.fps) + 1
# figure out how many frames behind we are
                last_frames = frames_behind // 2  # floor division: range() below needs an int
for i in range(0, last_frames):
self.frame_count += 1
self.writer.write(self.last_frame.narray)
frames = frames_behind - last_frames
for i in range(0, frames):
self.frame_count += 1
self.writer.write(img.narray)
else:
self.frame_count += 1
self.writer.write(img.narray)
else:
self.frame_count += 1
self.writer.write(img.narray)
self.last_frame = img
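# Usage sketch (assumes an image object exposing .size and .narray, as
# write_frame() above requires -- e.g. a PhloxAR Image):
#   vs = VideoStream('out.avi', fps=25, frame_fill=True)
#   for _ in range(50):       # roughly 2 seconds at 25 fps
#       vs.write_frame(img)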
sodafree/backend | build/lib.linux-i686-2.7/django/contrib/gis/geos/collections.py
"""
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
from ctypes import c_int, c_uint, byref
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import get_pointer_arr, GEOS_PREPARE
from django.contrib.gis.geos.linestring import LineString, LinearRing
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.contrib.gis.geos import prototypes as capi
class GeometryCollection(GEOSGeometry):
_typeid = 7
def __init__(self, *args, **kwargs):
"Initializes a Geometry Collection from a sequence of Geometry objects."
# Checking the arguments
if not args:
raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
if len(args) == 1:
# If only one geometry provided or a list of geometries is provided
# in the first argument.
if isinstance(args[0], (tuple, list)):
init_geoms = args[0]
else:
init_geoms = args
else:
init_geoms = args
# Ensuring that only the permitted geometries are allowed in this collection
# this is moved to list mixin super class
self._check_allowed(init_geoms)
# Creating the geometry pointer array.
collection = self._create_collection(len(init_geoms), iter(init_geoms))
super(GeometryCollection, self).__init__(collection, **kwargs)
def __iter__(self):
"Iterates over each Geometry in the Collection."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of geometries in this Collection."
return self.num_geom
### Methods for compatibility with ListMixin ###
def _create_collection(self, length, items):
# Creating the geometry pointer array.
geoms = get_pointer_arr(length)
for i, g in enumerate(items):
# this is a little sloppy, but makes life easier
# allow GEOSGeometry types (python wrappers) or pointer types
geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
def _get_single_internal(self, index):
return capi.get_geomn(self.ptr, index)
def _get_single_external(self, index):
"Returns the Geometry from this Collection at the given index (0-based)."
# Checking the index and returning the corresponding GEOS geometry.
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
def _set_list(self, length, items):
"Create a new collection, and destroy the contents of the previous pointer."
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_collection(length, items)
if srid: self.srid = srid
capi.destroy_geom(prev_ptr)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
@property
def kml(self):
"Returns the KML for this Geometry Collection."
return '<MultiGeometry>%s</MultiGeometry>' % ''.join([g.kml for g in self])
@property
def tuple(self):
"Returns a tuple of all the coordinates in this Geometry Collection"
return tuple([g.tuple for g in self])
coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
_allowed = Point
_typeid = 4
class MultiLineString(GeometryCollection):
_allowed = (LineString, LinearRing)
_typeid = 5
@property
def merged(self):
"""
Returns a LineString representing the line merge of this
MultiLineString.
"""
return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
_allowed = Polygon
_typeid = 6
@property
def cascaded_union(self):
"Returns a cascaded union of this MultiPolygon."
if GEOS_PREPARE:
return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
else:
raise GEOSException('The cascaded union operation requires GEOS 3.1+.')
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
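# Illustrative usage sketch (not part of Django): constructing and
# iterating a collection. Requires a working GEOS install; the
# coordinates are made up.
def _geometry_collection_demo():
    mp = MultiPoint(Point(0, 0), Point(1, 1))
    return len(mp), mp.tuple  # -> (2, ((0.0, 0.0), (1.0, 1.0)))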
| bsd-3-clause | 5,024,250,141,670,946,000 | 36.699187 | 113 | 0.666379 | false | 4.273733 | false | false | false | 0.004529 |
redwoodsystems/GoogleCalendar-Connector | reaper/motion.py | 1 | 6432 | #Encapsulates calls to the Redwood Motion REST API
import httplib2
import json
import datetime, time, pytz
from django.utils import timezone
def call_motion_api(url,user,pwd,location_id):
print "call_motion_api"
uri_path = url+"/location/"+str(location_id)+"/sensorStats/motion/instant"
#h = httplib2.Http(".cache",disable_ssl_certificate_validation=True)
h = httplib2.Http(disable_ssl_certificate_validation=True)
h.add_credentials(user, pwd)
resp,content = h.request(uri_path,"GET", headers={'content-type':'application/json'})
motion=0
if resp and resp.has_key('status') and resp['status'] =='200':
print "resp=",resp
print "content=",content
motion = parse_content(content)
return int(motion)
def parse_content(content):
    # pass-through: the motion API returns the instant value as plain text
    return content
def parse_curr_location_fixtures(loc):
fix_list = []
if 'childFixture' in loc:
for fx in loc['childFixture']:
fix_list.append(fx.replace("/fixture/",""))
return fix_list
def get_child_location_ids(loc):
#Find child locations and obtain their fixtures
loc_ids = []
if 'childLocation' in loc:
for l in loc['childLocation']:
print l
print l.replace("/location/","")
loc_ids.append(int(l.replace("/location/","")))
return loc_ids
def call_location_api(url,user,pwd,location_id):
#return json for specified location
uri_path = url+"/location/"+str(location_id)
h = httplib2.Http(disable_ssl_certificate_validation=True)
h.add_credentials(user, pwd)
resp,content = h.request(uri_path,"GET", headers={'content-type':'application/json'})
loc = None
if resp and resp.has_key('status') and resp['status'] =='200':
print "resp=",resp
print "content=",content
loc = json.loads(content)
return loc
def get_location_fixtures2(loc, url,user,pwd,location_id):
#return array of fixture names for a specific location and for child locations
fix_list = []
#obtain json for current location
if not loc:
loc = call_location_api(url,user,pwd,location_id)
#get fixtures at current level
fix_list.extend(parse_curr_location_fixtures(loc))
#check if we have child locations
ch_loc_ids = get_child_location_ids(loc)
#obtain fixtures for each child locations
#we assume that locations are only 2 levels (so need for recursion)
for ch_loc_id in ch_loc_ids:
ch_loc = call_location_api(url,user,pwd,ch_loc_id)
fix_list.extend(parse_curr_location_fixtures(ch_loc))
return fix_list
def get_motion_instant(loc):
motion = -1
if "sensorStats" in loc and "motion" in loc["sensorStats"]:
instant = loc["sensorStats"]["motion"]["instant"]
if instant:
motion = int(instant)
return motion
def get_occupancy(url,user,pwd,location_id, current_time, occ_threshold):
# TODO: Combine motion instant and occupancy pct methods
#
# Build array of fixture ids from location
# Find number of fixtures from location
# For each fixture:
# Find motion instant
#
print "get_occupancy"
motion = 0
occupied_pct = None
loc = call_location_api(url,user,pwd,location_id)
fix_list = get_location_fixtures2(loc,url,user,pwd,location_id)
motion = get_motion_instant(loc)
motion_list = []
if len(fix_list) >0:
#we found some fixtures under this location
#obtain motion instants for each fixture in the list
motion_list = call_fixture_motion_api(url,user,pwd,fix_list)
print "motion_list=",motion_list
#obtain occupancy for each fixture
cnt_occupied = 0;
for m in motion_list:
if not m["dt"]:
m["dt"] = current_time
vacant_secs = get_pos_time_diff_secs(current_time, m["dt"])
print "fx vacant_secs=",vacant_secs
if vacant_secs <= occ_threshold:
cnt_occupied = cnt_occupied +1
occupied_pct = (cnt_occupied * 100)/len(fix_list)
print "location_id: %d num_fixtures: %d cnt_occupied: %d occupied pct: %f"%(location_id,
len(fix_list), cnt_occupied, occupied_pct)
return (occupied_pct, motion)
def call_fixture_motion_api(url,user,pwd,filter_list):
#return array of fixtures and motion instants filtered by fixture list
print "call_fixture_motion_api"
ret_list=[]
uri_path = url+"/fixture"
h = httplib2.Http(disable_ssl_certificate_validation=True)
h.add_credentials(user, pwd)
resp,content = h.request(uri_path,"GET", headers={'content-type':'application/json'})
if resp and resp.has_key('status') and resp['status'] =='200':
print "resp=",resp
#print "content=",content
fx_arr = json.loads(content)
if not fx_arr or len(fx_arr) ==0:
return None
all_fixtures = {}
for fx in fx_arr:
all_fixtures[fx["serialNum"]] = fx
for fname in filter_list:
print fname
if fname in all_fixtures:
print fname+" found"
fx = all_fixtures[fname]
if "sensorStats" in fx and "motion" in fx["sensorStats"]:
print fx
instant = fx["sensorStats"]["motion"]["instant"]
dt = datetime.datetime.utcfromtimestamp(instant).replace(tzinfo=pytz.timezone('utc'))
ret_list.append({"fixtureName":fx["serialNum"],
"instant": instant,
"dt":dt })
return ret_list
def get_pos_time_diff_secs(dt1, dt2):
    # Assumes that both dates are timezone aware and stored in UTC
    diff = dt1 - dt2
    if diff.days >= 0:
        # include whole days, not just the sub-day remainder of the delta
        return diff.days * 86400 + diff.seconds
    else:
        return 0
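# Illustrative check (not part of the original module): the helper above
# feeds the vacancy comparison against occ_threshold in get_occupancy().
# The timestamps are made up.
def _vacancy_demo():
    now = datetime.datetime(2014, 1, 1, 12, 0, 0)
    last_seen = datetime.datetime(2014, 1, 1, 11, 55, 0)
    return get_pos_time_diff_secs(now, last_seen)  # -> 300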
def main():
current_time = timezone.now()
get_occupancy('https://yyyyyy.xxxxxxxx.com:26443/rApi', "admin", "password", 101, current_time, 300)
return
if __name__ == '__main__':
main()
| apache-2.0 | 4,756,116,271,277,566,000 | 31.321608 | 126 | 0.581934 | false | 3.898182 | false | false | false | 0.022233 |
sbc100/native_client | src/trusted/validator_ragel/PRESUBMIT.py | 12 | 4775 | # Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Documentation on PRESUBMIT.py can be found at:
# http://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
import json
import hashlib
import os
import re
import gclient_utils
def CheckChange(input_api, message_constructor):
"""Checks for files with a modified contents.
  Some checking of the validator happens on buildbots, but comprehensive
  enumeration
tests must be run locally.
There are two dangers:
1. Source code for autogenerated files can be modified without regeneration
of said files.
2. Source of validator can be changed without running the aforementioned
tests.
This function catches the situation when source files for validator_x86_??.c
are changed but files are not regenerated and it also catches the situation
when code is changed without running the dfacheckvalidator tests.
"""
errors = []
changelist = input_api.change
root_path = changelist.RepositoryRoot()
if input_api.change.scm == 'svn':
try:
# With SVN you can decide to commit not all modified files but some of
# them thus separate GetAllModifiedFiles() and GetModifiedFiles() lists
# are provided. We need to remove root_path from the name of file.
assert all(filename.startswith(root_path + os.path.sep)
for filename in changelist.GetAllModifiedFiles())
all_filenames = [filename[len(root_path + os.path.sep):]
for filename in changelist.GetAllModifiedFiles()]
assert all(filename.startswith(root_path + os.path.sep)
for filename in changelist.GetModifiedFiles())
modified_filenames = [filename[len(root_path + os.path.sep):]
for filename in changelist.GetModifiedFiles()]
except:
# If gcl is not available (which happens in CQ bots) then we'll try to use
# AffectedFiles() instead of GetAllModifiedFiles()
all_filenames = [file.LocalPath() for file in changelist.AffectedFiles()]
modified_filenames = all_filenames
else:
# With GIT you must commit all modified files thus only AffectedFiles()
# list is provided.
all_filenames = [file.LocalPath() for file in changelist.AffectedFiles()]
modified_filenames = all_filenames
json_filename = os.path.join(
'src', 'trusted', 'validator_ragel', 'gen', 'protected_files.json')
protected_files = json.loads(
gclient_utils.FileRead(os.path.join(root_path, json_filename)))
need_dfagen = False
need_dfacheckvalidator = False
canonical_prefix = 'native_client/'
for filename in sorted(all_filenames):
canonical_filename = canonical_prefix + filename.replace('\\', '/')
if canonical_filename in protected_files['validator']:
file_contents = gclient_utils.FileRead(os.path.join(root_path, filename))
sha512 = hashlib.sha512(file_contents).hexdigest()
if sha512 != protected_files['validator'][canonical_filename]:
errors.append(message_constructor(
'Incorrect {0} hash:\n expected {1}\n got {2}'.format(
canonical_filename,
protected_files['validator'][canonical_filename],
sha512)))
need_dfacheckvalidator = True
if canonical_filename in protected_files['generating']:
for automaton_filename in protected_files['generated']:
if (os.stat(os.path.join(root_path, filename)).st_mtime >
os.stat(os.path.join(root_path,
automaton_filename[len(canonical_prefix):])).st_mtime):
errors.append(message_constructor(
              'File {0} is older than {1}'.format(
automaton_filename, canonical_filename)))
need_dfagen = True
if (canonical_filename in protected_files['validator'] or
canonical_filename in protected_files['generating'] or
filename == json_filename):
if filename not in modified_filenames:
errors.append(message_constructor(
'File {0} is changed but is excluded from this CL'.format(
canonical_filename)))
if need_dfagen:
errors.append(message_constructor(
'Please run "./scons dfagen" before commit!'))
if need_dfacheckvalidator:
errors.append(message_constructor(
'Please run "./scons dfacheckvalidator" before commit!'))
return errors
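# Illustrative only (not the real manifest): the rough shape of
# protected_files.json as consumed by CheckChange above. The paths and
# the hash value are assumptions for demonstration.
_EXAMPLE_PROTECTED_FILES = {
    'validator': {
        'native_client/path/to/validator_source.c':
            '0' * 128,  # sha512 hex digest of the file contents
    },
    'generating': ['native_client/path/to/automaton_source.rl'],
    'generated': ['native_client/path/to/generated_automaton.c'],
}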
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api,
message_constructor=output_api.PresubmitPromptWarning)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api,
message_constructor=output_api.PresubmitError)
| bsd-3-clause | -7,773,188,843,427,686,000 | 38.791667 | 80 | 0.687539 | false | 4.163034 | false | false | false | 0.008586 |
Lucretiel/MachineLearningDigits | src/regressions.py | 1 | 1871 | from numpy import matrix, array, exp, zeros
from numpy.linalg import pinv
from common import Weights, poly_space
from function_registry import FunctionRegistry
from random import random
all_regressions = FunctionRegistry()
def make_XY(data, degree, digit1, digit2, output_type):
feature_data = []
numeral_data = []
digits = {
digit1: 1,
digit2: -1
}
for point in data:
feature_data.append(
poly_space(
point.x_feature,
point.y_feature,
degree))
numeral_data.append([digits[point.numeral]])
X = output_type(feature_data)
Y = output_type(numeral_data)
return X, Y
@all_regressions.register
def linear_regression(data, degree, digit1, digit2):
X, Y = make_XY(data, degree, digit1, digit2, matrix)
    X_pinv = pinv(X)
w = X_pinv * Y
result = w.transpose().tolist()[0]
return Weights(result, degree)
def list_add(l1, l2):
return [a + b for a, b in zip(l1, l2)]
import sys
@all_regressions.register
def logistic_regression(data, degree, digit1, digit2):
digits = {
digit1: 1,
digit2: -1
}
X = []
Y = []
for point in data:
X.append(poly_space(point.x_feature, point.y_feature, degree))
Y.append([digits[point.numeral]])
X = array(X)
Y = array(Y)
products = X * Y
N = len(X)
W = array([random() for _ in X[0]])
def summation(w):
for x_y in products:
            #print (x_y, file=sys.stderr)
            # note: the exponent uses the scalar y_n * (w . x_n),
            # not an element-wise product
            yield x_y / (1 + exp(x_y.dot(w)))
def gradient(w):
return sum(summation(w))/N
num_iterations = 500
n = 1
threshold = 0.001
for t in range(num_iterations):
g = gradient(W)
W = (W + n*g)
if(sum(g**2) < threshold):
break
return Weights(W.tolist(), degree)
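# Illustrative note (not part of the original module): the update above
# is gradient ascent on the logistic log-likelihood,
#   grad = (1/N) * sum_n  y_n * x_n / (1 + exp(y_n * (w . x_n)))
# A tiny sanity check with made-up values:
def _logistic_gradient_term_demo():
    x_y = array([1.0, 2.0])   # y_n * x_n for a single sample
    w = zeros(2)              # exp(0) == 1, so the term is x_y / 2
    return x_y / (1 + exp(x_y.dot(w)))  # -> array([ 0.5,  1. ])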
| gpl-3.0 | 7,206,799,075,764,976,000 | 22.098765 | 70 | 0.576162 | false | 3.288225 | false | false | false | 0.005345 |
tropp/acq4 | acq4/analysis/modules/Photostim/MapAnalysisTemplate.py | 4 | 5508 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './acq4/analysis/modules/Photostim/MapAnalysisTemplate.ui'
#
# Created: Tue Dec 24 01:49:12 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(208, 349)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.rateAverageSpin = SpinBox(Form)
self.rateAverageSpin.setObjectName(_fromUtf8("rateAverageSpin"))
self.gridLayout.addWidget(self.rateAverageSpin, 4, 1, 1, 1)
self.label = QtGui.QLabel(Form)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 4, 0, 1, 1)
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.poisMaxCheck = QtGui.QCheckBox(self.groupBox_2)
self.poisMaxCheck.setObjectName(_fromUtf8("poisMaxCheck"))
self.gridLayout_3.addWidget(self.poisMaxCheck, 2, 0, 1, 1)
self.poisMaxAmpCheck = QtGui.QCheckBox(self.groupBox_2)
self.poisMaxAmpCheck.setObjectName(_fromUtf8("poisMaxAmpCheck"))
self.gridLayout_3.addWidget(self.poisMaxAmpCheck, 3, 0, 1, 1)
self.chargeTransferCheck = QtGui.QCheckBox(self.groupBox_2)
self.chargeTransferCheck.setObjectName(_fromUtf8("chargeTransferCheck"))
self.gridLayout_3.addWidget(self.chargeTransferCheck, 0, 0, 1, 1)
self.eventCountCheck = QtGui.QCheckBox(self.groupBox_2)
self.eventCountCheck.setObjectName(_fromUtf8("eventCountCheck"))
self.gridLayout_3.addWidget(self.eventCountCheck, 1, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_2, 3, 0, 1, 2)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.excitatoryRadio = QtGui.QRadioButton(self.groupBox)
self.excitatoryRadio.setObjectName(_fromUtf8("excitatoryRadio"))
self.gridLayout_2.addWidget(self.excitatoryRadio, 0, 0, 1, 1)
self.fitErrorSpin = SpinBox(self.groupBox)
self.fitErrorSpin.setObjectName(_fromUtf8("fitErrorSpin"))
self.gridLayout_2.addWidget(self.fitErrorSpin, 1, 1, 1, 1)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
self.inhibitoryRadio = QtGui.QRadioButton(self.groupBox)
self.inhibitoryRadio.setObjectName(_fromUtf8("inhibitoryRadio"))
self.gridLayout_2.addWidget(self.inhibitoryRadio, 0, 1, 1, 1)
self.gridLayout.addWidget(self.groupBox, 2, 0, 1, 2)
self.groupBox_3 = QtGui.QGroupBox(Form)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.gridLayout_4 = QtGui.QGridLayout(self.groupBox_3)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.label_3 = QtGui.QLabel(self.groupBox_3)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_4.addWidget(self.label_3, 0, 0, 1, 1)
self.thresholdSpin = SpinBox(self.groupBox_3)
self.thresholdSpin.setObjectName(_fromUtf8("thresholdSpin"))
self.gridLayout_4.addWidget(self.thresholdSpin, 0, 1, 1, 1)
self.detectionHistogram = PlotWidget(self.groupBox_3)
self.detectionHistogram.setObjectName(_fromUtf8("detectionHistogram"))
self.gridLayout_4.addWidget(self.detectionHistogram, 1, 0, 1, 2)
self.gridLayout.addWidget(self.groupBox_3, 5, 0, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.label.setText(_translate("Form", "Rate Average Window", None))
self.groupBox_2.setTitle(_translate("Form", "Detection Methods", None))
self.poisMaxCheck.setText(_translate("Form", "Poisson max probability", None))
self.poisMaxAmpCheck.setText(_translate("Form", "Poisson max + amplitude", None))
self.chargeTransferCheck.setText(_translate("Form", "Charge transfer z-score", None))
self.eventCountCheck.setText(_translate("Form", "Event Count", None))
self.groupBox.setTitle(_translate("Form", "Event Selection", None))
self.excitatoryRadio.setText(_translate("Form", "Excitatory", None))
self.label_2.setText(_translate("Form", "Fit Error Limit", None))
self.inhibitoryRadio.setText(_translate("Form", "Inhibitory", None))
self.label_3.setText(_translate("Form", "Detection Threshold", None))
from acq4.pyqtgraph import SpinBox, PlotWidget
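# Illustrative usage sketch (not part of the generated file): how a
# pyuic-generated Ui_Form is typically attached to a widget. Requires a
# running QApplication.
def _build_form():
    widget = QtGui.QWidget()
    ui = Ui_Form()
    ui.setupUi(widget)
    return widget, ui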
| mit | -4,395,419,766,249,245,700 | 51.961538 | 111 | 0.692266 | false | 3.611803 | false | false | false | 0.001634 |
j2a/pytils | pytils/dt.py | 1 | 7948 | # -*- coding: utf-8 -*-
# -*- test-case-name: pytils.test.test_dt -*-
"""
Russian dates without locales
"""
import datetime
from pytils import numeral
from pytils.utils import check_positive
from pytils.third import six
DAY_ALTERNATIVES = {
1: (u"вчера", u"завтра"),
2: (u"позавчера", u"послезавтра")
} #: Day alternatives (i.e. one day ago -> yesterday)
DAY_VARIANTS = (
u"день",
u"дня",
u"дней",
) #: Forms (1, 2, 5) for noun 'day'
HOUR_VARIANTS = (
u"час",
u"часа",
u"часов",
) #: Forms (1, 2, 5) for noun 'hour'
MINUTE_VARIANTS = (
u"минуту",
u"минуты",
u"минут",
) #: Forms (1, 2, 5) for noun 'minute'
PREFIX_IN = u"через" #: Prefix 'in' (i.e. B{in} three hours)
SUFFIX_AGO = u"назад" #: Prefix 'ago' (i.e. three hours B{ago})
MONTH_NAMES = (
(u"янв", u"январь", u"января"),
(u"фев", u"февраль", u"февраля"),
(u"мар", u"март", u"марта"),
(u"апр", u"апрель", u"апреля"),
(u"май", u"май", u"мая"),
(u"июн", u"июнь", u"июня"),
(u"июл", u"июль", u"июля"),
(u"авг", u"август", u"августа"),
(u"сен", u"сентябрь", u"сентября"),
(u"окт", u"октябрь", u"октября"),
(u"ноя", u"ноябрь", u"ноября"),
(u"дек", u"декабрь", u"декабря"),
) #: Month names (abbreviated, full, inflected)
DAY_NAMES = (
(u"пн", u"понедельник", u"понедельник", u"в\xa0"),
(u"вт", u"вторник", u"вторник", u"во\xa0"),
(u"ср", u"среда", u"среду", u"в\xa0"),
(u"чт", u"четверг", u"четверг", u"в\xa0"),
(u"пт", u"пятница", u"пятницу", u"в\xa0"),
(u"сб", u"суббота", u"субботу", u"в\xa0"),
(u"вск", u"воскресенье", u"воскресенье", u"в\xa0")
) #: Day names (abbreviated, full, inflected, preposition)
def distance_of_time_in_words(from_time, accuracy=1, to_time=None):
"""
Represents distance of time in words
@param from_time: source time (in seconds from epoch)
@type from_time: C{int}, C{float} or C{datetime.datetime}
@param accuracy: level of accuracy (1..3), default=1
@type accuracy: C{int}
@param to_time: target time (in seconds from epoch),
default=None translates to current time
@type to_time: C{int}, C{float} or C{datetime.datetime}
@return: distance of time in words
@rtype: unicode
@raise ValueError: accuracy is lesser or equal zero
"""
current = False
if to_time is None:
current = True
to_time = datetime.datetime.now()
check_positive(accuracy, strict=True)
if not isinstance(from_time, datetime.datetime):
from_time = datetime.datetime.fromtimestamp(from_time)
if not isinstance(to_time, datetime.datetime):
to_time = datetime.datetime.fromtimestamp(to_time)
if from_time.tzinfo and not to_time.tzinfo:
to_time = to_time.replace(tzinfo=from_time.tzinfo)
dt_delta = to_time - from_time
difference = dt_delta.days*86400 + dt_delta.seconds
minutes_orig = int(abs(difference)/60.0)
hours_orig = int(abs(difference)/3600.0)
days_orig = int(abs(difference)/86400.0)
in_future = from_time > to_time
words = []
values = []
alternatives = []
days = days_orig
hours = hours_orig - days_orig*24
words.append(u"%d %s" % (days, numeral.choose_plural(days, DAY_VARIANTS)))
values.append(days)
words.append(u"%d %s" %
(hours, numeral.choose_plural(hours, HOUR_VARIANTS)))
values.append(hours)
days == 0 and hours == 1 and current and alternatives.append(u"час")
minutes = minutes_orig - hours_orig*60
words.append(u"%d %s" % (minutes,
numeral.choose_plural(minutes, MINUTE_VARIANTS)))
values.append(minutes)
days == 0 and hours == 0 and minutes == 1 and current and \
alternatives.append(u"минуту")
    # drop trailing zeros from values and words
while values and not values[-1]:
values.pop()
words.pop()
    # drop leading zeros from values and words
while values and not values[0]:
values.pop(0)
words.pop(0)
limit = min(accuracy, len(words))
real_words = words[:limit]
real_values = values[:limit]
    # drop trailing zeros again
while real_values and not real_values[-1]:
real_values.pop()
real_words.pop()
limit -= 1
real_str = u" ".join(real_words)
    # the alternative wording is needed only if real_words holds a single
    # value and, additionally, if the current time is used
alter_str = limit == 1 and current and alternatives and \
alternatives[0]
_result_str = alter_str or real_str
result_str = in_future and u"%s %s" % (PREFIX_IN, _result_str) \
or u"%s %s" % (_result_str, SUFFIX_AGO)
    # if less than a minute has passed, real_words is empty, so
    # alternatives[0] must be taken instead of result_str
zero_str = minutes == 0 and not real_words and \
(in_future and u"менее чем через минуту"
or u"менее минуты назад")
    # use yesterday/the day before yesterday/tomorrow/the day after
    # tomorrow when days is 1..2 and real_words holds a single value
day_alternatives = DAY_ALTERNATIVES.get(days, False)
alternate_day = day_alternatives and current and limit == 1 and \
((in_future and day_alternatives[1])
or day_alternatives[0])
final_str = not real_words and zero_str or alternate_day or result_str
return final_str
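# Illustrative usage (not part of the original module); the wording
# depends on the delta, shown here for a two-hour difference:
def _distance_demo():
    to_time = datetime.datetime(2014, 1, 1, 12, 0, 0)
    from_time = datetime.datetime(2014, 1, 1, 10, 0, 0)
    return distance_of_time_in_words(from_time, to_time=to_time)
    # -> u"2 часа назад"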
def ru_strftime(format=u"%d.%m.%Y", date=None, inflected=False,
inflected_day=False, preposition=False):
"""
Russian strftime without locale
@param format: strftime format, default=u'%d.%m.%Y'
@type format: C{unicode}
@param date: date value, default=None translates to today
@type date: C{datetime.date} or C{datetime.datetime}
@param inflected: is month inflected, default False
@type inflected: C{bool}
@param inflected_day: is day inflected, default False
@type inflected: C{bool}
@param preposition: is preposition used, default False
preposition=True automatically implies inflected_day=True
@type preposition: C{bool}
@return: strftime string
@rtype: unicode
"""
if date is None:
date = datetime.datetime.today()
weekday = date.weekday()
prepos = preposition and DAY_NAMES[weekday][3] or u""
month_idx = inflected and 2 or 1
day_idx = (inflected_day or preposition) and 2 or 1
    # per the Russian typography standard:
    # "1 April 2007", but "01.04.2007"
if u'%b' in format or u'%B' in format:
format = format.replace(u'%d', six.text_type(date.day))
format = format.replace(u'%a', prepos+DAY_NAMES[weekday][0])
format = format.replace(u'%A', prepos+DAY_NAMES[weekday][day_idx])
format = format.replace(u'%b', MONTH_NAMES[date.month-1][0])
format = format.replace(u'%B', MONTH_NAMES[date.month-1][month_idx])
    # Python 2: strftime's argument must be str
    # Python 3: strftime's argument must be str, not a bytestring
if six.PY2:
# strftime must be str, so encode it to utf8:
s_format = format.encode("utf-8")
s_res = date.strftime(s_format)
# and back to unicode
u_res = s_res.decode("utf-8")
else:
u_res = date.strftime(format)
return u_res
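# Illustrative usage (not part of the original module):
def _ru_strftime_demo():
    d = datetime.date(2007, 4, 1)
    return ru_strftime(u"%d %B %Y", date=d, inflected=True)
    # -> u"1 апреля 2007"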
| mit | 412,087,392,622,969,600 | 30.081545 | 78 | 0.620961 | false | 2.603163 | false | false | false | 0 |
skuda/client-python | kubernetes/client/models/v2alpha1_metric_spec.py | 1 | 6346 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V2alpha1MetricSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, object=None, pods=None, resource=None, type=None):
"""
V2alpha1MetricSpec - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'object': 'V2alpha1ObjectMetricSource',
'pods': 'V2alpha1PodsMetricSource',
'resource': 'V2alpha1ResourceMetricSource',
'type': 'str'
}
self.attribute_map = {
'object': 'object',
'pods': 'pods',
'resource': 'resource',
'type': 'type'
}
self._object = object
self._pods = pods
self._resource = resource
self._type = type
@property
def object(self):
"""
Gets the object of this V2alpha1MetricSpec.
object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
:return: The object of this V2alpha1MetricSpec.
:rtype: V2alpha1ObjectMetricSource
"""
return self._object
@object.setter
def object(self, object):
"""
Sets the object of this V2alpha1MetricSpec.
object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).
:param object: The object of this V2alpha1MetricSpec.
:type: V2alpha1ObjectMetricSource
"""
self._object = object
@property
def pods(self):
"""
Gets the pods of this V2alpha1MetricSpec.
pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
:return: The pods of this V2alpha1MetricSpec.
:rtype: V2alpha1PodsMetricSource
"""
return self._pods
@pods.setter
def pods(self, pods):
"""
Sets the pods of this V2alpha1MetricSpec.
pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.
:param pods: The pods of this V2alpha1MetricSpec.
:type: V2alpha1PodsMetricSource
"""
self._pods = pods
@property
def resource(self):
"""
Gets the resource of this V2alpha1MetricSpec.
resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.
:return: The resource of this V2alpha1MetricSpec.
:rtype: V2alpha1ResourceMetricSource
"""
return self._resource
@resource.setter
def resource(self, resource):
"""
Sets the resource of this V2alpha1MetricSpec.
resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.
:param resource: The resource of this V2alpha1MetricSpec.
:type: V2alpha1ResourceMetricSource
"""
self._resource = resource
@property
def type(self):
"""
Gets the type of this V2alpha1MetricSpec.
type is the type of metric source. It should match one of the fields below.
:return: The type of this V2alpha1MetricSpec.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V2alpha1MetricSpec.
type is the type of metric source. It should match one of the fields below.
:param type: The type of this V2alpha1MetricSpec.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
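# Illustrative usage sketch (not part of the generated client): building
# a pods-type metric spec; a real spec would also set the `pods` source.
def _metric_spec_demo():
    spec = V2alpha1MetricSpec(type='Pods')
    return spec.to_dict()
    # -> {'object': None, 'pods': None, 'resource': None, 'type': 'Pods'}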
| apache-2.0 | -7,890,363,430,849,370,000 | 31.71134 | 336 | 0.596439 | false | 4.355525 | false | false | false | 0.001576 |
wyc/django | django/middleware/csrf.py | 41 | 8817 | """
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
from __future__ import unicode_literals
import logging
import re
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.encoding import force_text
from django.utils.http import same_origin
logger = logging.getLogger('django.request')
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
CSRF_KEY_LENGTH = 32
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
return get_random_string(CSRF_KEY_LENGTH)
def get_token(request):
"""
Returns the CSRF token required for a POST form. The token is an
alphanumeric value. A new token is created if one is not already set.
A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
if "CSRF_COOKIE" not in request.META:
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
request.META["CSRF_COOKIE_USED"] = True
return request.META["CSRF_COOKIE"]
def rotate_token(request):
"""
Changes the CSRF token in use for a request - should be done on login
for security purposes.
"""
request.META.update({
"CSRF_COOKIE_USED": True,
"CSRF_COOKIE": _get_new_csrf_key(),
})
def _sanitize_token(token):
# Allow only alphanum
if len(token) > CSRF_KEY_LENGTH:
return _get_new_csrf_key()
token = re.sub('[^a-zA-Z0-9]+', '', force_text(token))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
return token
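# Illustrative check (not part of Django): _sanitize_token keeps only
# alphanumerics and regenerates over-long or empty tokens.
def _sanitize_demo():
    return _sanitize_token('abc-123!')  # -> 'abc123'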
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
logger.warning('Forbidden (%s): %s', reason, request.path,
extra={
'status_code': 403,
'request': request,
}
)
return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
csrf_token = _sanitize_token(
request.COOKIES[settings.CSRF_COOKIE_NAME])
# Use same token next time
request.META['CSRF_COOKIE'] = csrf_token
except KeyError:
csrf_token = None
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
# Assume that anything not defined as 'safe' by RFC2616 needs protection
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite.
# It comes after the creation of CSRF cookies, so that
# everything else continues to work exactly the same
# (e.g. cookies are sent, etc.), but before any
# branches that call reject().
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent
# nonce we're using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = force_text(
request.META.get('HTTP_REFERER'),
strings_only=True,
errors='replace'
)
if referer is None:
return self._reject(request, REASON_NO_REFERER)
# Here we generate a list of all acceptable HTTP referers,
# including the current host since that has been validated
# upstream.
good_hosts = list(settings.CSRF_TRUSTED_ORIGINS)
# Note that request.get_host() includes the port.
good_hosts.append(request.get_host())
good_referers = ['https://{0}/'.format(host) for host in good_hosts]
if not any(same_origin(referer, host) for host in good_referers):
reason = REASON_BAD_REFERER % referer
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
return self._reject(request, REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
try:
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
except IOError:
# Handle a broken connection before we've completed reading
# the POST data. process_view shouldn't raise any
# exceptions, so we'll ignore and serve the user a 403
# (assuming they're still listening, which they probably
# aren't because of the error).
pass
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE.
request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '')
if not constant_time_compare(request_csrf_token, csrf_token):
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
def process_response(self, request, response):
if getattr(response, 'csrf_processing_done', False):
return response
if not request.META.get("CSRF_COOKIE_USED", False):
return response
# Set the CSRF cookie even if it's already set, so we renew
# the expiry timer.
response.set_cookie(settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE,
httponly=settings.CSRF_COOKIE_HTTPONLY
)
# Content varies with the CSRF cookie, so set the Vary header.
patch_vary_headers(response, ('Cookie',))
response.csrf_processing_done = True
return response
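# Illustrative sketch (not part of Django): how the trusted referer list
# in process_view is assembled for an HTTPS request; hosts are made up.
def _good_referers_demo(host='example.com', trusted=('trusted.example.org',)):
    good_hosts = list(trusted) + [host]
    return ['https://{0}/'.format(h) for h in good_hosts]
    # -> ['https://trusted.example.org/', 'https://example.com/']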
| bsd-3-clause | 3,284,309,649,845,972,000 | 39.631336 | 87 | 0.601338 | false | 4.459788 | false | false | false | 0.001248 |
m-thielen/hyde | clyde.py | 52 | 7356 | import os
import sys
import tornado.auth
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import simplejson as json
import unicodedata
import yaml
from tornado.options import define, options
from django.conf import settings
from hydeengine import setup_env, Generator
from hydeengine.siteinfo import SiteInfo
from hydeengine.file_system import FileSystemEntity, File, Folder
from repos.dvcs import DVCS
define("port", default=8888, help="run on the given port", type=int)
define("sites", default="sites.yaml", help="yaml file with site definition", type=str)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/sites", SitesJSONHandler),
(r"/site/([^/]+)", SiteHandler),
(r"/site/([^/]+)/files", FilesJSONHandler),
(r"/site/([^/]+)/content", ContentHandler),
(r"/site/([^/]+)/content/save", SaveHandler),
(r"/site/([^/]+)/publish", PublishHandler),
(r"/site/([^/]+)/rename", RenameHandler),
(r"/site/([^/]+)/delete", DeleteHandler),
(r"/site/([^/]+)/generate", GenerateHandler),
]
sites = yaml.load(File(options.sites).read_all())
opts = dict(
static_path = File(__file__).parent.child("clydeweb/media"),
sites = sites
)
tornado.web.Application.__init__(self, handlers, **opts)
class BaseHandler(tornado.web.RequestHandler):
def init_site(self, site, force=False):
if not site in self.settings['sites']:
raise Exception("Site [%s] is not configured." % (site, ))
self.site_path = FileSystemEntity(
self.settings['sites'][site]["path"]).humblepath
if not hasattr(settings, 'siteinfo'):
setup_env(self.site_path)
setattr(settings, 'siteinfo', {})
if not site in settings.siteinfo or force:
self.siteinfo = SiteInfo(settings, self.site_path)
self.siteinfo.refresh()
settings.siteinfo[site] = self.siteinfo
else:
self.siteinfo = settings.siteinfo[site]
def get(self, site):
self.init_site(site)
self.doget(site)
    def doget(self, site):
        raise NotImplementedError
def post(self, site):
self.init_site(site)
self.dopost(site)
    def dopost(self, site):
        raise NotImplementedError
class SitesJSONHandler(tornado.web.RequestHandler):
def get(self):
d = self.settings['sites']
self.set_header("Content-Type", "application/json")
self.write(json.dumps(sorted(d.keys())))
class FilesJSONHandler(BaseHandler):
def doget(self, site):
d = self.siteinfo.content_node.simple_dict
def jsresource(resource):
return dict(
attributes = dict(
tooltip=resource['path'], rel='file'),
data = dict(title=resource['name'])
)
def jsnode(node):
            children = [jsresource(resource) for resource in
                        node['resources']]
            # extend, not append: child nodes must be flat siblings in
            # the jsTree children list
            children.extend(jsnode(child_node)
                            for child_node in node['nodes'])
return dict(
attributes = dict(tooltip=node['path'], rel='folder'),
data = dict(
title=node['name'],attributes=dict()),
children=children
)
jsdict = jsnode(d)
jsdict['state'] = 'open'
jsonobj = json.dumps(jsdict)
self.set_header("Content-Type", "application/json")
self.write(jsonobj)
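# Illustrative only (not part of the original module): the jsTree node
# shape produced by FilesJSONHandler above; names and paths are made up.
_EXAMPLE_JSTREE_NODE = {
    'attributes': {'tooltip': 'content', 'rel': 'folder'},
    'data': {'title': 'content', 'attributes': {}},
    'children': [
        {'attributes': {'tooltip': 'content/about.html', 'rel': 'file'},
         'data': {'title': 'about.html'}},
    ],
    'state': 'open',
}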
class ContentHandler(BaseHandler):
def doget(self, site):
path = self.get_argument("path", None)
if not path: return
f = File(self.siteinfo.folder.child(path))
if not f.exists: return
self.write(f.read_all())
class SiteHandler(tornado.web.RequestHandler):
def get(self, site):
self.render("clydeweb/templates/site.html", site=site)
class GenerateHandler(BaseHandler):
def dopost(self, site):
Generator(self.site_path).generate()
class RenameHandler(BaseHandler):
def dopost(self, site):
path = self.get_argument("path", None)
original_path = self.get_argument("original_path", None)
type = self.get_argument('type', None)
repo = self.settings['sites'][site]['repo']
dvcs = DVCS.load_dvcs(self.siteinfo.folder.path, repo)
if type == "file":
f = File(self.siteinfo.folder.child(original_path))
newf = File(self.siteinfo.folder.child(path))
if not f.exists:
newf.write("")
dvcs.add_file(newf)
else:
f.move_to(newf)
dvcs.add_file(newf, message="Renamed " + path)
else:
f = Folder(self.siteinfo.folder.child(original_path))
newf = Folder(self.siteinfo.folder.child(path))
if not f.exists:
newf.make()
else:
f.move_to(newf)
dvcs.add_file(newf, message="Renamed " + path)
self.init_site(site, force=True)
class DeleteHandler(BaseHandler):
def dopost(self, site):
path = self.get_argument("path", None)
type = self.get_argument('type', None)
repo = self.settings['sites'][site]['repo']
dvcs = DVCS.load_dvcs(self.siteinfo.folder.path, repo)
f = None
if type == "file":
f = File(self.siteinfo.folder.child(path))
else:
f = Folder(self.siteinfo.folder.child(path))
f.delete()
dvcs.save_draft()
self.init_site(site, force=True)
class SaveHandler(BaseHandler):
def dopost(self, site):
path = self.get_argument("path", None)
if not path: return
content = self.get_argument("content", None)
f = File(self.siteinfo.folder.child(path))
f.write(content)
repo = self.settings['sites'][site]['repo']
dvcs = DVCS.load_dvcs(self.siteinfo.folder.path, repo)
dvcs.save_draft()
class PublishHandler(BaseHandler):
def dopost(self, site):
repo = self.settings['sites'][site]['repo']
dvcs = DVCS.load_dvcs(self.siteinfo.folder.path, repo)
dvcs.publish()
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main() | mit | -8,360,743,617,226,954,000 | 36.92268 | 115 | 0.521207 | false | 4.170068 | false | false | false | 0.014002 |
blissland/blissflixx | chls/bfch_r_fullmoviesonyoutube/__init__.py | 3 | 3656 | import chanutils.reddit
_SUBREDDIT = 'fullmoviesonyoutube'
_FEEDLIST = [
{'title':'Latest', 'url':'http://www.reddit.com/r/fullmoviesonyoutube.json'},
{'title':'Action', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AAction&sort=top&restrict_sr=on'},
{'title':'Adventure', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AAdventure&sort=top&restrict_sr=on&t=all'},
{'title':'Animation', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AAnimation&sort=top&restrict_sr=on&t=all'},
{'title':'Biography', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3ABiography&sort=top&restrict_sr=on&t=all'},
{'title':'Comedy', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AComedy&sort=top&restrict_sr=on&t=all'},
{'title':'Crime', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3ACrime&sort=top&restrict_sr=on&t=all'},
{'title':'Documentary', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3ADocumentary&sort=top&restrict_sr=on&t=all'},
{'title':'Drama', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3ADrama&sort=top&restrict_sr=on&t=all'},
{'title':'Family', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AFamily&sort=top&restrict_sr=on&t=all'},
{'title':'Fantasy', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AFantasy&sort=top&restrict_sr=on&t=all'},
{'title':'Film-Noir', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3ANoir&sort=top&restrict_sr=on&t=all'},
{'title':'History', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AHistory&sort=top&restrict_sr=on&t=all'},
{'title':'Horror', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AHorror&sort=top&restrict_sr=on&t=all'},
{'title':'Misc/Adult', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AMisc+OR+flair%3AAdult&sort=top&restrict_sr=on'},
{'title':'Musical', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AMusical&sort=top&restrict_sr=on&t=all'},
{'title':'Mystery', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AMystery&sort=top&restrict_sr=on&t=all'},
{'title':'Romance', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3ARomance&sort=top&restrict_sr=on&t=all'},
{'title':'Sci-Fi', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3ASci-Fi&sort=top&restrict_sr=on&t=all'},
{'title':'Sport', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3ASport&sort=top&restrict_sr=on&t=all'},
{'title':'Thriller', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AThriller&sort=top&restrict_sr=on&t=all'},
{'title':'War', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AWar&sort=top&restrict_sr=on&t=all'},
{'title':'Western', 'url':'http://www.reddit.com/r/fullmoviesonyoutube/search.json?q=flair%3AWestern&sort=top&restrict_sr=on&t=all'},
]
def name():
return 'Youtube Movies'
def image():
return "icon.png"
def description():
return "Full movies available on Youtube. Data imported from /r/fullmoviesonyoutube subreddit (<a target='_blank' href='http://www.reddit.com/r/fullmoviesonyoutube/'>http://www.reddit.com/r/fullmoviesonyoutube</a>)."
def feedlist():
return _FEEDLIST
def feed(idx):
return chanutils.reddit.get_feed(_FEEDLIST[idx], moviesubs=True)
def search(q):
return chanutils.reddit.search(_SUBREDDIT, q, moviesubs=True)
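# Illustrative only (not part of the original channel): how the plugin
# hooks above are typically exercised; index 0 is the 'Latest' feed.
def _channel_demo():
  return name(), feedlist()[0]['title']  # -> ('Youtube Movies', 'Latest')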
| gpl-2.0 | 4,384,151,833,472,011,000 | 76.787234 | 218 | 0.727024 | false | 2.615165 | false | false | false | 0.022155 |
valbertovc/blog_django_bootstrap_ajax | accounts/models.py | 1 | 2454 | # -*- coding:utf8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models import signals
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from accounts.choices import SOCIAL_CHOICES
from django.conf import settings
class UserProfile(models.Model):
user = models.OneToOneField('auth.User', related_name='userprofile')
site = models.URLField()
bio = models.TextField()
picture = models.ImageField(upload_to='profiles', blank=True)
def __unicode__(self):
return u'Profile of user: %s' % self.user.username
def get_absolute_url(self):
return reverse('profile-detail', kwargs={'slug': self.user.username})
    def is_post_recommended(self, post):
        return post.pk in [p.pk for p in self.user.posts_recommended.all()]
    def is_comment_recommended(self, comment):
        return comment.pk in [c.pk for c in self.user.comments_recommended.all()]
    def is_comment_liked(self, comment):
        return comment.pk in [c.pk for c in self.user.comments_liked.all()]
    def is_comment_unliked(self, comment):
        return comment.pk in [c.pk for c in self.user.comments_unliked.all()]
# from django.dispatch import receiver
# from django.db.models.signals import post_save
# # ensures that every user gets a profile
# @receiver(post_save, sender=User)
# def create_user_profile(sender, instance, created, **kwargs):
# if created:
# UserProfile.objects.get_or_create(user=instance)
class SocialNetwork(models.Model):
icon = models.CharField(max_length=30, choices=SOCIAL_CHOICES.items())
name = models.CharField(max_length=50)
url = models.URLField()
users = models.ManyToManyField(settings.AUTH_USER_MODEL, through='Social',
through_fields=('social_network', 'user'))
def __unicode__(self):
return self.name
class Social(models.Model):
profile = models.CharField(max_length=100)
social_network = models.ForeignKey(SocialNetwork)
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='social_set')
| mit | 7,332,644,590,393,501,000 | 35.073529 | 82 | 0.676722 | false | 3.779661 | false | false | false | 0.003669 |
petewarden/tensorflow | tensorflow/python/ops/conv2d_benchmark.py | 44 | 8275 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for Conv2D op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"enable_layout_optimizer", False,
"If true, enables layout optimizer to update input data format for faster "
"execution of convolution ops.")
def build_graph(device, dtype, data_format, input_shape, filter_shape, strides,
padding, num_iters, warmup_iters):
"""builds a graph containing a sequence of conv2d operations.
Args:
device: String, the device to run on.
dtype: Data type for the convolution.
data_format: A string from: "NHWC" or "NCHW". Data format for input and
output data.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
padding: A string from: "SAME", "VALID". The type of padding
algorithm to use.
num_iters: number of iterations to run conv2d.
warmup_iters: number of iterations for warmup runs.
Returns:
An array of tensors to run()
"""
with ops.device("/%s:0" % device):
inp = variables.VariableV1(
random_ops.truncated_normal(input_shape, dtype=dtype))
filt = variables.VariableV1(
random_ops.truncated_normal(filter_shape, dtype=dtype))
outputs = []
conv2d_op = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
outputs.append(conv2d_op)
for _ in range(1, num_iters):
with ops.control_dependencies([conv2d_op]):
conv2d_op = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
outputs.append(conv2d_op)
warmup_groups = []
warmup_conv2d_op = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
warmup_groups.append(warmup_conv2d_op)
for _ in range(1, warmup_iters):
with ops.control_dependencies([warmup_conv2d_op]):
warmup_conv2d_op = nn_ops.conv2d(
inp, filt, strides, padding, data_format=data_format)
warmup_groups.append(warmup_conv2d_op)
return control_flow_ops.group(*warmup_groups), control_flow_ops.group(
*outputs)
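# Illustrative helper (not part of the original benchmark): the output
# spatial size of the convolutions above under SAME padding is
# ceil(input / stride), e.g. 500x500 with stride 2 -> 250x250.
def _same_padding_output_hw(h, w, stride_h, stride_w):
  return (h + stride_h - 1) // stride_h, (w + stride_w - 1) // stride_w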
class Conv2DBenchmark(test.Benchmark):
"""Benchmark conv2d!"""
def _run_graph(self, device, dtype, data_format, input_shape, filter_shape,
strides, padding, num_iters, warmup_iters):
"""runs the graph and print its execution time.
Args:
device: String, the device to run on.
dtype: Data type for the convolution.
data_format: A string from: "NHWC" or "NCHW". Data format for input and
output data.
input_shape: Shape of the input tensor.
filter_shape: Shape of the filter tensor.
strides: A list of ints. 1-D of length 4. The stride of sliding
window for each dimension of input.
      padding: A string from: "SAME", "VALID". The type of padding
        algorithm to use.
      num_iters: number of iterations to run conv2d.
warmup_iters: number of iterations for warmup runs.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
warmup_outputs, outputs = build_graph(device, dtype, data_format,
input_shape, filter_shape, strides,
padding, num_iters, warmup_iters)
config = config_pb2.ConfigProto()
config.graph_options.optimizer_options.opt_level = -1
rewrite_options = config.graph_options.rewrite_options
# Disable layout optimizer to not change input data_format.
rewrite_options.layout_optimizer = (
rewriter_config_pb2.RewriterConfig.ON if FLAGS.enable_layout_optimizer
else rewriter_config_pb2.RewriterConfig.OFF)
# Convolution ops are effectively noop in the test graph as we are not
# fetching the convolution outputs. Disable dependency optimizer to not
# remove the conv ops.
rewrite_options.dependency_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
with session_lib.Session(graph=graph, config=config) as session:
# TODO(hinsu): Use run_op_benchmark method from test.Benchmark to run
# benchmark along with warmup.
variables.global_variables_initializer().run()
# warmup runs
session.run(warmup_outputs)
start_time = time.time()
session.run(outputs)
duration = (time.time() - start_time) / num_iters
print("%s %s %s inputshape:%s filtershape:%s strides:%s padding:%s "
"%d iters: %.8f sec" %
(device, str(dtype), data_format, str(input_shape).replace(
" ", ""), str(filter_shape).replace(" ", ""),
str(strides).replace(" ", ""), padding, num_iters, duration))
name_template = (
"conv2d_{device}_{datatype}_{data_format}_input_shape_{inputshape}_"
"filter_shape_{filtershape}_strides_{strides}_padding_{padding}")
self.report_benchmark(
name=name_template.format(
device=device,
datatype=str(dtype),
data_format=str(data_format),
inputshape=str(input_shape).replace(" ", ""),
filtershape=str(filter_shape).replace(" ", ""),
strides=str(strides).replace(" ", ""),
padding=padding).replace(" ", ""),
iters=num_iters,
wall_time=duration)
return duration
def benchmark_conv2d(self):
print("conv2d benchmark:")
data_types = [dtypes.float32, dtypes.float16]
data_formats = ["NHWC", "NCHW"]
in_channels = list(range(1, 10)) + list(range(10, 20, 2)) + list(
range(20, 33, 4))
out_channels = [4, 16, 32]
hw_strides = [[2, 2]]
paddings = ["VALID", "SAME"]
args_lists = [
data_types, data_formats, in_channels, out_channels, hw_strides,
paddings
]
for args in itertools.product(*args_lists):
dtype, data_format, in_channel, out_channel, hw_stride, padding = args
# Keep batch size same as out channels just to reduce the number of
# different configurations to benchmark.
batch_size = out_channel
h, w, fh, fw = 500, 500, 3, 3
if data_format == "NHWC":
ishape = [batch_size, h, w, in_channel]
stride = [1] + hw_stride + [1]
elif data_format == "NCHW":
ishape = [batch_size, in_channel, h, w]
stride = [1, 1] + hw_stride
else:
raise ValueError("Unknown data_format: " + str(data_format))
fshape = [fh, fw, in_channel, out_channel]
num_iters = 80
warmup_iters = 2
self._run_graph("gpu", dtype, data_format, ishape, fshape, stride,
padding, num_iters, warmup_iters)
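# Hedged usage sketch (added for illustration; not part of the original
# benchmark). Runs a single conv2d configuration directly instead of the
# full itertools.product sweep in benchmark_conv2d. The "gpu" device string
# and the shapes below are illustrative assumptions only.
def _example_single_config_run():
  bench = Conv2DBenchmark()
  # NHWC input: batch 4, 64x64 spatial, 8 channels; 3x3 filter to 4 channels.
  return bench._run_graph("gpu", dtypes.float32, "NHWC",
                          [4, 64, 64, 8], [3, 3, 8, 4],
                          [1, 2, 2, 1], "SAME",
                          num_iters=10, warmup_iters=2)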
if __name__ == "__main__":
test.main()
# cpp/scons/scons-local-2.0.0.final.0/SCons/compat/_scons_io.py
# (from repo joyaether/zxing)
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
io compatibility module for older (pre-2.6) Python versions
This does NOT (repeat, *NOT*) provide complete io
functionality. It only wraps the portions of io functionality used
by SCons, in an interface that looks enough like io for our purposes.
"""
__revision__ = "src/engine/SCons/compat/_scons_io.py 5023 2010/06/14 22:05:46 scons"
# Use the "imp" module to protect the imports below from fixers.
import imp
_cStringIO = imp.load_module('cStringIO', *imp.find_module('cStringIO'))
StringIO = _cStringIO.StringIO
del _cStringIO
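# Hedged usage sketch (added; not part of the original module): the
# exported StringIO behaves like an in-memory text buffer, so io-style
# callers can write to it and read the result back.
def _example_roundtrip():
    buf = StringIO()
    buf.write("hello")
    return buf.getvalue()  # -> "hello"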
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
# Macros/Python/pickletools.py (from repo rec/DMXIS)
'''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
__all__ = ['dis', 'genops', 'optimize']
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
"""
"A pickle" is a program for a virtual pickle machine (PM, but more accurately
called an unpickling machine). It's a sequence of opcodes, interpreted by the
PM, building an arbitrarily complex Python object.
For the most part, the PM is very simple: there are no looping, testing, or
conditional instructions, no arithmetic and no function calls. Opcodes are
executed once each, from first to last, until a STOP opcode is reached.
The PM has two data areas, "the stack" and "the memo".
Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
integer object on the stack, whose value is gotten from a decimal string
literal immediately following the INT opcode in the pickle bytestream. Other
opcodes take Python objects off the stack. The result of unpickling is
whatever object is left on the stack when the final STOP opcode is executed.
The memo is simply an array of objects, or it can be implemented as a dict
mapping little integers to objects. The memo serves as the PM's "long term
memory", and the little integers indexing the memo are akin to variable
names. Some opcodes pop a stack object into the memo at a given index,
and others push a memo object at a given index onto the stack again.
At heart, that's all the PM has. Subtleties arise for these reasons:
+ Object identity. Objects can be arbitrarily complex, and subobjects
may be shared (for example, the list [a, a] refers to the same object a
twice). It can be vital that unpickling recreate an isomorphic object
graph, faithfully reproducing sharing.
+ Recursive objects. For example, after "L = []; L.append(L)", L is a
list, and L[0] is the same list. This is related to the object identity
point, and some sequences of pickle opcodes are subtle in order to
get the right result in all cases.
+ Things pickle doesn't know everything about. Examples of things pickle
does know everything about are Python's builtin scalar and container
types, like ints and tuples. They generally have opcodes dedicated to
them. For things like module references and instances of user-defined
classes, pickle's knowledge is limited. Historically, many enhancements
have been made to the pickle protocol in order to do a better (faster,
and/or more compact) job on those.
+ Backward compatibility and micro-optimization. As explained below,
pickle opcodes never go away, not even when better ways to do a thing
get invented. The repertoire of the PM just keeps growing over time.
For example, protocol 0 had two opcodes for building Python integers (INT
and LONG), protocol 1 added three more for more-efficient pickling of short
integers, and protocol 2 added two more for more-efficient pickling of
long integers (before protocol 2, the only ways to pickle a Python long
took time quadratic in the number of digits, for both pickling and
unpickling). "Opcode bloat" isn't so much a subtlety as a source of
wearying complication.
Pickle protocols:
For compatibility, the meaning of a pickle opcode never changes. Instead new
pickle opcodes get added, and each version's unpickler can handle all the
pickle opcodes in all protocol versions to date. So old pickles continue to
be readable forever. The pickler can generally be told to restrict itself to
the subset of opcodes available under previous protocol versions too, so that
users can create pickles under the current version readable by older
versions. However, a pickle does not contain its version number embedded
within it. If an older unpickler tries to read a pickle using a later
protocol, the result is most likely an exception due to seeing an unknown (in
the older unpickler) opcode.
The original pickle used what's now called "protocol 0", and what was called
"text mode" before Python 2.3. The entire pickle bytestream is made up of
printable 7-bit ASCII characters, plus the newline character, in protocol 0.
That's why it was called text mode. Protocol 0 is small and elegant, but
sometimes painfully inefficient.
The second major set of additions is now called "protocol 1", and was called
"binary mode" before Python 2.3. This added many opcodes with arguments
consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
bytes. Binary mode pickles can be substantially smaller than equivalent
text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
int as 4 bytes following the opcode, which is cheaper to unpickle than the
(perhaps) 11-character decimal string attached to INT. Protocol 1 also added
a number of opcodes that operate on many stack elements at once (like APPENDS
and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
The third major set of additions came in Python 2.3, and is called "protocol
2". This added:
- A better way to pickle instances of new-style classes (NEWOBJ).
- A way for a pickle to identify its protocol (PROTO).
- Time- and space- efficient pickling of long ints (LONG{1,4}).
- Shortcuts for small tuples (TUPLE{1,2,3}).
- Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
- The "extension registry", a vector of popular objects that can be pushed
efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
the registry contents are predefined (there's nothing akin to the memo's
PUT).
Another independent change with Python 2.3 is the abandonment of any
pretense that it might be safe to load pickles received from untrusted
parties -- no sufficient security analysis has been done to guarantee
this and there isn't a use case that warrants the expense of such an
analysis.
To this end, all tests for __safe_for_unpickling__ or for
copy_reg.safe_constructors are removed from the unpickling code.
References to these variables in the descriptions below are to be seen
as describing unpickling in Python 2.2 and before.
"""
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
class ArgumentDescriptor(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
def __init__(self, name, n, reader, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(n, int) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4))
self.n = n
self.reader = reader
assert isinstance(doc, str)
self.doc = doc
from struct import unpack as _unpack
def read_uint1(f):
r"""
>>> import StringIO
>>> read_uint1(StringIO.StringIO('\xff'))
255
"""
data = f.read(1)
if data:
return ord(data)
raise ValueError("not enough data in stream to read uint1")
uint1 = ArgumentDescriptor(
name='uint1',
n=1,
reader=read_uint1,
doc="One-byte unsigned integer.")
def read_uint2(f):
r"""
>>> import StringIO
>>> read_uint2(StringIO.StringIO('\xff\x00'))
255
>>> read_uint2(StringIO.StringIO('\xff\xff'))
65535
"""
data = f.read(2)
if len(data) == 2:
return _unpack("<H", data)[0]
raise ValueError("not enough data in stream to read uint2")
uint2 = ArgumentDescriptor(
name='uint2',
n=2,
reader=read_uint2,
doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
r"""
>>> import StringIO
>>> read_int4(StringIO.StringIO('\xff\x00\x00\x00'))
255
>>> read_int4(StringIO.StringIO('\x00\x00\x00\x80')) == -(2**31)
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<i", data)[0]
raise ValueError("not enough data in stream to read int4")
int4 = ArgumentDescriptor(
name='int4',
n=4,
reader=read_int4,
doc="Four-byte signed integer, little-endian, 2's complement.")
def read_stringnl(f, decode=True, stripquotes=True):
r"""
>>> import StringIO
>>> read_stringnl(StringIO.StringIO("'abcd'\nefg\n"))
'abcd'
>>> read_stringnl(StringIO.StringIO("\n"))
Traceback (most recent call last):
...
ValueError: no string quotes around ''
>>> read_stringnl(StringIO.StringIO("\n"), stripquotes=False)
''
>>> read_stringnl(StringIO.StringIO("''\n"))
''
>>> read_stringnl(StringIO.StringIO('"abcd"'))
Traceback (most recent call last):
...
ValueError: no newline found when trying to read stringnl
Embedded escapes are undone in the result.
>>> read_stringnl(StringIO.StringIO(r"'a\n\\b\x00c\td'" + "\n'e'"))
'a\n\\b\x00c\td'
"""
data = f.readline()
if not data.endswith('\n'):
raise ValueError("no newline found when trying to read stringnl")
data = data[:-1] # lose the newline
if stripquotes:
for q in "'\"":
if data.startswith(q):
if not data.endswith(q):
                    raise ValueError("string quote %r not found at both "
"ends of %r" % (q, data))
data = data[1:-1]
break
else:
raise ValueError("no string quotes around %r" % data)
# I'm not sure when 'string_escape' was added to the std codecs; it's
# crazy not to use it if it's there.
if decode:
data = data.decode('string_escape')
return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
return read_stringnl(f, decode=False, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
name='stringnl_noescape',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape,
doc="""A newline-terminated string.
This is a str-style string, without embedded escapes,
or bracketing quotes. It should consist solely of
printable ASCII characters.
""")
def read_stringnl_noescape_pair(f):
r"""
>>> import StringIO
>>> read_stringnl_noescape_pair(StringIO.StringIO("Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
stringnl_noescape_pair = ArgumentDescriptor(
name='stringnl_noescape_pair',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape_pair,
doc="""A pair of newline-terminated strings.
These are str-style strings, without embedded
escapes, or bracketing quotes. They should
consist solely of printable ASCII characters.
The pair is returned as a single string, with
a single blank separating the two strings.
""")
def read_string4(f):
r"""
>>> import StringIO
>>> read_string4(StringIO.StringIO("\x00\x00\x00\x00abc"))
''
>>> read_string4(StringIO.StringIO("\x03\x00\x00\x00abcdef"))
'abc'
>>> read_string4(StringIO.StringIO("\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a string4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("string4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a string4, but only %d remain" %
(n, len(data)))
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
def read_string1(f):
r"""
>>> import StringIO
>>> read_string1(StringIO.StringIO("\x00"))
''
>>> read_string1(StringIO.StringIO("\x03abcdef"))
'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a string1, but only %d remain" %
(n, len(data)))
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_unicodestringnl(f):
r"""
>>> import StringIO
>>> read_unicodestringnl(StringIO.StringIO("abc\uabcd\njunk"))
u'abc\uabcd'
"""
data = f.readline()
if not data.endswith('\n'):
raise ValueError("no newline found when trying to read "
"unicodestringnl")
data = data[:-1] # lose the newline
return unicode(data, 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring4(f):
r"""
>>> import StringIO
>>> s = u'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
'abcd\xea\xaf\x8d'
>>> n = chr(len(enc)) + chr(0) * 3 # little-endian 4-byte length
>>> t = read_unicodestring4(StringIO.StringIO(n + enc + 'junk'))
>>> s == t
True
>>> read_unicodestring4(StringIO.StringIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("unicodestring4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return unicode(data, 'utf-8')
raise ValueError("expected %d bytes in a unicodestring4, but only %d "
"remain" % (n, len(data)))
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_unicodestring4,
doc="""A counted Unicode string.
The first argument is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
r"""
>>> import StringIO
>>> read_decimalnl_short(StringIO.StringIO("1234\n56"))
1234
>>> read_decimalnl_short(StringIO.StringIO("1234L\n56"))
Traceback (most recent call last):
...
ValueError: trailing 'L' not allowed in '1234L'
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s.endswith("L"):
raise ValueError("trailing 'L' not allowed in %r" % s)
# It's not necessarily true that the result fits in a Python short int:
# the pickle may have been written on a 64-bit box. There's also a hack
# for True and False here.
if s == "00":
return False
elif s == "01":
return True
try:
return int(s)
except OverflowError:
return long(s)
def read_decimalnl_long(f):
r"""
>>> import StringIO
>>> read_decimalnl_long(StringIO.StringIO("1234\n56"))
Traceback (most recent call last):
...
ValueError: trailing 'L' required in '1234'
Someday the trailing 'L' will probably go away from this output.
>>> read_decimalnl_long(StringIO.StringIO("1234L\n56"))
1234L
>>> read_decimalnl_long(StringIO.StringIO("123456789012345678901234L\n6"))
123456789012345678901234L
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if not s.endswith("L"):
raise ValueError("trailing 'L' required in %r" % s)
return long(s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
r"""
>>> import StringIO
>>> read_floatnl(StringIO.StringIO("-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s)
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or on some boxes even
on itself (e.g., Windows can't read the strings it produces
for infinities or NaNs).
""")
def read_float8(f):
r"""
>>> import StringIO, struct
>>> raw = struct.pack(">d", -1.25)
>>> raw
'\xbf\xf4\x00\x00\x00\x00\x00\x00'
>>> read_float8(StringIO.StringIO(raw + "\n"))
-1.25
"""
data = f.read(8)
if len(data) == 8:
return _unpack(">d", data)[0]
raise ValueError("not enough data in stream to read float8")
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and cPickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
r"""
>>> import StringIO
>>> read_long1(StringIO.StringIO("\x00"))
0L
>>> read_long1(StringIO.StringIO("\x02\xff\x00"))
255L
>>> read_long1(StringIO.StringIO("\x02\xff\x7f"))
32767L
>>> read_long1(StringIO.StringIO("\x02\x00\xff"))
-256L
>>> read_long1(StringIO.StringIO("\x02\x00\x80"))
-32768L
"""
n = read_uint1(f)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long1")
return decode_long(data)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
r"""
>>> import StringIO
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x00"))
255L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x7f"))
32767L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\xff"))
-256L
>>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\x80"))
-32768L
    >>> read_long4(StringIO.StringIO("\x00\x00\x00\x00"))
0L
"""
n = read_int4(f)
if n < 0:
raise ValueError("long4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long4")
return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the long 0L, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
__slots__ = (
# name of descriptor record, for info only
'name',
# type of object, or tuple of type objects (meaning the object can
# be of any type in the tuple)
'obtype',
# human-readable docs for this kind of stack object; a string
'doc',
)
def __init__(self, name, obtype, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(obtype, type) or isinstance(obtype, tuple)
if isinstance(obtype, tuple):
for contained in obtype:
assert isinstance(contained, type)
self.obtype = obtype
assert isinstance(doc, str)
self.doc = doc
def __repr__(self):
return self.name
pyint = StackObject(
name='int',
obtype=int,
doc="A short (as opposed to long) Python integer object.")
pylong = StackObject(
name='long',
obtype=long,
doc="A long (as opposed to short) Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, long, bool),
doc="A Python integer object (short or long), or "
"a Python bool.")
pybool = StackObject(
name='bool',
obtype=(bool,),
doc="A Python bool object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pystring = StackObject(
name='str',
obtype=str,
doc="A Python string object.")
pyunicode = StackObject(
name='unicode',
obtype=unicode,
doc="A Python Unicode string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
    This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
    No matter how many objects are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
__slots__ = (
# symbolic name of opcode; a string
'name',
# the code used in a bytestream to represent the opcode; a
# one-character string
'code',
# If the opcode has an argument embedded in the byte string, an
# instance of ArgumentDescriptor specifying its type. Note that
# arg.reader(s) can be used to read and decode the argument from
# the bytestream s, and arg.doc documents the format of the raw
# argument bytes. If the opcode doesn't have an argument embedded
# in the bytestream, arg should be None.
'arg',
# what the stack looks like before this opcode runs; a list
'stack_before',
# what the stack looks like after this opcode runs; a list
'stack_after',
# the protocol number in which this opcode was introduced; an int
'proto',
# human-readable docs for this opcode; a string
'doc',
)
def __init__(self, name, code, arg,
stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(code, str)
assert len(code) == 1
self.code = code
assert arg is None or isinstance(arg, ArgumentDescriptor)
self.arg = arg
assert isinstance(stack_before, list)
for x in stack_before:
assert isinstance(x, StackObject)
self.stack_before = stack_before
assert isinstance(stack_after, list)
for x in stack_after:
assert isinstance(x, StackObject)
self.stack_after = stack_after
assert isinstance(proto, int) and 0 <= proto <= 2
self.proto = proto
assert isinstance(doc, str)
self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
      Another difference arises because, when bool was introduced as a
      distinct type in 2.3, builtin names True and False were also added to
2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pylong],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
      unpickles to a Python long. There doesn't seem to be a real purpose to
      the trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pylong],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pystring],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pystring],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian signed int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""One-tuple.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. IOW:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""One-tuple.
This code pops two values off the stack and pushes a tuple
of length 2 whose items are those values back onto it. IOW:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""One-tuple.
This code pops three values off the stack and pushes a tuple
of length 3 whose items are those values back onto it. IOW:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
      The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte signed
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=int4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
signed little-endian integer following.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If type(callable) is not ClassType, REDUCE complains unless the
callable has been registered with the copy_reg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
This may raise RuntimeError in restricted execution mode (which
disallows access to __dict__ directly); in that case, the object
is updated instead via
for k, v in argument.items():
anyobject[k] = v
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ It's an old-style class object (the type of the class object is
ClassType).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done. In
restricted execution mode it can fail (assignment to __class__ is
disallowed), and I'm not really sure what happens then -- it looks
like the code ends up calling the class object's __init__ anyway,
via falling into the next case.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug; cPickle
requires the attribute to be true). If __safe_for_unpickling__
doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug; cPickle does test __safe_for_unpickling__). See INST for
the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
# Verify uniqueness of .name and .code members.
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
if d.name in name2i:
raise ValueError("repeated name %r at indices %d and %d" %
(d.name, name2i[d.name], i))
if d.code in code2i:
raise ValueError("repeated code %r at indices %d and %d" %
(d.code, code2i[d.code], i))
name2i[d.name] = i
code2i[d.code] = i
del name2i, code2i, i, d
##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.
code2op = {}
for d in opcodes:
code2op[d.code] = d
del d
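# Hedged example (added; not in the original module): code2op maps a
# one-character opcode to its OpcodeInfo record.
def _example_opcode_lookup():
    info = code2op[']']           # the EMPTY_LIST opcode
    return info.name, info.proto  # -> ('EMPTY_LIST', 1)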
def assure_pickle_consistency(verbose=False):
import pickle, re
copy = code2op.copy()
for name in pickle.__all__:
if not re.match("[A-Z][A-Z0-9_]+$", name):
if verbose:
print "skipping %r: it doesn't look like an opcode name" % name
continue
picklecode = getattr(pickle, name)
if not isinstance(picklecode, str) or len(picklecode) != 1:
if verbose:
print ("skipping %r: value %r doesn't look like a pickle "
"code" % (name, picklecode))
continue
if picklecode in copy:
if verbose:
print "checking name %r w/ code %r for consistency" % (
name, picklecode)
d = copy[picklecode]
if d.name != name:
raise ValueError("for pickle code %r, pickle.py uses name %r "
"but we're using name %r" % (picklecode,
name,
d.name))
# Forget this one. Any left over in copy at the end are a problem
# of a different kind.
del copy[picklecode]
else:
raise ValueError("pickle.py appears to have a pickle opcode with "
"name %r and code %r, but we don't" %
(name, picklecode))
if copy:
msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
for code, d in copy.items():
msg.append(" name %r with code %r" % (d.name, code))
raise ValueError("\n".join(msg))
assure_pickle_consistency()
del assure_pickle_consistency
##############################################################################
# A pickle opcode generator.
def genops(pickle):
"""Generate all the opcodes in a pickle.
'pickle' is a file-like object, or string, containing the pickle.
Each opcode in the pickle is generated, from the current pickle position,
stopping after a STOP opcode is delivered. A triple is generated for
each opcode:
opcode, arg, pos
opcode is an OpcodeInfo record, describing the current opcode.
If the opcode has an argument embedded in the pickle, arg is its decoded
value, as a Python object. If the opcode doesn't have an argument, arg
is None.
If the pickle has a tell() method, pos was the value of pickle.tell()
before reading the current opcode. If the pickle is a string object,
it's wrapped in a StringIO object, and the latter's tell() result is
used. Else (the pickle doesn't have a tell(), and it's not obvious how
to query its current position) pos is None.
"""
import cStringIO as StringIO
if isinstance(pickle, str):
pickle = StringIO.StringIO(pickle)
if hasattr(pickle, "tell"):
getpos = pickle.tell
else:
getpos = lambda: None
while True:
pos = getpos()
code = pickle.read(1)
opcode = code2op.get(code)
if opcode is None:
if code == "":
raise ValueError("pickle exhausted before seeing STOP")
else:
raise ValueError("at position %s, opcode %r unknown" % (
pos is None and "<unknown>" or pos,
code))
if opcode.arg is None:
arg = None
else:
arg = opcode.arg.reader(pickle)
yield opcode, arg, pos
if code == '.':
assert opcode.name == 'STOP'
break
##############################################################################
# A pickle optimizer.
def optimize(p):
'Optimize a pickle string by removing unused PUT opcodes'
gets = set() # set of args used by a GET opcode
puts = [] # (arg, startpos, stoppos) for the PUT opcodes
prevpos = None # set to pos if previous opcode was a PUT
for opcode, arg, pos in genops(p):
if prevpos is not None:
puts.append((prevarg, prevpos, pos))
prevpos = None
if 'PUT' in opcode.name:
prevarg, prevpos = arg, pos
elif 'GET' in opcode.name:
gets.add(arg)
# Copy the pickle string except for PUTS without a corresponding GET
s = []
i = 0
for arg, start, stop in puts:
j = stop if (arg in gets) else start
s.append(p[i:j])
i = stop
s.append(p[i:])
return ''.join(s)
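# Hedged usage sketch (added; not in the original module): optimize()
# typically shrinks pickles of non-recursive data, since their PUT entries
# are never fetched by a matching GET, while unpickling the same object.
def _example_optimize():
    import pickle
    p = pickle.dumps([1, 2, (3, 4)], 1)
    q = optimize(p)
    assert pickle.loads(p) == pickle.loads(q)
    return len(p), len(q)  # len(q) <= len(p)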
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4):
"""Produce a symbolic disassembly of a pickle.
    'pickle' is a file-like object, or string, containing (at least) one
pickle. The pickle is disassembled from the current position, through
the first STOP opcode encountered.
Optional arg 'out' is a file-like object to which the disassembly is
printed. It defaults to sys.stdout.
Optional arg 'memo' is a Python dict, used as the pickle's memo. It
may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
Passing the same memo object to another dis() call then allows disassembly
to proceed across multiple pickles that were all created by the same
pickler with the same memo. Ordinarily you don't need to worry about this.
Optional arg indentlevel is the number of blanks by which to indent
a new MARK level. It defaults to 4.
In addition to printing the disassembly, some sanity checks are made:
+ All embedded opcode arguments "make sense".
+ Explicit and implicit pop operations have enough items on the stack.
+ When an opcode implicitly refers to a markobject, a markobject is
actually on the stack.
+ A memo entry isn't referenced before it's defined.
+ The markobject isn't stored in the memo.
+ A memo entry isn't redefined.
"""
    # Most of the hair here is for sanity checks, and much of it is needed
# anyway to detect when a protocol 0 POP takes a MARK off the stack
# (which in turn is needed to indent MARK blocks correctly).
stack = [] # crude emulation of unpickler stack
if memo is None:
        memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
errormsg = None
for opcode, arg, pos in genops(pickle):
if pos is not None:
print >> out, "%5d:" % pos,
line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
indentchunk * len(markstack),
opcode.name)
maxproto = max(maxproto, opcode.proto)
before = opcode.stack_before # don't mutate
after = opcode.stack_after # don't mutate
numtopop = len(before)
# See whether a MARK should be popped.
markmsg = None
if markobject in before or (opcode.name == "POP" and
stack and
stack[-1] is markobject):
assert markobject not in after
if __debug__:
if markobject in before:
assert before[-1] is stackslice
if markstack:
markpos = markstack.pop()
if markpos is None:
markmsg = "(MARK at unknown opcode offset)"
else:
markmsg = "(MARK at %d)" % markpos
# Pop everything at and after the topmost markobject.
while stack[-1] is not markobject:
stack.pop()
stack.pop()
# Stop later code from popping too much.
try:
numtopop = before.index(markobject)
except ValueError:
assert opcode.name == "POP"
numtopop = 0
else:
errormsg = markmsg = "no MARK exists on stack"
# Check for correct memo usage.
if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT"):
assert arg is not None
if arg in memo:
errormsg = "memo key %r already defined" % arg
elif not stack:
errormsg = "stack is empty -- can't store into memo"
elif stack[-1] is markobject:
errormsg = "can't store markobject in the memo"
else:
memo[arg] = stack[-1]
elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
if arg in memo:
assert len(after) == 1
after = [memo[arg]] # for better stack emulation
else:
errormsg = "memo key %r has never been stored into" % arg
if arg is not None or markmsg:
# make a mild effort to align arguments
line += ' ' * (10 - len(opcode.name))
if arg is not None:
line += ' ' + repr(arg)
if markmsg:
line += ' ' + markmsg
print >> out, line
if errormsg:
# Note that we delayed complaining until the offending opcode
# was printed.
raise ValueError(errormsg)
# Emulate the stack effects.
if len(stack) < numtopop:
raise ValueError("tries to pop %d items from stack with "
"only %d items" % (numtopop, len(stack)))
if numtopop:
del stack[-numtopop:]
if markobject in after:
assert markobject not in before
markstack.append(pos)
stack.extend(after)
print >> out, "highest protocol among opcodes =", maxproto
if stack:
raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
def __init__(self, value):
self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {'abc': u"def"}]
>>> pkl = pickle.dumps(x, 0)
>>> dis(pkl)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: I INT 1
8: a APPEND
9: I INT 2
12: a APPEND
13: ( MARK
14: I INT 3
17: I INT 4
20: t TUPLE (MARK at 13)
21: p PUT 1
24: a APPEND
25: ( MARK
26: d DICT (MARK at 25)
27: p PUT 2
30: S STRING 'abc'
37: p PUT 3
40: V UNICODE u'def'
45: p PUT 4
48: s SETITEM
49: a APPEND
50: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl = pickle.dumps(x, 1)
>>> dis(pkl)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: U SHORT_BINSTRING 'abc'
24: q BINPUT 3
26: X BINUNICODE u'def'
34: q BINPUT 4
36: s SETITEM
37: e APPENDS (MARK at 3)
38: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: i INST 'pickletools _Example' (MARK at 5)
28: p PUT 1
31: ( MARK
32: d DICT (MARK at 31)
33: p PUT 2
36: S STRING 'value'
45: p PUT 3
48: I INT 42
52: s SETITEM
53: b BUILD
54: a APPEND
55: g GET 1
58: a APPEND
59: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: ( MARK
5: c GLOBAL 'pickletools _Example'
27: q BINPUT 1
29: o OBJ (MARK at 4)
30: q BINPUT 2
32: } EMPTY_DICT
33: q BINPUT 3
35: U SHORT_BINSTRING 'value'
42: q BINPUT 4
44: K BININT1 42
46: s SETITEM
47: b BUILD
48: h BINGET 2
50: e APPENDS (MARK at 3)
51: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> from StringIO import StringIO
>>> f = StringIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
__test__ = {'disassembler_test': _dis_test,
'disassembler_memo_test': _memo_test,
}
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
| artistic-2.0 | 5,298,702,787,502,199,000 | 31.738001 | 79 | 0.573879 | false | 4.080253 | false | false | false | 0.00064 |
xupit3r/askpgh | askbot/utils/forms.py | 2 | 11683 | import re
from django import forms
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from askbot.conf import settings as askbot_settings
from askbot.utils.slug import slugify
from askbot.utils.functions import split_list, mark_safe_lazy
from askbot import const
from longerusername import MAX_USERNAME_LENGTH
import logging
import urllib
def clean_next(next, default=None):
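    """Sanitize a 'next' redirect target.
    Falls back to 'default' (or the index url) unless the value is a local,
    slash-prefixed path; otherwise decodes it to unicode and strips it.
    """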
if next is None or not next.startswith('/'):
if default:
return default
else:
return reverse('index')
if isinstance(next, str):
next = unicode(urllib.unquote(next), 'utf-8', 'replace')
next = next.strip()
logging.debug('next url is %s' % next)
return next
def get_error_list(form_instance):
"""return flat list of error values for the form"""
lists = form_instance.errors.values()
errors = list()
for error_list in lists:
errors.extend(list(error_list))
return errors
def get_next_url(request, default = None):
return clean_next(request.REQUEST.get('next'), default)
def get_db_object_or_404(params):
"""a utility function that returns an object
in return to the model_name and object_id
only specific models are accessible
"""
from askbot import models
try:
model_name = params['model_name']
assert(model_name=='Group')
model = models.get_model(model_name)
obj_id = forms.IntegerField().clean(params['object_id'])
return get_object_or_404(model, id=obj_id)
except Exception:
#need catch-all b/c of the nature of the function
raise Http404
def format_errors(error_list):
"""If there is only one error - returns a string
corresponding to that error, to remove the <ul> tag.
If there is > 1 error - then convert the error_list into
a string.
"""
if len(error_list) == 1:
return unicode(error_list[0])
else:
return unicode(error_list)
class StrippedNonEmptyCharField(forms.CharField):
def clean(self, value):
value = value.strip()
if self.required and value == '':
raise forms.ValidationError(_('this field is required'))
return value
class NextUrlField(forms.CharField):
def __init__(self):
super(
NextUrlField,
self
).__init__(
max_length = 255,
widget = forms.HiddenInput(),
required = False
)
def clean(self,value):
return clean_next(value)
login_form_widget_attrs = { 'class': 'required login' }
class UserNameField(StrippedNonEmptyCharField):
RESERVED_NAMES = (u'fuck', u'shit', u'ass', u'sex', u'add',
u'edit', u'save', u'delete', u'manage', u'update', 'remove', 'new')
def __init__(
self,
db_model=User,
db_field='username',
must_exist=False,
skip_clean=False,
label=_('Choose a screen name'),
widget_attrs=None,
**kw
):
self.must_exist = must_exist
self.skip_clean = skip_clean
self.db_model = db_model
self.db_field = db_field
self.user_instance = None
error_messages={
'required': _('user name is required'),
'taken': _('sorry, this name is taken, please choose another'),
'forbidden': _('sorry, this name is not allowed, please choose another'),
'missing': _('sorry, there is no user with this name'),
'multiple-taken': _('sorry, we have a serious error - user name is taken by several users'),
'invalid': _('user name can only consist of letters, empty space and underscore'),
'meaningless': _('please use at least some alphabetic characters in the user name'),
'noemail': _('symbol "@" is not allowed')
}
if 'error_messages' in kw:
error_messages.update(kw['error_messages'])
del kw['error_messages']
if widget_attrs:
widget_attrs.update(login_form_widget_attrs)
else:
widget_attrs = login_form_widget_attrs
max_length = MAX_USERNAME_LENGTH()
super(UserNameField,self).__init__(
max_length=max_length,
widget=forms.TextInput(attrs=widget_attrs),
label=label,
error_messages=error_messages,
**kw
)
def clean(self,username):
""" validate username """
if self.skip_clean == True:
logging.debug('username accepted with no validation')
return username
if self.user_instance is None:
pass
elif isinstance(self.user_instance, User):
if username == self.user_instance.username:
logging.debug('username valid')
return username
else:
raise TypeError('user instance must be of type User')
try:
username = super(UserNameField, self).clean(username)
except forms.ValidationError:
raise forms.ValidationError(self.error_messages['required'])
username_re_string = const.USERNAME_REGEX_STRING
        # Attention: the '@' symbol is checked in two places: the input and the regex.
if askbot_settings.ALLOW_EMAIL_ADDRESS_IN_USERNAME is False:
if '@' in username:
raise forms.ValidationError(self.error_messages['noemail'])
username_re_string = username_re_string.replace('@', '')
username_regex = re.compile(username_re_string, re.UNICODE)
if self.required and not username_regex.search(username):
raise forms.ValidationError(self.error_messages['invalid'])
if username in self.RESERVED_NAMES:
raise forms.ValidationError(self.error_messages['forbidden'])
if slugify(username) == '':
raise forms.ValidationError(self.error_messages['meaningless'])
try:
user = self.db_model.objects.get(
                **{self.db_field: username}
)
if user:
if self.must_exist:
logging.debug('user exists and name accepted b/c here we validate existing user')
return username
else:
raise forms.ValidationError(self.error_messages['taken'])
except self.db_model.DoesNotExist:
if self.must_exist:
logging.debug('user must exist, so raising the error')
raise forms.ValidationError(self.error_messages['missing'])
else:
logging.debug('user name valid!')
return username
except self.db_model.MultipleObjectsReturned:
logging.debug('error - user with this name already exists')
raise forms.ValidationError(self.error_messages['multiple-taken'])
def email_is_allowed(
email, allowed_emails='', allowed_email_domains=''
):
"""True, if email address is pre-approved or matches a allowed
domain"""
if allowed_emails:
email_list = split_list(allowed_emails)
allowed_emails = ' ' + ' '.join(email_list) + ' '
email_match_re = re.compile(r'\s%s\s' % email)
if email_match_re.search(allowed_emails):
return True
if allowed_email_domains:
email_domain = email.split('@')[1]
domain_list = split_list(allowed_email_domains)
domain_match_re = re.compile(r'\s%s\s' % email_domain)
allowed_email_domains = ' ' + ' '.join(domain_list) + ' '
return domain_match_re.search(allowed_email_domains)
return False
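# Hedged usage sketch (illustrative; the addresses below are made up):
#
#   email_is_allowed('bob@example.com', allowed_email_domains='example.com')
#   # -> truthy, because the domain matches the whitelist
#   email_is_allowed('eve@other.org', allowed_emails='bob@example.com')
#   # -> False, because the address is not pre-approved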
class UserEmailField(forms.EmailField):
def __init__(self, skip_clean=False, **kw):
self.skip_clean = skip_clean
hidden = kw.pop('hidden', False)
if hidden is True:
widget_class = forms.HiddenInput
else:
widget_class = forms.TextInput
super(UserEmailField,self).__init__(
widget=widget_class(
attrs=dict(login_form_widget_attrs, maxlength=200)
),
label=mark_safe_lazy(_('Your email <i>(never shared)</i>')),
error_messages={
'required':_('email address is required'),
'invalid':_('please enter a valid email address'),
'taken':_('this email is already used by someone else, please choose another'),
'unauthorized':_('this email address is not authorized')
},
**kw
)
def clean(self, email):
""" validate if email exist in database
from legacy register
return: raise error if it exist """
email = super(UserEmailField,self).clean(email.strip())
if self.skip_clean:
return email
allowed_domains = askbot_settings.ALLOWED_EMAIL_DOMAINS.strip()
allowed_emails = askbot_settings.ALLOWED_EMAILS.strip()
if allowed_emails or allowed_domains:
if not email_is_allowed(
email,
allowed_emails=allowed_emails,
allowed_email_domains=allowed_domains
):
raise forms.ValidationError(self.error_messages['unauthorized'])
try:
user = User.objects.get(email__iexact=email)
logging.debug('email taken')
raise forms.ValidationError(self.error_messages['taken'])
except User.DoesNotExist:
logging.debug('email valid')
return email
except User.MultipleObjectsReturned:
logging.critical('email taken many times over')
raise forms.ValidationError(self.error_messages['taken'])
class SetPasswordForm(forms.Form):
password1 = forms.CharField(
widget=forms.PasswordInput(
attrs=login_form_widget_attrs,
render_value=True
),
label=_('Password'),
error_messages={'required':_('password is required')},
)
password2 = forms.CharField(
widget=forms.PasswordInput(
attrs=login_form_widget_attrs,
render_value=True
),
label=_('Password retyped'),
error_messages={'required':_('please, retype your password'),
'nomatch':_('entered passwords did not match, please try again')},
)
def __init__(self, data=None, user=None, *args, **kwargs):
super(SetPasswordForm, self).__init__(data, *args, **kwargs)
def clean_password2(self):
"""
Validates that the two password inputs match.
"""
if 'password1' in self.cleaned_data:
if self.cleaned_data['password1'] == self.cleaned_data['password2']:
self.password = self.cleaned_data['password2']
self.cleaned_data['password'] = self.cleaned_data['password2']
return self.cleaned_data['password2']
else:
del self.cleaned_data['password2']
raise forms.ValidationError(self.fields['password2'].error_messages['nomatch'])
else:
return self.cleaned_data['password2']
| gpl-3.0 | 2,806,546,716,169,801,000 | 37.304918 | 114 | 0.57665 | false | 4.462567 | false | false | false | 0.00505 |
dims/neutron | neutron/tests/unit/agent/l3/test_dvr_fip_ns.py | 1 | 11743 | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from neutron.agent.common import utils
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.tests import base
_uuid = uuidutils.generate_uuid
class TestDvrFipNs(base.BaseTestCase):
def setUp(self):
super(TestDvrFipNs, self).setUp()
self.conf = mock.Mock()
self.conf.state_path = '/tmp'
self.driver = mock.Mock()
self.driver.DEV_NAME_LEN = 14
self.net_id = _uuid()
self.fip_ns = dvr_fip_ns.FipNamespace(self.net_id,
self.conf,
self.driver,
use_ipv6=True)
def test_subscribe(self):
is_first = self.fip_ns.subscribe(mock.sentinel.external_net_id)
self.assertTrue(is_first)
def test_subscribe_not_first(self):
self.fip_ns.subscribe(mock.sentinel.external_net_id)
is_first = self.fip_ns.subscribe(mock.sentinel.external_net_id2)
self.assertFalse(is_first)
def test_unsubscribe(self):
self.fip_ns.subscribe(mock.sentinel.external_net_id)
is_last = self.fip_ns.unsubscribe(mock.sentinel.external_net_id)
self.assertTrue(is_last)
def test_unsubscribe_not_last(self):
self.fip_ns.subscribe(mock.sentinel.external_net_id)
self.fip_ns.subscribe(mock.sentinel.external_net_id2)
is_last = self.fip_ns.unsubscribe(mock.sentinel.external_net_id2)
self.assertFalse(is_last)
def test_allocate_rule_priority(self):
pr = self.fip_ns.allocate_rule_priority('20.0.0.30')
self.assertIn('20.0.0.30', self.fip_ns._rule_priorities.allocations)
self.assertNotIn(pr, self.fip_ns._rule_priorities.pool)
def test_deallocate_rule_priority(self):
pr = self.fip_ns.allocate_rule_priority('20.0.0.30')
self.fip_ns.deallocate_rule_priority('20.0.0.30')
self.assertNotIn('20.0.0.30', self.fip_ns._rule_priorities.allocations)
self.assertIn(pr, self.fip_ns._rule_priorities.pool)
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
@mock.patch.object(ip_lib, 'device_exists')
def test_gateway_added(self, device_exists, send_adv_notif,
IPDevice, IPWrapper):
subnet_id = _uuid()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': self.net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
device_exists.return_value = False
self.fip_ns._gateway_added(agent_gw_port,
mock.sentinel.interface_name)
self.assertEqual(1, self.driver.plug.call_count)
self.assertEqual(1, self.driver.init_l3.call_count)
send_adv_notif.assert_called_once_with(self.fip_ns.get_name(),
mock.sentinel.interface_name,
'20.0.0.30',
mock.ANY)
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif')
@mock.patch.object(ip_lib, 'device_exists')
def test_gateway_outside_subnet_added(self, device_exists, send_adv_notif,
IPDevice, IPWrapper):
device = mock.Mock()
IPDevice.return_value = device
subnet_id = _uuid()
agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.1.1'}],
'id': _uuid(),
'network_id': self.net_id,
'mac_address': 'ca:fe:de:ad:be:ef'}
device_exists.return_value = False
self.fip_ns._gateway_added(agent_gw_port,
mock.sentinel.interface_name)
self.assertEqual(1, self.driver.plug.call_count)
self.assertEqual(1, self.driver.init_l3.call_count)
send_adv_notif.assert_called_once_with(self.fip_ns.get_name(),
mock.sentinel.interface_name,
'20.0.0.30',
mock.ANY)
device.route.add_route.assert_called_once_with('20.0.1.1',
scope='link')
device.route.add_gateway.assert_called_once_with('20.0.1.1')
@mock.patch.object(iptables_manager, 'IptablesManager')
@mock.patch.object(utils, 'execute')
@mock.patch.object(ip_lib.IpNetnsCommand, 'exists')
def _test_create(self, old_kernel, exists, execute, IPTables):
exists.return_value = True
# There are up to four sysctl calls - two for ip_nonlocal_bind,
# and two to enable forwarding
execute.side_effect = [RuntimeError if old_kernel else None,
None, None, None]
self.fip_ns._iptables_manager = IPTables()
self.fip_ns.create()
ns_name = self.fip_ns.get_name()
netns_cmd = ['ip', 'netns', 'exec', ns_name]
bind_cmd = ['sysctl', '-w', 'net.ipv4.ip_nonlocal_bind=1']
expected = [mock.call(netns_cmd + bind_cmd, check_exit_code=True,
extra_ok_codes=None, log_fail_as_error=False,
run_as_root=True)]
if old_kernel:
expected.append(mock.call(bind_cmd, check_exit_code=True,
extra_ok_codes=None,
log_fail_as_error=True,
run_as_root=True))
execute.assert_has_calls(expected)
def test_create_old_kernel(self):
self._test_create(True)
def test_create_new_kernel(self):
self._test_create(False)
@mock.patch.object(ip_lib, 'IPWrapper')
def test_destroy(self, IPWrapper):
ip_wrapper = IPWrapper()
dev1 = mock.Mock()
dev1.name = 'fpr-aaaa'
dev2 = mock.Mock()
dev2.name = 'fg-aaaa'
ip_wrapper.get_devices.return_value = [dev1, dev2]
with mock.patch.object(self.fip_ns.ip_wrapper_root.netns,
'delete') as delete:
self.fip_ns.delete()
delete.assert_called_once_with(mock.ANY)
ext_net_bridge = self.conf.external_network_bridge
ns_name = self.fip_ns.get_name()
self.driver.unplug.assert_called_once_with('fg-aaaa',
bridge=ext_net_bridge,
prefix='fg-',
namespace=ns_name)
ip_wrapper.del_veth.assert_called_once_with('fpr-aaaa')
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'device_exists')
def test_create_rtr_2_fip_link(self, device_exists, IPDevice, IPWrapper):
ri = mock.Mock()
ri.router_id = _uuid()
ri.rtr_fip_subnet = None
ri.ns_name = mock.sentinel.router_ns
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(ri.router_id)
fip_2_rtr_name = self.fip_ns.get_int_device_name(ri.router_id)
fip_ns_name = self.fip_ns.get_name()
self.fip_ns.local_subnets = allocator = mock.Mock()
pair = lla.LinkLocalAddressPair('169.254.31.28/31')
allocator.allocate.return_value = pair
device_exists.return_value = False
ip_wrapper = IPWrapper()
self.conf.network_device_mtu = 2000
ip_wrapper.add_veth.return_value = (IPDevice(), IPDevice())
self.fip_ns.create_rtr_2_fip_link(ri)
ip_wrapper.add_veth.assert_called_with(rtr_2_fip_name,
fip_2_rtr_name,
fip_ns_name)
device = IPDevice()
device.link.set_mtu.assert_called_with(2000)
self.assertEqual(2, device.link.set_mtu.call_count)
device.route.add_gateway.assert_called_once_with(
'169.254.31.29', table=16)
@mock.patch.object(ip_lib, 'IPWrapper')
@mock.patch.object(ip_lib, 'IPDevice')
@mock.patch.object(ip_lib, 'device_exists')
def test_create_rtr_2_fip_link_already_exists(self,
device_exists,
IPDevice,
IPWrapper):
ri = mock.Mock()
ri.router_id = _uuid()
ri.rtr_fip_subnet = None
device_exists.return_value = True
self.fip_ns.local_subnets = allocator = mock.Mock()
pair = lla.LinkLocalAddressPair('169.254.31.28/31')
allocator.allocate.return_value = pair
self.fip_ns.create_rtr_2_fip_link(ri)
ip_wrapper = IPWrapper()
self.assertFalse(ip_wrapper.add_veth.called)
@mock.patch.object(ip_lib, 'IPDevice')
def _test_scan_fip_ports(self, ri, ip_list, IPDevice):
IPDevice.return_value = device = mock.Mock()
device.addr.list.return_value = ip_list
self.fip_ns.get_rtr_ext_device_name = mock.Mock(
return_value=mock.sentinel.rtr_ext_device_name)
self.fip_ns.scan_fip_ports(ri)
@mock.patch.object(ip_lib, 'device_exists')
def test_scan_fip_ports_restart_fips(self, device_exists):
device_exists.return_value = True
ri = mock.Mock()
ri.dist_fip_count = None
ri.floating_ips_dict = {}
ip_list = [{'cidr': '111.2.3.4/32'}, {'cidr': '111.2.3.5/32'}]
self._test_scan_fip_ports(ri, ip_list)
self.assertEqual(2, ri.dist_fip_count)
@mock.patch.object(ip_lib, 'device_exists')
def test_scan_fip_ports_restart_none(self, device_exists):
device_exists.return_value = True
ri = mock.Mock()
ri.dist_fip_count = None
ri.floating_ips_dict = {}
self._test_scan_fip_ports(ri, [])
self.assertEqual(0, ri.dist_fip_count)
def test_scan_fip_ports_restart_zero(self):
ri = mock.Mock()
ri.dist_fip_count = 0
self._test_scan_fip_ports(ri, None)
self.assertEqual(0, ri.dist_fip_count)
| apache-2.0 | 98,048,349,394,170,850 | 42.014652 | 79 | 0.550285 | false | 3.588936 | true | false | false | 0 |
Ghost-script/dyno-chat | kickchat/apps/pulsar/async/eventloop.py | 1 | 3971 | import os
import asyncio
from threading import current_thread
from .access import thread_data, LOGGER
from .futures import Future, maybe_async, async, Task
from .threads import run_in_executor, QueueEventLoop, set_as_loop
__all__ = ['EventLoop', 'call_repeatedly', 'loop_thread_id']
class EventLoopPolicy(asyncio.AbstractEventLoopPolicy):
'''Pulsar event loop policy'''
def get_event_loop(self):
return thread_data('_event_loop')
def get_request_loop(self):
return thread_data('_request_loop') or self.get_event_loop()
def new_event_loop(self):
return EventLoop()
def set_event_loop(self, event_loop):
"""Set the event loop."""
assert event_loop is None or isinstance(event_loop,
asyncio.AbstractEventLoop)
if isinstance(event_loop, QueueEventLoop):
thread_data('_request_loop', event_loop)
else:
thread_data('_event_loop', event_loop)
asyncio.set_event_loop_policy(EventLoopPolicy())
Handle = asyncio.Handle
TimerHandle = asyncio.TimerHandle
class LoopingCall(object):
def __init__(self, loop, callback, args, interval=None):
self._loop = loop
self.callback = callback
self.args = args
self._cancelled = False
interval = interval or 0
if interval > 0:
self.interval = interval
self.handler = self._loop.call_later(interval, self)
else:
self.interval = None
self.handler = self._loop.call_soon(self)
@property
def cancelled(self):
return self._cancelled
def cancel(self):
'''Attempt to cancel the callback.'''
self._cancelled = True
def __call__(self):
try:
result = maybe_async(self.callback(*self.args), self._loop)
except Exception:
self._loop.logger.exception('Exception in looping callback')
self.cancel()
return
if isinstance(result, Future):
result.add_done_callback(self._might_continue)
else:
self._continue()
def _continue(self):
if not self._cancelled:
handler = self.handler
loop = self._loop
if self.interval:
handler._cancelled = False
handler._when = loop.time() + self.interval
loop._add_callback(handler)
else:
loop._ready.append(self.handler)
def _might_continue(self, fut):
try:
fut.result()
except Exception:
self._loop.logger.exception('Exception in looping callback')
self.cancel()
else:
self._continue()
class EventLoop(asyncio.SelectorEventLoop):
task_factory = Task
def __init__(self, selector=None, iothreadloop=False, logger=None):
super(EventLoop, self).__init__(selector)
self._iothreadloop = iothreadloop
self.logger = logger or LOGGER
self.call_soon(set_as_loop, self)
def __repr__(self):
return self.name
__str__ = __repr__
@property
def name(self):
if self.is_running():
return self.__class__.__name__
else:
return '%s <not running>' % self.__class__.__name__
def run_in_executor(self, executor, callback, *args):
return run_in_executor(self, executor, callback, *args)
def call_repeatedly(loop, interval, callback, *args):
"""Call a ``callback`` every ``interval`` seconds.
    It handles asynchronous results. If an error occurs in the ``callback``,
the chain is broken and the ``callback`` won't be called anymore.
"""
return LoopingCall(loop, callback, args, interval)
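# Hedged usage sketch (illustrative; the callback body is made up): schedule
# a periodic task and cancel it via the returned LoopingCall.
#
#   loop = EventLoop()
#   beat = call_repeatedly(loop, 2, lambda: LOGGER.info('heartbeat'))
#   ...
#   beat.cancel()  # stop future invocations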
def loop_thread_id(loop):
'''Thread ID of the running ``loop``.
'''
waiter = asyncio.Future(loop=loop)
loop.call_soon(lambda: waiter.set_result(current_thread().ident))
return waiter
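# Hedged usage sketch (illustrative): the waiter resolves only once the loop
# has run the scheduled callback, so the loop must be running elsewhere.
#
#   waiter = loop_thread_id(loop)
#   # ... after the loop processes its ready queue:
#   # waiter.result() == ident of the thread running 'loop'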
| gpl-2.0 | -7,694,598,141,315,368,000 | 28.414815 | 75 | 0.598338 | false | 4.140772 | false | false | false | 0.000252 |
jimsize/PySolFC | pysollib/games/special/hanoi.py | 1 | 5651 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
# PySol imports
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.hint import CautiousDefaultHint
from pysollib.stack import \
InitialDealTalonStack, \
isRankSequence, \
BasicRowStack
# ************************************************************************
# * Tower of Hanoy
# ************************************************************************
class TowerOfHanoy_Hint(CautiousDefaultHint):
# FIXME: demo is completely clueless
pass
class TowerOfHanoy_RowStack(BasicRowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return 0
if not self.cards:
return 1
return self.cards[-1].rank > cards[0].rank
def getBottomImage(self):
return self.game.app.images.getReserveBottom()
class TowerOfHanoy(Game):
RowStack_Class = TowerOfHanoy_RowStack
Hint_Class = TowerOfHanoy_Hint
#
# game layout
#
def createGame(self):
# create layout
l, s = Layout(self), self.s
# set window
# (piles up to XX cards are fully playable in default window size)
h = max(2*l.YS, l.YS + (len(self.cards)-1)*l.YOFFSET + l.YM)
self.setSize(l.XM + 5*l.XS, l.YM + l.YS + h)
# create stacks
for i in range(3):
x, y, = l.XM + (i+1)*l.XS, l.YM
s.rows.append(
self.RowStack_Class(x, y, self, max_accept=1, max_move=1))
s.talon = InitialDealTalonStack(l.XM, self.height-l.YS, self)
# define stack-groups
l.defaultStackGroups()
#
# game overrides
#
def startGame(self):
self.startDealSample()
for i in range(3):
self.s.talon.dealRow()
def isGameWon(self):
for s in self.s.rows:
if len(s.cards) == len(self.cards):
return 1
return 0
def shallHighlightMatch(self, stack1, card1, stack2, card2):
return card1.rank + 1 == card2.rank or card2.rank + 1 == card1.rank
def getAutoStacks(self, event=None):
return ((), (), self.sg.dropstacks)
# ************************************************************************
# * Hanoi Puzzle
# ************************************************************************
class HanoiPuzzle_RowStack(TowerOfHanoy_RowStack):
def getBottomImage(self):
if self.id == len(self.game.s.rows) - 1:
return self.game.app.images.getSuitBottom()
return self.game.app.images.getReserveBottom()
class HanoiPuzzle4(TowerOfHanoy):
RowStack_Class = HanoiPuzzle_RowStack
def _shuffleHook(self, cards):
# no shuffling
return self._shuffleHookMoveToTop(cards, lambda c: (1, -c.id))
def startGame(self):
self.startDealSample()
for i in range(len(self.cards)):
self.s.talon.dealRow(rows=self.s.rows[:1])
def isGameWon(self):
return len(self.s.rows[-1].cards) == len(self.cards)
class HanoiPuzzle5(HanoiPuzzle4):
pass
class HanoiPuzzle6(HanoiPuzzle4):
pass
# ************************************************************************
# * Hanoi Sequence
# ************************************************************************
class HanoiSequence(TowerOfHanoy):
def isGameWon(self):
for s in self.s.rows:
if len(s.cards) == len(self.cards) and isRankSequence(s.cards):
return 1
return 0
# register the game
registerGame(GameInfo(124, TowerOfHanoy, "Tower of Hanoy",
GI.GT_PUZZLE_TYPE, 1, 0, GI.SL_SKILL,
suits=(2,), ranks=list(range(9))))
registerGame(GameInfo(207, HanoiPuzzle4, "Hanoi Puzzle 4",
GI.GT_PUZZLE_TYPE, 1, 0, GI.SL_SKILL,
suits=(2,), ranks=list(range(4)),
rules_filename="hanoipuzzle.html"))
registerGame(GameInfo(208, HanoiPuzzle5, "Hanoi Puzzle 5",
GI.GT_PUZZLE_TYPE, 1, 0, GI.SL_SKILL,
suits=(2,), ranks=list(range(5)),
rules_filename="hanoipuzzle.html"))
registerGame(GameInfo(209, HanoiPuzzle6, "Hanoi Puzzle 6",
GI.GT_PUZZLE_TYPE, 1, 0, GI.SL_SKILL,
suits=(2,), ranks=list(range(6)),
rules_filename="hanoipuzzle.html"))
registerGame(GameInfo(769, HanoiSequence, "Hanoi Sequence",
GI.GT_PUZZLE_TYPE, 1, 0, GI.SL_SKILL,
suits=(2,), ranks=list(range(9))))
| gpl-3.0 | -1,738,677,666,822,167,000 | 31.66474 | 79 | 0.553353 | false | 3.47968 | false | false | false | 0 |
storborg/manhattan | docs/conf.py | 1 | 3305 | import sys
import os
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
html_theme = 'default'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.dirname(os.path.abspath('.')))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Manhattan'
copyright = u'2012, Scott Torborg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Manhattan 0.3'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'Manhattandoc'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Manhattan.tex', u'Manhattan Documentation',
u'Scott Torborg', 'manual'),
]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'manhattan', u'Manhattan Documentation',
[u'Scott Torborg'], 1)
]
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
'Manhattan',
u'Manhattan Documentation',
u'Scott Torborg',
'Manhattan',
'One line description of project.',
'Miscellaneous'),
]
| mit | -7,372,736,556,901,958,000 | 32.383838 | 79 | 0.666566 | false | 3.834107 | false | false | false | 0 |
xzYue/odoo | addons/mrp/tests/test_multicompany.py | 374 | 2660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMrpMulticompany(common.TransactionCase):
def setUp(self):
super(TestMrpMulticompany, self).setUp()
cr, uid = self.cr, self.uid
        # Useful models
self.ir_model_data = self.registry('ir.model.data')
self.res_users = self.registry('res.users')
self.stock_location = self.registry('stock.location')
group_user_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'base.group_user')
group_stock_manager_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.group_stock_manager')
company_2_id = self.registry('ir.model.data').xmlid_to_res_id(cr, uid, 'stock.res_company_1')
self.multicompany_user_id = self.res_users.create(cr, uid,
{'name': 'multicomp', 'login': 'multicomp',
'groups_id': [(6, 0, [group_user_id, group_stock_manager_id])],
'company_id': company_2_id, 'company_ids': [(6,0,[company_2_id])]})
def test_00_multicompany_user(self):
"""check no error on getting default mrp.production values in multicompany setting"""
cr, uid, context = self.cr, self.multicompany_user_id, {}
fields = ['location_src_id', 'location_dest_id']
defaults = self.stock_location.default_get(cr, uid, ['location_id', 'location_dest_id', 'type'], context)
for field in fields:
if defaults.get(field):
try:
self.stock_location.check_access_rule(cr, uid, [defaults[field]], 'read', context)
except Exception, exc:
assert False, "unreadable location %s: %s" % (field, exc)
| agpl-3.0 | 5,387,902,889,045,220,000 | 47.363636 | 117 | 0.608271 | false | 3.789174 | false | false | false | 0.004135 |
rkq/cxxexp | third-party/src/boost_1_56_0/tools/build/test/default_toolset.py | 51 | 7777 | #!/usr/bin/python
# Copyright 2008 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test that the expected default toolset is used when no toolset is explicitly
# specified on the command line or used from code via the using rule. Test that
# the default toolset is correctly used just like any other explicitly used
# toolset (e.g. toolset prerequisites, properties conditioned on toolset
# related features, etc.).
#
# Note that we need to ignore regular site/user/test configuration files to
# avoid them marking any toolsets not under our control as used.
import BoostBuild
# Line displayed by Boost Build when using the default toolset.
configuring_default_toolset_message = \
'warning: Configuring default toolset "%s".'
###############################################################################
#
# test_conditions_on_default_toolset()
# ------------------------------------
#
###############################################################################
def test_conditions_on_default_toolset():
"""Test that toolset and toolset subfeature conditioned properties get
applied correctly when the toolset is selected by default. Implicitly tests
that we can use the set-default-toolset rule to set the default toolset to
be used by Boost Build.
"""
t = BoostBuild.Tester("--user-config= --ignore-site-config",
pass_toolset=False, use_test_config=False)
toolset_name = "myCustomTestToolset"
toolset_version = "v"
toolset_version_unused = "v_unused"
message_loaded = "Toolset '%s' loaded." % toolset_name
message_initialized = "Toolset '%s' initialized." % toolset_name ;
# Custom toolset.
t.write(toolset_name + ".jam", """
import feature ;
ECHO "%(message_loaded)s" ;
feature.extend toolset : %(toolset_name)s ;
feature.subfeature toolset %(toolset_name)s : version : %(toolset_version)s %(toolset_version_unused)s ;
rule init ( version ) { ECHO "%(message_initialized)s" ; }
""" % {'message_loaded' : message_loaded ,
'message_initialized' : message_initialized,
'toolset_name' : toolset_name ,
'toolset_version' : toolset_version ,
'toolset_version_unused': toolset_version_unused})
# Main Boost Build project script.
t.write("jamroot.jam", """
import build-system ;
import errors ;
import feature ;
import notfile ;
build-system.set-default-toolset %(toolset_name)s : %(toolset_version)s ;
feature.feature description : : free incidental ;
# We use a rule instead of an action to avoid problems with action output not
# getting piped to stdout by the testing system.
rule buildRule ( names : targets ? : properties * )
{
local descriptions = [ feature.get-values description : $(properties) ] ;
ECHO "descriptions:" /$(descriptions)/ ;
local toolset = [ feature.get-values toolset : $(properties) ] ;
ECHO "toolset:" /$(toolset)/ ;
local toolset-version = [ feature.get-values "toolset-$(toolset):version" : $(properties) ] ;
ECHO "toolset-version:" /$(toolset-version)/ ;
}
notfile testTarget
: @buildRule
:
:
<description>stand-alone
<toolset>%(toolset_name)s:<description>toolset
<toolset>%(toolset_name)s-%(toolset_version)s:<description>toolset-version
<toolset>%(toolset_name)s-%(toolset_version_unused)s:<description>toolset-version-unused ;
""" % {'toolset_name' : toolset_name ,
'toolset_version' : toolset_version,
'toolset_version_unused': toolset_version_unused})
t.run_build_system()
t.expect_output_lines(configuring_default_toolset_message % toolset_name)
t.expect_output_lines(message_loaded)
t.expect_output_lines(message_initialized)
t.expect_output_lines("descriptions: /stand-alone/ /toolset/ "
"/toolset-version/")
t.expect_output_lines("toolset: /%s/" % toolset_name)
t.expect_output_lines("toolset-version: /%s/" % toolset_version)
t.cleanup()
###############################################################################
#
# test_default_toolset_on_os()
# ----------------------------
#
###############################################################################
def test_default_toolset_on_os( os, expected_toolset ):
"""Test that the given toolset is used as the default toolset on the given
    os. Uses hardcoded knowledge of how Boost Build determines which host OS
    it is currently running on. Note that we must not do much after tricking Boost
Build into believing it has a specific host OS as this might mess up other
important internal Boost Build state.
"""
t = BoostBuild.Tester("--user-config= --ignore-site-config",
pass_toolset=False, use_test_config=False)
t.write("jamroot.jam", "modules.poke os : .name : %s ;" % os)
# We need to tell the test system to ignore stderr output as attempting to
# load missing toolsets might cause random failures with which we are not
# concerned in this test.
t.run_build_system(stderr=None)
t.expect_output_lines(configuring_default_toolset_message %
expected_toolset)
t.cleanup()
###############################################################################
#
# test_default_toolset_requirements()
# -----------------------------------
#
###############################################################################
def test_default_toolset_requirements():
"""Test that default toolset's requirements get applied correctly.
"""
t = BoostBuild.Tester("--user-config= --ignore-site-config",
pass_toolset=False, use_test_config=False,
ignore_toolset_requirements=False)
toolset_name = "customTestToolsetWithRequirements"
# Custom toolset.
t.write(toolset_name + ".jam", """
import feature ;
import toolset ;
feature.extend toolset : %(toolset_name)s ;
toolset.add-requirements <description>toolset-requirement ;
rule init ( ) { }
""" % {'toolset_name': toolset_name})
# Main Boost Build project script.
t.write("jamroot.jam", """
import build-system ;
import errors ;
import feature ;
import notfile ;
build-system.set-default-toolset %(toolset_name)s ;
feature.feature description : : free incidental ;
# We use a rule instead of an action to avoid problems with action output not
# getting piped to stdout by the testing system.
rule buildRule ( names : targets ? : properties * )
{
local descriptions = [ feature.get-values description : $(properties) ] ;
ECHO "descriptions:" /$(descriptions)/ ;
local toolset = [ feature.get-values toolset : $(properties) ] ;
ECHO "toolset:" /$(toolset)/ ;
}
notfile testTarget
: @buildRule
:
:
<description>target-requirement
<description>toolset-requirement:<description>conditioned-requirement
<description>unrelated-condition:<description>unrelated-description ;
""" % {'toolset_name': toolset_name})
t.run_build_system()
t.expect_output_lines(configuring_default_toolset_message % toolset_name)
t.expect_output_lines("descriptions: /conditioned-requirement/ "
"/target-requirement/ /toolset-requirement/")
t.expect_output_lines("toolset: /%s/" % toolset_name)
t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
test_default_toolset_on_os("NT" , "msvc")
test_default_toolset_on_os("LINUX" , "gcc" )
test_default_toolset_on_os("CYGWIN" , "gcc" )
test_default_toolset_on_os("SomeOtherOS", "gcc" )
test_default_toolset_requirements()
test_conditions_on_default_toolset()
| mit | -6,397,694,269,829,260,000 | 35.172093 | 104 | 0.626334 | false | 4.046306 | true | false | false | 0.005143 |
hellodata/hellodate | 2/site-packages/django/utils/translation/__init__.py | 49 | 6780 | """
Internationalization support.
"""
from __future__ import unicode_literals
import re
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils import six
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect to which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ugettext(message):
return _trans.ugettext(message)
def ungettext(singular, plural, number):
return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, six.integer_types):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
class NumberAwareString(resultclass):
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
try:
number_value = rhs[number]
except KeyError:
raise KeyError('Your dictionary lacks key \'%s\'. '
'Please provide it, because it is required to '
'determine whether string is singular or plural.'
% number)
else:
number_value = rhs
kwargs['number'] = number_value
translated = func(**kwargs)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
return proxy
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
def ungettext_lazy(singular, plural, number=None):
return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
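# Hedged usage sketch (illustrative): when 'number' is a string key, the lazy
# proxy picks the plural form from the mapping used to interpolate it.
#
#   msg = ungettext_lazy('%(count)d apple', '%(count)d apples', 'count')
#   text = msg % {'count': 3} # -> plural form, with 3 interpolated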
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(object):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
self.old_language = get_language()
def __enter__(self):
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.deactivate:
deactivate()
else:
activate(self.old_language)
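# Hedged usage sketch (illustrative): temporarily switch the active language
# for a block, restoring the previous one on exit.
#
#   with override('de'):
#       greeting = ugettext('Hello') # looked up in the 'de' catalog
#   # the previous language is active again here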
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, origin=None):
return _trans.templatize(src, origin)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
return ''.join(force_text(s) for s in strings)
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
return LANG_INFO[lang_code]
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
return LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
trim_whitespace_re = re.compile('\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
| lgpl-3.0 | 3,674,936,512,348,609,500 | 28.606987 | 114 | 0.6559 | false | 4.023739 | false | false | false | 0.002212 |
amburan/moose | framework/contrib/nsiqcppstyle/nsiqcppstyle_checker.py | 78 | 38541 | # Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
import os
import traceback
from nsiqcppstyle_rulehelper import * #@UnusedWildImport
# Reserved words
tokens = [
'ID',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'PLUSPLUS', 'MINUSMINUS',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
# Delimeters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON', 'DOUBLECOLON',
# Ellipsis (...)
'ELLIPSIS',
# macro
'PREPROCESSOR',
'SHARPSHARP',
'SHARP',
#
'NUMBER',
'CHARACTER', 'STRING',
'SPACE',
'COMMENT', 'CPPCOMMENT',
'LINEFEED',
'PREPROCESSORNEXT',
'ASM',
'IGNORE', 'DEFAULT', 'DELETE',
# cast
'CONST_CAST', 'DYNAMIC_CAST', 'REINTERPRET_CAST', 'STATIC_CAST',
#control
'CONST',
'WHILE', 'IF', 'FOR', 'DO', 'ELSE',
'ENUM', 'EXPORT', 'EXTERN', 'TRUE', 'FALSE', 'GOTO',
'SWITCH', 'CASE', 'CONTST', 'CATCH',
'BREAK', 'CONTINUE',
'TRY', 'THROW',
#Operator
'NEW', 'OPERATOR',
'SIZEOF',
'INLINE', 'NAMESPACE', 'RETURN',
#visibility
'PUBLIC', 'PRIVATE', 'PROTECTED',
#Type
'STATIC', 'STRUCT', 'TEMPLATE', 'THIS', 'TYPEDEF',
'TYPENAME', 'UNION', 'USING', 'VIRTUAL',
'CLASS', "AUTO",
"CHAR",
"INT",
"LONG",
"DOUBLE",
"FLOAT",
"SHORT",
"BOOL",
"VOID"
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
t_DOUBLECOLON = r'::'
# Assignment operators
t_PREPROCESSOR = r'\#\s*[A-Za-z_][A-Za-z0-9_]*'
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
def t_ID(t) :
r'[A-Za-z_][A-Za-z0-9_]*'
t.type = reserved.get(t.value, 'ID')
return t
reserved = {
"for":"FOR",
"class":"CLASS",
"asm":"ASM",
"switch":"SWITCH",
"case":"CASE",
"catch":"CATCH",
"auto":"AUTO",
"break":"BREAK",
"continue":"CONTINUE",
"default":"DEFAULT",
"delete":"DELETE",
"const_cast":"CONST_CAST",
"dynamic_cast":"DYNAMIC_CAST",
"reinterpret_cast":"REINTERPRET_CAST",
"static_cast":"STATIC_CAST",
"while":"WHILE",
"if":"IF",
"do":"DO",
"else":"ELSE",
"enum":"ENUM",
"export":"EXPORT",
"extern":"EXTERN",
"true":"TRUE",
"false":"FALSE",
"const":"CONST",
"goto":"GOTO",
"inline":"INLINE",
"namespace":"NAMESPACE",
"new":"NEW",
"operator":"OPERATOR",
"return":"RETURN",
"public":"PUBLIC",
"private":"PRIVATE",
"protected":"PROTECTED",
"sizeof":"SIZEOF",
"static":"STATIC",
"struct":"STRUCT",
"template":"TEMPLATE",
"this":"THIS",
"throw":"THROW",
"try":"TRY",
"typedef":"TYPEDEF",
"typename":"TYPENAME",
"union":"UNION",
"using":"USING",
"virtual":"VIRTUAL",
"bool":"BOOL",
"char":"CHAR",
"int":"INT",
"long":"LONG",
"double":"DOUBLE",
"float":"FLOAT",
"short":"SHORT",
"void":"VOID",
"__declspec":"IGNORE",
"volatile":"IGNORE",
"typeid":"IGNORE",
"mutable":"IGNORE",
"explicit":"IGNORE",
"friends":"IGNORE",
"register":"IGNORE",
"unsinged":"IGNORE",
"signed":"IGNORE",
"__based":"IGNORE",
"__cdecl":"IGNORE",
"__except":"IGNORE",
"__finally":"IGNORE",
"__inline":"IGNORE",
"__attribute":"IGNORE",
"_based":"IGNORE",
"__stdcall":"IGNORE",
"__try":"IGNORE",
"dllexport":"IGNORE"
}
def t_IGNORE(t):
r"__attribute\(.*\)|__section\(.*\)"
return t
def t_LINEFEED(t):
r'[\n]+'
t.lexer.lineno += t.value.count('\n')
return t
def t_SPACE(t):
r'[ \t]+'
return t
t_PREPROCESSORNEXT = r"\\"
t_NUMBER = r'[0-9][0-9XxA-Fa-fL]*'
t_SHARPSHARP = r'\#\#'
t_SHARP = r'\#'
# String literal
def t_STRING(t):
r'"([^\\]|(\\.)|(\\\n))*?"'
t.lexer.lineno += t.value.count('\n')
return t
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
if Search(r"/\*\*\s", t.value) :
t.additional = 'DOXYGEN'
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*'
if Search(r"^///\b", t.value) :
t.additional = 'DOXYGEN'
if Search(r"^///<", t.value) :
t.additional = 'DOXYGENPOST'
return t
def t_error(t):
if nsiqcppstyle_state._nsiqcppstyle_state.verbose :
print "Illegal character '%s'" % t.value[0], t.lexer.lineno;
t.lexer.skip(1)
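# Illustrative sketch, not part of the checker: PLY collects the t_* rules
# above, so a snippet such as "int a = b + 1;" tokenizes roughly as
# INT SPACE ID SPACE EQUALS SPACE ID SPACE PLUS SPACE NUMBER SEMI.
# A minimal standalone run, assuming nsiqcppstyle_lexer wraps ply.lex the way
# CppLexerNavigator below uses it:
#
#   import nsiqcppstyle_lexer
#   lexer = nsiqcppstyle_lexer.lex()
#   lexer.input("int a = b + 1;")
#   while True:
#       tok = lexer.token()
#       if not tok: break
#       print tok.type, repr(tok.value)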
class CppLexerNavigator(object):
"""
Main class for Cpp Lexer
"""
def __init__(self, filename, data= None):
self.filename = filename
self.tokenlist = []
self.indexstack = []
self.tokenindex = -1
self.matchingPair = {}
self.reverseMatchingPair = {}
self.ifdefstack = []
import nsiqcppstyle_lexer
lexer = nsiqcppstyle_lexer.lex()
self.data = data
if data == None :
f = open(filename)
self.data = f.read()
self.lines = self.data.splitlines()
lexer.input(self.data)
index = 0
while True :
tok = lexer.token()
            if not tok : break
tok.column = self._GetColumn(tok)
tok.index = index
tok.inactive = False
index += 1
self.tokenlist.append(tok)
tok.line = self.lines[tok.lineno-1]
tok.filename = self.filename
tok.pp = None
# self.ProcessIfdef(tok)
self.tokenlistsize = len(self.tokenlist)
self.PushTokenIndex()
while(True) :
t = self.GetNextToken()
if t == None : break
t.inactive = self.ProcessIfdef(t)
self.PopTokenIndex()
def ProcessIfdef(self, token):
if token.type == "PREPROCESSOR" :
if Match(r"^#\s*if(n)?def$", token.value) :
self.ifdefstack.append(True)
elif Match(r"^#\s*if$", token.value) :
nextToken = self.PeekNextTokenSkipWhiteSpaceAndComment()
if nextToken != None and nextToken.value == "0" :
self.ifdefstack.append(False)
else:
self.ifdefstack.append(True)
elif Match(r"^#\s*endif$", token.value) :
if len(self.ifdefstack) != 0 : self.ifdefstack.pop()
for ifdef in self.ifdefstack :
if not ifdef :
return True
return False
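    # Illustrative behaviour, given the __init__ pass above that assigns
    # t.inactive = self.ProcessIfdef(t): every token sitting inside an
    # '#if 0' ... '#endif' region ends up with inactive == True, and the
    # navigation helpers below silently skip such tokens.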
def Backup(self):
"""
Back up the current context in lexer to be restored later
"""
return (self.tokenindex, self.indexstack[:])
def Restore(self, data):
"""
Restore the lexer context.
tuple using tokenindex and indexstack should be passed
"""
self.tokenindex = data[0]
self.indexstack = data[1]
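    # Illustrative usage, assuming `nav` is a CppLexerNavigator instance:
    #
    #   saved = nav.Backup()                 # snapshot position + index stack
    #   nav.GetNextTokenSkipWhiteSpaceAndComment()
    #   nav.Restore(saved)                   # back exactly where we started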
def Reset(self):
"""
Reset Lexer
"""
self.tokenindex = -1
self.indexstack = []
def GetCurTokenLine(self):
"""
Get Current Token, if No current token, return None
"""
curToken = self.GetCurToken()
if curToken != None :
return self.lines[curToken.lineno-1]
return None
def _MoveToToken(self, token):
self.tokenindex = token.index
def _GetColumn(self, token):
"""
Get given token column
"""
        last_cr = self.data.rfind('\n', 0, token.lexpos)
if last_cr < 0:
last_cr = -1
column = (token.lexpos - last_cr)
if column == 0 : return 1
return column
def GetCurToken(self):
"""
Get Current Token
"""
return self.tokenlist[self.tokenindex]
def PushTokenIndex(self):
"""
Push Current Token Index into stack to keep current token.
"""
self.indexstack.append(self.tokenindex)
def PopTokenIndex(self):
"""
Pop token index stack to roll back to previously pushed token
"""
self.tokenindex = self.indexstack.pop()
def GetNextTokenSkipWhiteSpace(self):
"""
Get Next Token skip the white space.
"""
return self.GetNextToken(True)
def PeekNextToken(self):
self.PushTokenIndex()
token = self._GetNextToken()
self.PopTokenIndex()
return token
def PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess(self, offset = 1):
"""
Get Next Token skip whitespace, comment and preprocess.
This method doesn't change the current lex position.
"""
self.PushTokenIndex()
token = None
for x in range(offset) : #@UnusedVariable
token = self.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
self.PopTokenIndex()
return token
def PeekNextTokenSkipWhiteSpaceAndComment(self):
"""
Get Next Token skip whitespace and comment.
This method doesn't change the current lex position.
"""
self.PushTokenIndex()
token = self.GetNextTokenSkipWhiteSpaceAndComment()
self.PopTokenIndex()
return token
def PeekPrevToken(self):
self.PushTokenIndex()
token = self._GetPrevToken()
self.PopTokenIndex()
return token
def PeekPrevTokenSkipWhiteSpaceAndCommentAndPreprocess(self, offset = 1):
"""
Get Previous Token skip whitespace and comment.
This method doesn't change the current lex position.
"""
self.PushTokenIndex()
        token = None
for x in range(offset): #@UnusedVariable
token = self.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess()
self.PopTokenIndex()
return token
def PeekPrevTokenSkipWhiteSpaceAndComment(self):
"""
Get Previous Token skip whitespace and comment.
This method doesn't change the current lex position.
"""
self.PushTokenIndex()
token = self.GetPrevTokenSkipWhiteSpaceAndComment()
self.PopTokenIndex()
return token
def GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess(self):
"""
Get Next Token skip whitespace, comment, preprocess
This method changes the current lex position.
"""
return self.GetNextToken(True, True, True)
def GetNextTokenSkipWhiteSpaceAndComment(self):
"""
Get Next Token skip whitespace and comment.
This method changes the current lex position.
"""
return self.GetNextToken(True, True)
def GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess(self):
"""
Get Previous Token skip whitespace, comment, process.
This method changes the current lex position.
"""
return self.GetPrevToken(True, True, True)
def GetPrevTokenSkipWhiteSpaceAndComment(self):
"""
Get Previous Token skip whitespace and comment.
This method changes the current lex position.
"""
return self.GetPrevToken(True, True)
def GetNextToken(self, skipWhiteSpace=False,
skipComment=False, skipDirective=False,
skipMatchingBraces=False):
"""
Get Next Token with various option
- skipWhiteSpace - skip white space
- skipComment - skip comment
- skipDirective - skip preprocessor line
- skipMatchingBraces - skip all { [ ( and matching pair
"""
context = self._SkipContext(skipWhiteSpace, skipComment)
while(True) :
token = self._GetNextToken()
if token == None :
return token
if token.inactive == True:
continue
if skipMatchingBraces and token.type in ["LPAREN", "LBRACE", "LBRACKET"] :
self.GetNextMatchingToken()
continue
if skipDirective :
if token.pp == True :
continue
if token.type not in context :
if token != None :
token.column = self._GetColumn(token)
return token
def GetNextMatchingGT(self, keepCur=False):
if keepCur : self.PushTokenIndex()
gtStack = []
if self.GetCurToken().type != "LT" :
            raise RuntimeError('Matching token should be examined when cur token is <')
gtStack.append(self.GetCurToken())
t = self._GetNextMatchingGTToken(gtStack)
if keepCur : self.PopTokenIndex()
return t
def _GetNextMatchingGTToken(self, tokenStack):
while True :
nextToken = self._GetNextToken()
if nextToken == None :
return None
elif nextToken.type in ["LT"] :
tokenStack.append(nextToken)
elif nextToken.type in ["GT"] :
tokenStack.pop()
if len(tokenStack) == 0 :
return nextToken
else :
return None
elif nextToken.type in ["RSHIFT"] :
tokenStack.pop()
tokenStack.pop()
if len(tokenStack) == 0 :
return nextToken
else :
return None
def GetNextMatchingToken(self, keepCur = False):
"""
Get matching token
"""
if keepCur : self.PushTokenIndex()
tokenStack = []
if self.GetCurToken().type not in ["LPAREN", "LBRACE", "LBRACKET"] :
raise RuntimeError('Matching token should be examined when cur token is { [ (')
tokenStack.append(self.GetCurToken())
t = self._GetNextMatchingToken(tokenStack)
if keepCur : self.PopTokenIndex()
return t
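    # Illustrative usage, assuming `nav` is positioned on an LPAREN token:
    #
    #   rparen = nav.GetNextMatchingToken(keepCur=True)
    #   # rparen is the balancing RPAREN (or None if the file is unbalanced);
    #   # keepCur=True restores the navigator position afterwards.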
def _GetNextMatchingToken(self, tokenStack):
searchToken = tokenStack[-1]
matchingToken = self.matchingPair.get(searchToken, None)
lastPopedToken = None
if matchingToken != None :
self._MoveToToken(matchingToken)
return matchingToken
while True :
nextToken = self._GetNextToken()
if nextToken == None :
if self.reverseMatchingPair.has_key(lastPopedToken) :
return None
self.matchingPair[searchToken] = lastPopedToken
self.reverseMatchingPair[lastPopedToken] = searchToken
return lastPopedToken
if nextToken.type in ["LPAREN", "LBRACE", "LBRACKET"] :
tokenStack.append(nextToken)
#print "Push", nextToken
if nextToken.type in ["RPAREN", "RBRACE", "RBRACKET"] :
prevTokenPair = tokenStack[-1]
if prevTokenPair != None :
if prevTokenPair.type[1:] == nextToken.type[1:] :
tokenStack.pop()
lastPopedToken = nextToken
if len(tokenStack) == 0 :
if self.reverseMatchingPair.has_key(nextToken) :
return None
self.matchingPair[searchToken] = nextToken
self.reverseMatchingPair[nextToken] = searchToken
return nextToken
else :
return None
else :
return None
def GetPrevTokenSkipWhiteSpace(self):
return self.GetPrevToken(True)
# def GetPrevTokenSkipWhiteSpaceAndComment(self):
# return self.GetPrevToken(True, True)
def GetPrevToken(self, skipWhiteSpace=False,
skipComment=False, skipDirective=False,
skipMatchingBraces=False):
context = self._SkipContext(skipWhiteSpace, skipComment)
while(True) :
token = self._GetPrevToken()
if token == None :
return token
if token.inactive :
continue
if skipMatchingBraces and token.type in ["RPAREN", "RBRACE", "RBRACKET"] :
self.GetPrevMatchingToken()
continue
if skipDirective :
line = self.GetCurTokenLine()
if Search(r"^\s*#", line) :
continue
if token.type not in context :
return token
def GetPrevMatchingToken(self, keepCur=False):
if keepCur : self.PushTokenIndex()
tokenStack = []
if self.GetCurToken().type not in ["RPAREN", "RBRACE", "RBRACKET"] :
raise RuntimeError('Matching token should be examined when cur token is } ) ]')
tokenStack.append(self.GetCurToken())
t = self._GetPrevMatchingToken(tokenStack)
if keepCur : self.PopTokenIndex()
return t
def _GetPrevMatchingToken(self, tokenStack):
searchToken = tokenStack[-1]
matchingToken = self.reverseMatchingPair.get(searchToken, None)
if matchingToken != None :
self._MoveToToken(matchingToken)
return matchingToken
while True :
prevToken = self._GetPrevToken()
if prevToken == None :
return None
if prevToken.type in ["RPAREN", "RBRACE", "RBRACKET"] :
tokenStack.append(prevToken)
#print "Push", nextToken
elif prevToken.type in ["LPAREN", "LBRACE", "LBRACKET"] :
prevTokenPair = tokenStack[-1]
if prevTokenPair != None :
if prevTokenPair.type[1:] == prevToken.type[1:] :
tokenStack.pop()
#print "Pop", nextToken
#print tokenStack
if len(tokenStack) == 0 :
self.reverseMatchingPair[searchToken] = prevToken
self.matchingPair[prevToken] = searchToken
return prevToken
else :
return None
else :
return None
def _SkipContext(self, skipWhiteSpace=False,
skipComment=False):
context = []
if skipWhiteSpace :
context.append("SPACE")
context.append("LINEFEED")
if skipComment :
context.append("COMMENT")
context.append("CPPCOMMENT")
return context
def _GetNextToken(self):
if self.tokenindex < self.tokenlistsize - 1:
self.tokenindex = self.tokenindex + 1
return self.tokenlist[self.tokenindex]
else :
            return None
def _GetPrevToken(self):
if self.tokenindex >= 0:
self.tokenindex = self.tokenindex - 1
if self.tokenindex == -1 :
return None
return self.tokenlist[self.tokenindex]
else :
            return None
def GetPrevTokenInType(self, type, keepCur=True, skipPreprocess=True):
if keepCur : self.PushTokenIndex()
token = None
while(True) :
token = self.GetPrevToken()
if token == None :
break
elif token.type == type :
if skipPreprocess and token.pp :
continue
break
if keepCur : self.PopTokenIndex()
return token
def GetPrevTokenInTypeList(self, typelist, keepCur=True, skipPreprocess=True):
if keepCur : self.PushTokenIndex()
token = None
while(True) :
token = self.GetPrevToken(False, False, skipPreprocess, False)
if token == None :
break
elif token.type in typelist :
if skipPreprocess and token.pp :
continue
break
if keepCur : self.PopTokenIndex()
return token
def MoveToNextToken(self):
if self.tokenindex < self.tokenlistsize - 1:
self.tokenindex = self.tokenindex + 1
def MoveToPrevToken(self):
if self.tokenindex > 0:
self.tokenindex = self.tokenindex - 1
def GetNextTokenInType(self, type, keepCur = False, skipPreprocess = True):
if keepCur : self.PushTokenIndex()
token = None
while(True) :
token = self.GetNextToken()
if token == None :
break
elif token.type == type :
if skipPreprocess and token.pp :
continue
break
if keepCur : self.PopTokenIndex()
return token
def GetNextTokenInTypeList(self, typelist, keepCur = False, skipPreprocess=True):
if keepCur : self.PushTokenIndex()
token = None
while(True) :
token = self.GetNextToken()
if token == None :
break
elif token.type in typelist :
if skipPreprocess and token.pp :
continue
break
if keepCur : self.PopTokenIndex()
return token
def HasBody(self):
if self.GetCurToken() == None : return False
token_id2 = self.GetNextTokenInType("LBRACE", True)
token_id3 = self.GetNextTokenInType("SEMI", True)
if token_id3 == None and token_id2 != None :
return True
if token_id2 != None :
if token_id2.lexpos < token_id3.lexpos :
return True
return False
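    # Illustrative outcomes when scanning ahead from the current token:
    #   'void f() { ... }'  ->  True   (LBRACE shows up before any SEMI)
    #   'void f();'         ->  False  (SEMI comes first: declaration only)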
class Context :
def __init__(self, type, name, sig=False, starttoken = None, endtoken = None):
self.type = type
self.name = name
self.sig = sig
self.startToken = starttoken
self.endToken = endtoken
self.additional = ""
def __str__(self):
return "%s, %s, %s, %s" % (self.type, self.name, self.startToken, self.endToken)
def IsContextStart(self, token):
return token == self.startToken
def IsContextEnd(self, token):
return token == self.endToken
def InScope(self, token):
if token.lexpos >= self.startToken.lexpos and token.lexpos <= self.endToken.lexpos :
return True
return False
class ContextStack :
def __init__(self):
self.contextstack = []
pass
def Push(self, context):
self.contextstack.append(context)
def Pop(self):
if self.Size() == 0 :
return None
return self.contextstack.pop()
def Peek(self):
if self.Size() == 0 :
return None
return self.contextstack[-1]
def SigPeek(self) :
i = len(self.contextstack)
while(True) :
if i == 0 : break
i -= 1
if self.contextstack[i].sig :
return self.contextstack[i]
return None
def Size(self):
return len(self.contextstack)
def IsEmpty(self):
return len(self.contextstack) == 0
def ContainsIn(self, type):
i = len(self.contextstack)
while(True) :
if i == 0 : break
i -= 1
            if self.contextstack[i].type == type :
return True
return False
def __str__(self):
a = ""
for eachContext in self.contextstack :
a += (eachContext.__str__() + " >> ")
return a
def Copy(self):
contextStack = ContextStack()
contextStack.contextstack = self.contextstack[:]
return contextStack
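# Illustrative sketch of the two classes above, with hypothetical lbrace_tok /
# rbrace_tok standing in for real lexer tokens:
#
#   stack = ContextStack()
#   stack.Push(Context("CLASS_BLOCK", "Foo", True, lbrace_tok, rbrace_tok))
#   stack.Peek().name       # -> 'Foo'
#   stack.SigPeek()         # -> the same context here, since sig=True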
class _ContextStackStack:
def __init__(self):
self.contextstackstack = []
def Push(self, contextstack):
self.contextstackstack.append(contextstack)
def Pop(self):
if len(self.contextstackstack) == 0 :
return None
context = self.contextstackstack.pop()
return context
def Peek(self):
if len(self.contextstackstack) == 0 :
return None
return self.contextstackstack[-1]
############################################################################
# Parser and Rule Runner Invocator
############################################################################
#AddLineRule(lineRule)
import sys
def ProcessFile(ruleManager, file, data=None):
# print file
lexer = CppLexerNavigator(file, data)
ContructContextInfo(lexer)
# Run Rules
lexer.Reset()
RunRules(ruleManager, lexer)
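# Illustrative entry point, assuming a rule manager object compatible with the
# Run*Rule methods invoked in RunRules below:
#
#   ProcessFile(ruleManager, "sample.cpp")
#   # tokenizes the file, annotates each token with its context stack, then
#   # replays the token stream through the registered rules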
def ContructContextInfo(lexer):
# classstate = None
# depth = 0
contextStack = ContextStack()
contextStackStack = _ContextStackStack()
contextStackStack.Push(contextStack)
contextPrediction = None
ppScope = False
prevLine = 0
templateContext = None
nsiqcppstyle_state._nsiqcppstyle_state.ResetRuleSuppression()
comment = lexer.GetNextTokenInTypeList(("COMMENT", "CPPCOMMENT"), True)
if comment != None :
        for e in FindAll(r"--\s*(RULE\w*)", comment.value) :
nsiqcppstyle_state._nsiqcppstyle_state.SuppressRule(e)
t = None
# Construct Context
while(True) :
try :
t = lexer.GetNextTokenSkipWhiteSpaceAndComment()
if t == None : break
t.contextStack = None
t.context = None
if t.type == "PREPROCESSOR" :
t.pp = True
ppScope = True
elif ppScope == True :
if prevLine == t.lineno-1:
prevToken = lexer.PeekPrevTokenSkipWhiteSpaceAndComment()
if prevToken != None :
ppScope = (prevToken.type == "PREPROCESSORNEXT")
t.pp = ppScope
elif prevLine == t.lineno :
t.pp = True
else :
ppScope = False
if templateContext != None and not templateContext.InScope(t) :
templateContext = None
if t.type in ['LBRACE', 'LPAREN', 'LBRACKET'] :
if contextPrediction != None and contextPrediction.IsContextStart(t):
contextStack = contextStack.Copy()
contextStack.Push(contextPrediction)
contextStackStack.Push(contextStack)
contextPrediction = None
else :
mt = lexer.GetNextMatchingToken(True)
if mt != None:
contextStack = contextStack.Copy()
contextStack.Push(Context(t.type[1:]+"BLOCK", "", False, t.lexpos, mt.lexpos))
contextStackStack.Push(contextStack)
# print depth, "Push", contextStack
elif t.type in ["RBRACE", "RPAREN", "RBRACKET"] :
contextStackStack.Pop()
contextStack = contextStackStack.Peek()
#print depth, "Pop ", contextStack
# Type Prediction
elif t.type == "TEMPLATE" :
lexer.PushTokenIndex()
t2 = lexer.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
if t2 != None and t2.type == "LT" :
t3 = lexer.GetNextMatchingGT()
templateContext = Context("TEMPLATE", "", False, t2, t3)
lexer.PopTokenIndex()
elif t.pp != True and templateContext == None and t.type in ["CLASS", "STRUCT", "ENUM", "UNION", "NAMESPACE"] and contextPrediction == None:
prevToken = lexer.PeekPrevTokenSkipWhiteSpaceAndCommentAndPreprocess()
hasBody = lexer.HasBody()
fullName = ""
curContext = contextStack.Peek()
if (prevToken == None or prevToken.value != "<") and (curContext == None or curContext.type != "PARENBLOCK") and hasBody :
lexer.PushTokenIndex()
# find start and end brace block
contextStart = lexer.GetNextTokenInType("LBRACE")
contextEnd = lexer.GetNextMatchingToken(contextStart)
lexer.PopTokenIndex()
lexer.PushTokenIndex()
if contextEnd != None :
while(True) :
token = lexer.GetNextTokenInType("ID")
nextIDtoken = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
if token == None or token.lexpos > contextStart.lexpos :
                                break
if nextIDtoken.type == "ID" :
continue
nextToken = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
if token != None and nextToken != None and nextToken.type == "DOUBLECOLON":
fullName += (token.value + "::")
continue
else :
fullName += token.value
break
contextPrediction = Context(t.type + "_BLOCK", fullName, True, contextStart, contextEnd)
lexer.PopTokenIndex()
t.type = "TYPE"
t.fullName = fullName
t.context = contextPrediction
t.decl = not hasBody
# RunTypeRule(lexer, fullName, not hasBody, contextStack, contextPrediction)
# Function Prediction
elif t.pp != True and t.type in ("ID", "OPERATOR") and contextPrediction == None:
curNotSigContext = contextStack.Peek()
if (curNotSigContext != None and curNotSigContext.sig == False):
continue
t2 = None
t4 = None
if t.type == "ID" :
t2 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
t4 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess(2)
else :
t2 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
if t2.type == "LPAREN" :
t2 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess(3)
t4 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess(4)
else :
t2 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess(2)
t4 = lexer.PeekNextTokenSkipWhiteSpaceAndCommentAndPreprocess(3)
t3 = lexer.PeekPrevTokenSkipWhiteSpaceAndCommentAndPreprocess()
curContext = contextStack.SigPeek()
if (t3 == None or t3.type != "NEW") and t2 != None and t2.type == "LPAREN" and (curContext == None or curContext.type in ["CLASS_BLOCK", "STRUCT_BLOCK", "NAMESPACE_BLOCK"]) and t4.type != "STRING":
# Check The ID after the next RPAREN
# if there is ID or None it's not a function.
# in case HELLO() HELLO2()
lexer.PushTokenIndex()
lexer._MoveToToken(t2)
lexer.GetNextMatchingToken()
t5 = lexer.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
if t5 == None or t5.type == "ID" :
lexer.PopTokenIndex()
continue
lexer.PopTokenIndex()
##############################
lexer.PushTokenIndex()
contextPrediction = None
fullName = t.value
lexer.PushTokenIndex()
if t.type == "OPERATOR" :
fullName = fullName + t3.value
else :
while(True) :
prevName = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess()
if prevName != None :
if prevName.type == "NOT" :
fullName = "~" + fullName
elif prevName.type == "DOUBLECOLON" :
fullName = "::" + fullName
fullName = lexer.GetPrevTokenSkipWhiteSpaceAndCommentAndPreprocess().value + fullName
else : break
else : break
lexer.PopTokenIndex()
if Match(r"^[A-Z_][A-Z_0-9][A-Z_0-9]+$", fullName) :
continue
impl = lexer.HasBody()
if impl :
contextStart = lexer.GetNextTokenInType("LBRACE")
contextEnd = lexer.GetNextMatchingToken(contextStart)
if contextEnd != None :
contextPrediction = Context("FUNCTION_BLOCK", fullName, True, contextStart, contextEnd)
#RunFunctionRule(lexer, functionName, decl, contextStack, contextPrediction)
t.type = "FUNCTION"
t.fullName = fullName
t.context = contextPrediction
t.decl = not impl
lexer.PopTokenIndex()
#print "TT", lexer.GetCurTokenLine(), impl, contextPrediction
t.contextStack = contextStack
prevLine = t.lineno
except Exception, e:
if nsiqcppstyle_state._nsiqcppstyle_state.verbose :
print >> sys.stderr, "Context Construnction Error : ", t, t.contextStack, e
traceback.print_exc(file=sys.stderr)
def RunRules(ruleManager, lexer):
try :
ruleManager.RunFileStartRule(lexer, os.path.basename(lexer.filename), os.path.dirname(lexer.filename))
except Exception, e:
if nsiqcppstyle_state._nsiqcppstyle_state.verbose :
print >> sys.stderr, "Rule Error : ", e
traceback.print_exc(file=sys.stderr)
currentLine = 0
t = None
while(True) :
try :
t = lexer.GetNextTokenSkipWhiteSpaceAndComment()
if t == None : break
if currentLine != t.lineno :
currentLine = t.lineno
ruleManager.RunLineRule(lexer, lexer.GetCurTokenLine(), currentLine)
if t.pp == True :
ruleManager.RunPreprocessRule(lexer, t.contextStack)
else :
if t.type == 'TYPE' :
ruleManager.RunTypeNameRule(lexer, t.value.upper(), t.fullName, t.decl, t.contextStack, t.context)
elif t.type == 'FUNCTION' :
ruleManager.RunFunctionNameRule(lexer, t.fullName, t.decl, t.contextStack, t.context)
elif t.contextStack != None and t.contextStack.SigPeek() != None :
sigContext = t.contextStack.SigPeek()
if sigContext.type == "FUNCTION_BLOCK" :
ruleManager.RunFunctionScopeRule(lexer, t.contextStack)
elif sigContext.type in ["CLASS_BLOCK", "STRUCT_BLOCK", "ENUM_BLOCK", "NAMESPACE_BLOCK", "UNION_BLOCK"]:
ruleManager.RunTypeScopeRule(lexer, t.contextStack)
ruleManager.RunRule(lexer, t.contextStack)
except Exception, e:
if nsiqcppstyle_state._nsiqcppstyle_state.verbose :
print >> sys.stderr, "Rule Error : ", t, t.contextStack, e
traceback.print_exc(file=sys.stderr)
try :
ruleManager.RunFileEndRule(lexer, os.path.basename(lexer.filename), os.path.dirname(lexer.filename))
except Exception, e:
if nsiqcppstyle_state._nsiqcppstyle_state.verbose :
print >> sys.stderr, "Rule Error : ", e
traceback.print_exc(file=sys.stderr)
| lgpl-2.1 | -8,459,777,655,855,269,000 | 32.455729 | 213 | 0.540567 | false | 4.185145 | false | false | false | 0.01235 |
sanjuro/RCJK | settings.py | 1 | 11294 | # Copyright 2009 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import os.path
###
# Django related settings
###
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# This stuff is always going to be the same for an App Engine instance
DATABASE_ENGINE = 'appengine' # 'appengine' is the only supported engine
DATABASE_NAME = '' # Not used with appengine
DATABASE_USER = '' # Not used with appengine
DATABASE_PASSWORD = '' # Not used with appengine
DATABASE_HOST = '' # Not used with appengine
DATABASE_PORT = '' # Not used with appengine
# The appengine_django code doesn't care about the address of memcached
# because it is a built in API for App Engine
CACHE_BACKEND = 'memcached://'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'I AM SO SECRET'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'middleware.domain.DomainMiddleware',
'middleware.auth.AuthenticationMiddleware',
'middleware.exception.ExceptionMiddleware',
'middleware.cache.CacheMiddleware',
'middleware.strip_whitespace.WhitespaceMiddleware',
'middleware.profile.ProfileMiddleware',
)
ROOT_URLCONF = 'urls'
# Where the templates live, you probably don't want to change this unless you
# know what you're doing
TEMPLATE_DIRS = (
os.path.dirname(__file__),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.request',
'common.context_processors.settings',
'common.context_processors.flash',
'common.context_processors.components',
)
# Only apps under INSTALLED_APPS will be automatically tested via
# `python manage.py test`, and the profiling code takes this list into
# account when filtering calls
INSTALLED_APPS = (
'appengine_django',
'common',
'actor',
'api',
'channel',
'explore',
'join',
'flat',
'login',
'front',
'invite',
'install',
'confirm',
'components',
)
# We override the default test runner so that we can be Totally Awesome
TEST_RUNNER = 'common.test.runner.run_tests'
####
#
# Below this is custom for Jaiku Engine (not related to Django)
#
####
# This is a dynamic setting so that we can check whether we have been run
# locally; it is used mainly for making special testing-only tweaks. Ideally
# we wouldn't need this, but the alternatives so far have been tricky.
MANAGE_PY = os.path.exists('manage.py')
# This is the name of the site that will be used whenever it refers to itself
SITE_NAME = 'Rapid-Comments'
SUPPORT_CHANNEL = 'support'
# This is the colloquial name for an entry, mostly used for branding purposes
POST_NAME = 'Post'
# This is the name of the root user of the site
ROOT_NICK = 'headhancho@rapidcomment.appspot.com'
# This is the domain where this is installed on App Engine. It will be
# necessary to know this if you plan on enabling SSL for login and join.
GAE_DOMAIN = 'rapidcomment.appspot.com'
# Enabling this means we expect to be spending most of our time on a
# Hosted domain
HOSTED_DOMAIN_ENABLED = True
# This is the domain you intend to serve your site from, when using hosted
# domains. If SSL is enabled for login and join those requests will still
# go to the GAE_DOMAIN above.
HOSTED_DOMAIN = 'example.com'
# App Engine requires you to serve with a subdomain
DEFAULT_HOSTED_SUBDOMAIN = 'www'
NS_DOMAIN = 'example.com'
# DOMAIN will be used wherever a url to this site needs to be created
# NS_DOMAIN will be used as the domain part of actor identifiers.
# Note that changing this once you have deployed the site will likely result
# in catastrophic failure.
if HOSTED_DOMAIN_ENABLED:
DOMAIN = '%s.%s' % (DEFAULT_HOSTED_SUBDOMAIN, HOSTED_DOMAIN)
else:
DOMAIN = GAE_DOMAIN
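# Illustrative result with the defaults above: HOSTED_DOMAIN_ENABLED = True
# yields DOMAIN = 'www.example.com', while disabling it would fall back to
# DOMAIN = 'rapidcomment.appspot.com' (the GAE_DOMAIN).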
# Subdomains aren't supported all that nicely by App Engine yet, so you
# probably won't be able to enable WILDCARD_SUBDOMAINS below, but you can
# still set up your app to use some of the static subdomains below.
# Subdomains are ignored unless HOSTED_DOMAIN_ENABLED is True.
SUBDOMAINS_ENABLED = False
WILDCARD_USER_SUBDOMAINS_ENABLED = False
# These are defined as { subdomain : url_conf, ...}
INSTALLED_SUBDOMAINS = {
'api': 'api.urls', # api-only urlconf
'm': 'urls', # default urlconf, but allow the subdomain
}
# Enable SSL support for login and join, if using HOSTED_DOMAIN_ENABLED
# this means you will be redirecting through https://GAE_DOMAIN/login
# and https://GAE_DOMAIN/join for those respective actions.
SSL_LOGIN_ENABLED = False
#
# Appearance / Theme
#
# The default theme to use
DEFAULT_THEME = 'rapidcomment'
#
# Cookie
#
# Cookie settings, pretty self explanatory, you shouldn't need to touch these.
USER_COOKIE = 'user'
PASSWORD_COOKIE = 'password'
COOKIE_DOMAIN = '.%s' % DOMAIN
COOKIE_PATH = '/'
#
# Blog
#
# Do you want /blog to redirect to your blog?
BLOG_ENABLED = False
# Where is your blog?
BLOG_URL = 'http://example.com'
BLOG_FEED_URL = 'http://example.com/feeds'
#
# API
#
# Setting this to True will make the public API accept all requests as being
# from ROOT with no regard to actual authentication.
# Never set this to True on a production site.
API_DISABLE_VERIFICATION = False
# These next three determine which OAuth Signature Methods to allow.
API_ALLOW_RSA_SHA1 = True
API_ALLOW_HMAC_SHA1 = True
API_ALLOW_PLAINTEXT = False
# These three determine whether the ROOT use should be allowed to use these
# methods, if any at all. Setting all of these to False will disable the
# ROOT user from accessing the public API
API_ALLOW_ROOT_RSA_SHA1 = True
API_ALLOW_ROOT_HMAC_SHA1 = True
API_ALLOW_ROOT_PLAINTEXT = False
# OAuth consumer key and secret values
ROOT_TOKEN_KEY = 'ROOT_TOKEN_KEY'
ROOT_TOKEN_SECRET = 'ROOT_TOKEN_SECRET'
ROOT_CONSUMER_KEY = 'ROOT_CONSUMER_KEY'
ROOT_CONSUMER_SECRET = 'ROOT_CONSUMER_SECRET'
# Allow support for legacy API authentication
API_ALLOW_LEGACY_AUTH = False
LEGACY_SECRET_KEY = 'I AM ALSO SECRET'
#
# SMS
#
# Enabling SMS will require a bit more than just making this True, please
# read the docs at http://code.google.com/p/jaikuengine/wiki/sms_support
SMS_ENABLED = False
# Most SMS vendors provide a service that will post messages to a url
# on your site when an SMS has been received on their end; this setting lets
# you add a secret value that must exist in that url to prevent
# malicious use.
SMS_VENDOR_SECRET = 'SMS_VENDOR'
# Valid numbers on which you expect to receive SMS
SMS_TARGET = '00000'
# Whitelist regular expression for allowable mobile-terminated targets
SMS_MT_WHITELIST = re.compile(r'\+\d+')
# Blacklist regular expression for blocked mobile-terminated targets
SMS_MT_BLACKLIST = None
# Turn on test mode for SMS
SMS_TEST_ONLY = False
# Numbers to use when testing live SMS so you don't spam all your users
SMS_TEST_NUMBERS = []
#
# XMPP / IM
#
# Enabling IM will require a bit more than just making this True, please
# read the docs at http://code.google.com/p/jaikuengine/wiki/im_support
IM_ENABLED = False
# This is the id (JID) of the IM bot that you will use to communicate with
# users of the IM interface
IM_BOT = 'root@example.com'
# Turn on test mode for IM
IM_TEST_ONLY = False
# JIDs to allow when testing live XMPP so you don't spam all your users
IM_TEST_JIDS = []
# Enable to send plain text messages only. Default is to send both plain
# text and html.
IM_PLAIN_TEXT_ONLY = False
# Truncate entry title in comments. None or 140+ means no truncation.
IM_MAX_LENGTH_OF_ENTRY_TITLES_FOR_COMMENTS = 40
#
# Task Queue
#
# Enabling the queue will allow you to process posts with larger numbers
# of followers but will require you to set up a cron job that will continuously
# ping a special url to make sure the queue gets processed
QUEUE_ENABLED = True
# The secret to use for your cron job that processes your queue
QUEUE_VENDOR_SECRET = 'SECRET'
#
# Throttling Config
#
# This will control the max number of SMS to send over a 30-day period
THROTTLE_SMS_GLOBAL_MONTH = 10000
# Settings for remote services
IMAGE_UPLOAD_ENABLED = False
IMAGE_UPLOAD_URL = 'upload.example.com'
# Settings for Google Contacts import
GOOGLE_CONTACTS_IMPORT_ENABLED = False
FEEDS_ENABLED = False
MARK_AS_SPAM_ENABLED = True
PRESS_ENABLED = False
HIDE_COMMENTS_ENABLED = True
MULTIADMIN_ENABLED = False
PRIVATE_CHANNELS_ENABLED = False
MARKDOWN_ENABLED = False
# Lists nicks of users participating in conversations underneath comment
# areas for posts. Clicking list items inserts @nicks into comment box.
# The list shows a maximum of 25 nicks.
COMMENT_QUICKLINKS_ENABLED = True
# If enabled, adds support for using access keys 1-9 to insert @nicks into
# comment box. Requires COMMENT_QUICKLINKS_ENABLED.
COMMENT_QUICKLINKS_ACCESSKEYS_ENABLED = False
PROFILE_DB = False
# Limit of avatar photo size in kilobytes
MAX_AVATAR_PHOTO_KB = 200
MAX_ACTIVATIONS = 10
# Email Test mode
EMAIL_TEST_ONLY = False
# Allowed email addresses for testing
EMAIL_TEST_ADDRESSES = []
# Email limiting, if this is set it will restrict users to those with
# email addresses in this domain
EMAIL_LIMIT_DOMAIN = None
# Things to measure to taste
MAX_COMMENT_LENGTH = 2000
# Gdata Stuff
GDATA_CONSUMER_KEY = ''
GDATA_CONSUMER_SECRET = ''
def default_email_sender():
try:
return os.environ['DJANGO_DEFAULT_FROM_EMAIL']
except KeyError:
return 'termie@google.com'
DEFAULT_FROM_EMAIL = default_email_sender()
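# Illustrative override, assuming a shell environment for the App Engine
# tooling:
#   export DJANGO_DEFAULT_FROM_EMAIL=noreply@example.com
# makes default_email_sender() return that address instead of the fallback.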
DEFAULT_UNITTEST_TO_EMAIL = 'unittests@example.com'
PROFILING_DATA_PATH = 'profiling/prof_db.csv'
# Set up the settings for the dev server if we are running it
if MANAGE_PY:
try:
from dev_settings import *
except ImportError:
pass
# Allow local overrides, useful for testing during development
try:
from local_settings import *
except ImportError:
pass
| apache-2.0 | -8,032,032,820,507,803,000 | 27.024814 | 79 | 0.735523 | false | 3.494431 | true | false | false | 0.002302 |
robhudson/zamboni | apps/reviews/forms.py | 6 | 5222 | import re
from urllib2 import unquote
from django import forms
from django.forms.models import modelformset_factory
import happyforms
from tower import ugettext_lazy as _lazy
from bleach import TLDS
from quieter_formset.formset import BaseModelFormSet
import amo
import reviews
from .models import Review, ReviewFlag
from amo.utils import raise_required
from editors.models import ReviewerScore
class ReviewReplyForm(forms.Form):
title = forms.CharField(required=False)
body = forms.CharField(widget=forms.Textarea(attrs={'rows': 3}))
def clean_body(self):
body = self.cleaned_data.get('body', '')
# Whitespace is not a review!
if not body.strip():
raise_required()
return body
class ReviewForm(ReviewReplyForm):
rating = forms.ChoiceField(zip(range(1, 6), range(1, 6)))
flags = re.I | re.L | re.U | re.M
# This matches the following three types of patterns:
# http://... or https://..., generic domain names, and IPv4
# octets. It does not match IPv6 addresses or long strings such as
# "example dot com".
link_pattern = re.compile('((://)|' # Protocols (e.g.: http://)
'((\d{1,3}\.){3}(\d{1,3}))|'
'([0-9a-z\-%%]+\.(%s)))' % '|'.join(TLDS),
flags)
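    # Illustrative inputs (assumed, per the comment above): 'http://x',
    # '10.0.0.1' and 'example.com' all trigger link_pattern, while a bare
    # phrase such as 'example dot com' does not.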
def _post_clean(self):
# Unquote the body in case someone tries 'example%2ecom'.
data = unquote(self.cleaned_data.get('body', ''))
if '<br>' in data:
self.cleaned_data['body'] = re.sub('<br>', '\n', data)
if self.link_pattern.search(data) is not None:
self.cleaned_data['flag'] = True
self.cleaned_data['editorreview'] = True
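    # Illustrative effect, assuming a submitted body of 'visit example%2ecom':
    # unquote() restores the dot, link_pattern then matches, and the review is
    # marked flag=True / editorreview=True so moderators review it.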
class ReviewFlagForm(forms.ModelForm):
class Meta:
model = ReviewFlag
fields = ('flag', 'note', 'review', 'user')
def clean(self):
data = super(ReviewFlagForm, self).clean()
if 'note' in data and data['note'].strip():
data['flag'] = ReviewFlag.OTHER
return data
class BaseReviewFlagFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
self.form = ModerateReviewFlagForm
self.request = kwargs.pop('request', None)
super(BaseReviewFlagFormSet, self).__init__(*args, **kwargs)
def save(self):
from reviews.helpers import user_can_delete_review
for form in self.forms:
if form.cleaned_data and user_can_delete_review(self.request,
form.instance):
action = int(form.cleaned_data['action'])
is_flagged = (form.instance.reviewflag_set.count() > 0)
if action != reviews.REVIEW_MODERATE_SKIP: # Delete flags.
for flag in form.instance.reviewflag_set.all():
flag.delete()
review = form.instance
addon = review.addon
if action == reviews.REVIEW_MODERATE_DELETE:
review_addon = review.addon
review_id = review.id
review.delete()
amo.log(amo.LOG.DELETE_REVIEW, review_addon, review_id,
details=dict(title=unicode(review.title),
body=unicode(review.body),
addon_id=addon.id,
addon_title=unicode(addon.name),
is_flagged=is_flagged))
if self.request:
ReviewerScore.award_moderation_points(
self.request.amo_user, addon, review_id)
elif action == reviews.REVIEW_MODERATE_KEEP:
review.editorreview = False
review.save()
amo.log(amo.LOG.APPROVE_REVIEW, review.addon, review,
details=dict(title=unicode(review.title),
body=unicode(review.body),
addon_id=addon.id,
addon_title=unicode(addon.name),
is_flagged=is_flagged))
if self.request:
ReviewerScore.award_moderation_points(
self.request.amo_user, addon, review.id)
class ModerateReviewFlagForm(happyforms.ModelForm):
action_choices = [(reviews.REVIEW_MODERATE_KEEP,
_lazy(u'Keep review; remove flags')),
(reviews.REVIEW_MODERATE_SKIP, _lazy(u'Skip for now')),
(reviews.REVIEW_MODERATE_DELETE,
_lazy(u'Delete review'))]
action = forms.ChoiceField(choices=action_choices, required=False,
initial=0, widget=forms.RadioSelect())
class Meta:
model = Review
fields = ('action',)
ReviewFlagFormSet = modelformset_factory(Review, extra=0,
form=ModerateReviewFlagForm,
formset=BaseReviewFlagFormSet)
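# Illustrative construction, assuming a view with a queryset of flagged
# reviews (the `request` kwarg is consumed by BaseReviewFlagFormSet):
#
#   formset = ReviewFlagFormSet(request.POST, queryset=qs, request=request)
#   if formset.is_valid():
#       formset.save()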
| bsd-3-clause | -1,434,801,852,707,504,000 | 37.970149 | 77 | 0.531406 | false | 4.330017 | false | false | false | 0.000957 |
Hernanarce/pelisalacarta | python/main-ui/windowtools.py | 5 | 4668 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
#------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------
ACTION_MOVE_LEFT = 1 #Dpad Left
ACTION_MOVE_RIGHT = 2 #Dpad Right
ACTION_MOVE_UP = 3 #Dpad Up
ACTION_MOVE_DOWN = 4 #Dpad Down
ACTION_PAGE_UP = 5 #Left trigger
ACTION_PAGE_DOWN = 6 #Right trigger
ACTION_SELECT_ITEM = 7 #'A'
ACTION_HIGHLIGHT_ITEM = 8
ACTION_PARENT_DIR = 9 #'B'
ACTION_PREVIOUS_MENU = 10 #'Back'
ACTION_SHOW_INFO = 11
ACTION_PAUSE = 12
ACTION_STOP = 13 #'Start'
ACTION_NEXT_ITEM = 14
ACTION_PREV_ITEM = 15
ACTION_XBUTTON = 18 #'X'
ACTION_YBUTTON = 34 #'Y'
ACTION_MOUSEMOVE = 90 # Mouse has moved
ACTION_MOUSEMOVE2 = 107 # Mouse has moved
ACTION_MOUSE_LEFT_CLICK = 100
ACTION_PREVIOUS_MENU2 = 92 #'Back'
ACTION_CONTEXT_MENU = 117 # pops up the context menu
ACTION_CONTEXT_MENU2 = 229 # pops up the context menu (remote control "title" button)
ACTION_TOUCH_TAP = 401
ACTION_NOOP = 999
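# Illustrative handler sketch (hypothetical window subclass, assuming xbmcgui
# is imported) showing how the action ids above are typically consumed:
#
#   class MyWindow(xbmcgui.WindowXML):
#       def onAction(self, action):
#           if action.getId() in (ACTION_PREVIOUS_MENU, ACTION_PREVIOUS_MENU2):
#               self.close()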
import plugintools
import os
import xbmc
# TODO: Define small, medium, large fonts, etc. by size and look up the most appropriate one (first check whether the skin font can be changed on the fly)
def get_fonts():
plugintools.log("get_fonts")
skin = xbmc.getSkinDir()
plugintools.log("get_fonts skin="+skin)
try:
skin_file = os.path.join(xbmc.translatePath('special://skin/1080i'), 'Font.xml')
plugintools.log("skin_file="+skin_file)
available_fonts = plugintools.read( skin_file, "r")
except:
try:
skin_file = os.path.join(xbmc.translatePath('special://skin/720p'), 'Font.xml')
plugintools.log("skin_file="+skin_file)
available_fonts = plugintools.read( skin_file, "r")
except:
available_fonts = ""
plugintools.log("get_fonts available_fonts="+repr(available_fonts))
if "confluence" in skin or "estuary" in skin or "refocus" in skin:
return {"10": "font10", "12": "font12", "16": "font16", "24": "font24_title", "30": "font30"}
elif "aeonmq" in skin:
return {"10": "font_14", "12": "font_16", "16": "font_20", "24": "font_24", "30": "font_30"}
elif "madnox" in skin:
return {"10": "Font_Reg22", "12": "Font_Reg26", "16": "Font_Reg32", "24": "Font_Reg38", "30": "Font_ShowcaseMainLabel2_Caps"}
'''
elif available_fonts:
fuentes = plugintools.find_multiple_matches(data_font, "<name>([^<]+)<\/name>(?:<![^<]+>|)\s*<filename>[^<]+<\/filename>\s*<size>(\d+)<\/size>")
sizes = []
try:
for name, size in fuentes:
size = int(size)
sizes.append([size, name])
sizes.sort()
fonts["10"] = sizes[0][1].lower()
check = False
if not 12 in sizes:
for size, name in sizes:
if size != fonts["10"]:
fonts["12"] = name.lower()
check = True
break
for size, name in sizes:
if size == 12 and not check:
fonts["12"] = name.lower()
elif size == 16:
fonts["16"] = name.lower()
elif size == 24:
fonts["24"] = name.lower()
elif size == 30:
fonts["30"] = name.lower()
break
elif size > 30 and size <= 33:
fonts["30"] = name.lower()
break
except:
pass
'''
return {"10": "font10", "12": "font12", "16": "font16", "24": "font24", "30": "font30"}
| gpl-3.0 | 2,203,916,595,311,384,300 | 38.542373 | 159 | 0.551865 | false | 3.586472 | false | false | false | 0.01886 |
felixbuenemann/sentry | src/sentry/migrations/0197_auto__del_accessgroup__del_unique_accessgroup_team_name.py | 18 | 39109 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'AccessGroup', fields ['team', 'name']
db.delete_unique(u'sentry_accessgroup', ['team_id', 'name'])
# Deleting model 'AccessGroup'
db.delete_table(u'sentry_accessgroup')
# Removing M2M table for field members on 'AccessGroup'
db.delete_table(db.shorten_name(u'sentry_accessgroup_members'))
# Removing M2M table for field projects on 'AccessGroup'
db.delete_table(db.shorten_name(u'sentry_accessgroup_projects'))
def backwards(self, orm):
# Adding model 'AccessGroup'
db.create_table(u'sentry_accessgroup', (
('type', self.gf('sentry.db.models.fields.bounded.BoundedIntegerField')(default=50)),
('managed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('team', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Team'])),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('data', self.gf('sentry.db.models.fields.gzippeddict.GzippedDictField')(null=True, blank=True)),
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
))
db.send_create_signal('sentry', ['AccessGroup'])
# Adding M2M table for field members on 'AccessGroup'
m2m_table_name = db.shorten_name(u'sentry_accessgroup_members')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('accessgroup', models.ForeignKey(orm['sentry.accessgroup'], null=False)),
('user', models.ForeignKey(orm['sentry.user'], null=False))
))
db.create_unique(m2m_table_name, ['accessgroup_id', 'user_id'])
# Adding M2M table for field projects on 'AccessGroup'
m2m_table_name = db.shorten_name(u'sentry_accessgroup_projects')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('accessgroup', models.ForeignKey(orm['sentry.accessgroup'], null=False)),
('project', models.ForeignKey(orm['sentry.project'], null=False))
))
db.create_unique(m2m_table_name, ['accessgroup_id', 'project_id'])
# Adding unique constraint on 'AccessGroup', fields ['team', 'name']
db.create_unique(u'sentry_accessgroup', ['team_id', 'name'])
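    # Illustrative invocation, assuming South manages this app's migrations:
    #   python manage.py migrate sentry 0197   # forwards: drops AccessGroup
    #   python manage.py migrate sentry 0196   # backwards: recreates it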
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'),)"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
    complete_apps = ['sentry']
xaxa89/mitmproxy | mitmproxy/tools/console/statusbar.py

import os.path
import urwid
from mitmproxy.tools.console import common
from mitmproxy.tools.console import pathedit
from mitmproxy.tools.console import signals
import mitmproxy.tools.console.master # noqa
class PromptPath:
def __init__(self, callback, args):
self.callback, self.args = callback, args
def __call__(self, pth):
if not pth:
return
pth = os.path.expanduser(pth)
try:
return self.callback(pth, *self.args)
except IOError as v:
signals.status_message.send(message=v.strerror)
class PromptStub:
def __init__(self, callback, args):
self.callback, self.args = callback, args
def __call__(self, txt):
return self.callback(txt, *self.args)
class ActionBar(urwid.WidgetWrap):
def __init__(self):
urwid.WidgetWrap.__init__(self, None)
self.clear()
signals.status_message.connect(self.sig_message)
signals.status_prompt.connect(self.sig_prompt)
signals.status_prompt_path.connect(self.sig_path_prompt)
signals.status_prompt_onekey.connect(self.sig_prompt_onekey)
self.last_path = ""
self.prompting = None
self.onekey = False
self.pathprompt = False
def sig_message(self, sender, message, expire=1):
if self.prompting:
return
w = urwid.Text(message)
self._w = w
if expire:
def cb(*args):
if w == self._w:
self.clear()
signals.call_in.send(seconds=expire, callback=cb)
def prep_prompt(self, p):
return p.strip() + ": "
def sig_prompt(self, sender, prompt, text, callback, args=()):
signals.focus.send(self, section="footer")
self._w = urwid.Edit(self.prep_prompt(prompt), text or "")
self.prompting = PromptStub(callback, args)
def sig_path_prompt(self, sender, prompt, callback, args=()):
signals.focus.send(self, section="footer")
self._w = pathedit.PathEdit(
self.prep_prompt(prompt),
os.path.dirname(self.last_path)
)
self.pathprompt = True
self.prompting = PromptPath(callback, args)
def sig_prompt_onekey(self, sender, prompt, keys, callback, args=()):
"""
Keys are a set of (word, key) tuples. The appropriate key in the
word is highlighted.
"""
signals.focus.send(self, section="footer")
prompt = [prompt, " ("]
mkup = []
for i, e in enumerate(keys):
mkup.extend(common.highlight_key(e[0], e[1]))
if i < len(keys) - 1:
mkup.append(",")
prompt.extend(mkup)
prompt.append(")? ")
self.onekey = set(i[1] for i in keys)
self._w = urwid.Edit(prompt, "")
self.prompting = PromptStub(callback, args)
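    # Illustrative only -- a hypothetical caller elsewhere in the console code
    # could trigger a one-key prompt like this (the callback name is made up):
    #
    #     signals.status_prompt_onekey.send(
    #         self,
    #         prompt="Quit",
    #         keys=[("yes", "y"), ("no", "n")],
    #         callback=ask_quit,
    #     )
    #
    # The words are rendered with their keys highlighted, and keypress() then
    # only accepts those keys (or enter/esc) while prompting.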
def selectable(self):
return True
def keypress(self, size, k):
if self.prompting:
if k == "esc":
self.prompt_done()
elif self.onekey:
if k == "enter":
self.prompt_done()
elif k in self.onekey:
self.prompt_execute(k)
elif k == "enter":
self.prompt_execute(self._w.get_edit_text())
else:
if common.is_keypress(k):
self._w.keypress(size, k)
else:
return k
def clear(self):
self._w = urwid.Text("")
self.prompting = None
def prompt_done(self):
self.prompting = None
self.onekey = False
self.pathprompt = False
signals.status_message.send(message="")
signals.focus.send(self, section="body")
def prompt_execute(self, txt):
if self.pathprompt:
self.last_path = txt
p = self.prompting
self.prompt_done()
msg = p(txt)
if msg:
signals.status_message.send(message=msg, expire=1)
class StatusBar(urwid.WidgetWrap):
def __init__(
self, master: "mitmproxy.tools.console.master.ConsoleMaster", helptext
) -> None:
self.master = master
self.helptext = helptext
self.ib = urwid.WidgetWrap(urwid.Text(""))
super().__init__(urwid.Pile([self.ib, self.master.ab]))
signals.update_settings.connect(self.sig_update)
signals.flowlist_change.connect(self.sig_update)
signals.footer_help.connect(self.sig_footer_help)
master.options.changed.connect(self.sig_update)
master.view.focus.sig_change.connect(self.sig_update)
self.redraw()
def sig_footer_help(self, sender, helptext):
self.helptext = helptext
self.redraw()
def sig_update(self, sender, updated=None):
self.redraw()
def keypress(self, *args, **kwargs):
return self.master.ab.keypress(*args, **kwargs)
def get_status(self):
r = []
sreplay = self.master.addons.get("serverplayback")
creplay = self.master.addons.get("clientplayback")
if len(self.master.options.setheaders):
r.append("[")
r.append(("heading_key", "H"))
r.append("eaders]")
if len(self.master.options.replacements):
r.append("[")
r.append(("heading_key", "R"))
r.append("eplacing]")
if creplay.count():
r.append("[")
r.append(("heading_key", "cplayback"))
r.append(":%s]" % creplay.count())
if sreplay.count():
r.append("[")
r.append(("heading_key", "splayback"))
r.append(":%s]" % sreplay.count())
if self.master.options.ignore_hosts:
r.append("[")
r.append(("heading_key", "I"))
r.append("gnore:%d]" % len(self.master.options.ignore_hosts))
if self.master.options.tcp_hosts:
r.append("[")
r.append(("heading_key", "T"))
r.append("CP:%d]" % len(self.master.options.tcp_hosts))
if self.master.options.intercept:
r.append("[")
r.append(("heading_key", "i"))
r.append(":%s]" % self.master.options.intercept)
if self.master.options.view_filter:
r.append("[")
r.append(("heading_key", "f"))
r.append(":%s]" % self.master.options.view_filter)
if self.master.options.stickycookie:
r.append("[")
r.append(("heading_key", "t"))
r.append(":%s]" % self.master.options.stickycookie)
if self.master.options.stickyauth:
r.append("[")
r.append(("heading_key", "u"))
r.append(":%s]" % self.master.options.stickyauth)
if self.master.options.default_contentview != "auto":
r.append("[")
r.append(("heading_key", "M"))
r.append(":%s]" % self.master.options.default_contentview)
if self.master.options.has_changed("console_order"):
r.append("[")
r.append(("heading_key", "o"))
r.append(":%s]" % self.master.options.console_order)
opts = []
if self.master.options.anticache:
opts.append("anticache")
if self.master.options.anticomp:
opts.append("anticomp")
if self.master.options.showhost:
opts.append("showhost")
if not self.master.options.refresh_server_playback:
opts.append("norefresh")
if self.master.options.replay_kill_extra:
opts.append("killextra")
if not self.master.options.upstream_cert:
opts.append("no-upstream-cert")
if self.master.options.console_focus_follow:
opts.append("following")
if self.master.options.stream_large_bodies:
opts.append(self.master.options.stream_large_bodies)
if opts:
r.append("[%s]" % (":".join(opts)))
if self.master.options.mode != "regular":
r.append("[%s]" % self.master.options.mode)
if self.master.options.scripts:
r.append("[")
r.append(("heading_key", "s"))
r.append("cripts:%s]" % len(self.master.options.scripts))
if self.master.options.streamfile:
r.append("[W:%s]" % self.master.options.streamfile)
return r
def redraw(self):
fc = len(self.master.view)
if self.master.view.focus.flow is None:
offset = 0
else:
offset = self.master.view.focus.index + 1
if self.master.options.console_order_reversed:
arrow = common.SYMBOL_UP
else:
arrow = common.SYMBOL_DOWN
marked = ""
if self.master.view.show_marked:
marked = "M"
t = [
('heading', ("%s %s [%s/%s]" % (arrow, marked, offset, fc)).ljust(11)),
]
if self.master.server.bound:
host = self.master.server.address[0]
if host == "0.0.0.0":
host = "*"
boundaddr = "[%s:%s]" % (host, self.master.server.address[1])
else:
boundaddr = ""
t.extend(self.get_status())
status = urwid.AttrWrap(urwid.Columns([
urwid.Text(t),
urwid.Text(
[
self.helptext,
boundaddr
],
align="right"
),
]), "heading")
self.ib._w = status
def selectable(self):
return True
varunmittal91/Docker-DNS | main.py

import json
from time import sleep
import os
from subprocess import call
import getopt
from sys import argv, exit
import requests
container_list = {}
def add_dns(name, hostname, ip, is_slave=None):
if is_slave:
r = requests.put("http://%s/%s/%s/%s/" % (is_slave, name, hostname, ip))
if r.status_code != 200:
exit(0)
else:
file = open("/etc/dnsmasq.d/0%s" % hostname, "w+")
file.write("host-record=%s,%s" % (hostname,ip))
file.close()
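# The non-slave branch above writes one dnsmasq snippet per container, e.g.
# /etc/dnsmasq.d/0web1.local containing (hostname and IP are illustrative):
#
#     host-record=web1.local,172.17.0.2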
def remove_dns(hostname, is_slave=None):
if is_slave:
r = requests.delete("http://%s/%s/" % (is_slave, hostname))
if r.status_code != 200:
exit(0)
else:
return call(["rm", "/etc/dnsmasq.d/0%s" % hostname])
def get_response(docker_api, api_path):
r = requests.get("%s/%s" % (docker_api, api_path))
if r.status_code == 200:
return json.loads(r.content)
def update_dns(is_slave=None, docker_api=None):
containers = get_response(docker_api, 'containers/json?all=1')
dnsmasq_required_restart = False
listed_ids = []
for container in containers:
listed_ids.append(container['Id'])
is_up = False
if container['Status'].startswith('Up'):
is_up = True
try:
status = container_list[container['Id']]
if is_up:
container_status = get_response(docker_api, 'containers/%s/json' % container['Id'])
if container_status['NetworkSettings']['IPAddress'] != status['ip']:
add_dns(name=container_status['Name'][1:], hostname=container_status['Config']['Hostname'],
ip=container_status['NetworkSettings']['IPAddress'], is_slave=is_slave)
status['ip'] = container_status['NetworkSettings']['IPAddress']
dnsmasq_required_restart = True
except KeyError:
if is_up:
container_status = get_response(docker_api, 'containers/%s/json' % container['Id'])
add_dns(name=container_status['Name'][1:], hostname=container_status['Config']['Hostname'],
ip=container_status['NetworkSettings']['IPAddress'], is_slave=is_slave)
dnsmasq_required_restart = True
container_list[container['Id']] = {'hostname': container_status['Config']['Hostname'],
'ip': container_status['NetworkSettings']['IPAddress']}
stopped = set(container_list.keys()) - set(listed_ids)
if len(stopped) > 0:
dnsmasq_required_restart = True
for id in stopped:
remove_dns(container_list[id]['hostname'], is_slave=is_slave)
del container_list[id]
if not is_slave and dnsmasq_required_restart:
call(['/bin/restartdns.sh'])
optlist,args = getopt.getopt(argv[1:], 'd:m:', ['dns=', 'master=', 'docker_api='])
names = ["127.0.0.1"]
is_slave = False
docker_api = None
for opt,value in optlist:
    # getopt returns short options with their leading dash, e.g. '-d'
    if opt in ['-d', '--dns']:
        names.extend(value.split(','))
    elif opt in ['-m', '--master']:
        is_slave = value
    elif opt in ['--docker_api']:
        docker_api = value
if not is_slave:
resolve_file = open("/etc/resolv.dnsmasq.conf", "w+")
names.append('8.8.8.8')
[resolve_file.write("nameserver %s\n" % name) for name in names]
resolve_file.close()
import SimpleHTTPServer
import SocketServer
PORT = 538
server_address = ('0.0.0.0', PORT)
class UpdateDNS(SimpleHTTPServer.SimpleHTTPRequestHandler):
def _set_headers(self, status_code):
self.send_response(status_code)
self.end_headers()
def do_GET(self): pass
def do_DELETE(self):
hostname = self.path[1:].split('/')[0]
if len(hostname) > 0:
if remove_dns(hostname) == 0:
call(['/bin/restartdns.sh'])
self._set_headers(200)
else:
self._set_headers(500)
self.wfile.write("")
def do_PUT(self):
try:
name,hostname,ip = self.path[1:].split('/')[0:3]
add_dns(name,hostname,ip)
call(['/bin/restartdns.sh'])
self._set_headers(200)
except ValueError:
self._set_headers(500)
self.wfile.write("")
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer(server_address, UpdateDNS)
httpd.serve_forever()
else:
while 1:
update_dns(is_slave, docker_api)
sleep(10)
DonBeo/scikit-learn | sklearn/preprocessing/imputation.py

# Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils import as_float_array
from ..utils.fixes import astype
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'Imputer',
]
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if value_to_mask == "NaN" or np.isnan(value_to_mask):
return np.isnan(X)
else:
return X == value_to_mask
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
        # Break ties the same way scipy.stats.mode does
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
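# Worked example of the tie-breaking above (mirroring scipy.stats.mode, which
# returns the smallest value on a tie):
#
#     >>> _most_frequent(np.array([1, 1, 2]), 0, 2)
#     0
#
# 1 occurs twice in the array and the extra value 0 would also occur twice,
# so the smaller of the two (0) is returned.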
class Imputer(BaseEstimator, TransformerMixin):
"""Imputation transformer for completing missing values.
Parameters
----------
missing_values : integer or "NaN", optional (default="NaN")
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For missing values encoded as np.nan,
use the string value "NaN".
strategy : string, optional (default="mean")
The imputation strategy.
- If "mean", then replace missing values using the mean along
the axis.
- If "median", then replace missing values using the median along
the axis.
- If "most_frequent", then replace missing using the most frequent
value along the axis.
axis : integer, optional (default=0)
The axis along which to impute.
- If `axis=0`, then impute along columns.
- If `axis=1`, then impute along rows.
verbose : integer, optional (default=0)
Controls the verbosity of the imputer.
copy : boolean, optional (default=True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is sparse and `missing_values=0`;
- If `axis=0` and X is encoded as a CSR matrix;
- If `axis=1` and X is encoded as a CSC matrix.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature if axis == 0.
Notes
-----
- When ``axis=0``, columns which only contained missing values at `fit`
are discarded upon `transform`.
- When ``axis=1``, an exception is raised if there are rows for which it is
not possible to fill in the missing values (e.g., because they only
contain missing values).
"""
def __init__(self, missing_values="NaN", strategy="mean",
axis=0, verbose=0, copy=True):
self.missing_values = missing_values
self.strategy = strategy
self.axis = axis
self.verbose = verbose
self.copy = copy
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
# Check parameters
allowed_strategies = ["mean", "median", "most_frequent"]
if self.strategy not in allowed_strategies:
raise ValueError("Can only use these strategies: {0} "
" got strategy={1}".format(allowed_strategies,
self.strategy))
if self.axis not in [0, 1]:
raise ValueError("Can only impute missing values on axis 0 and 1, "
" got axis={0}".format(self.axis))
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data will be computed in transform()
# when the imputation is done per sample (i.e., when axis=1).
if self.axis == 0:
X = check_array(X, accept_sparse='csc', dtype=np.float64,
force_all_finite=False)
if sparse.issparse(X):
self.statistics_ = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
self.statistics_ = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
return self
def _sparse_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on sparse data."""
# Imputation is done "by column", so if we want to do it
# by row we only need to convert the matrix to csr format.
if axis == 1:
X = X.tocsr()
else:
X = X.tocsc()
# Count the zeros
if missing_values == 0:
n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
else:
n_zeros_axis = X.shape[axis] - np.diff(X.indptr)
# Mean
if strategy == "mean":
if missing_values != 0:
n_non_missing = n_zeros_axis
# Mask the missing elements
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.logical_not(mask_missing_values)
# Sum only the valid elements
new_data = X.data.copy()
new_data[mask_missing_values] = 0
X = sparse.csc_matrix((new_data, X.indices, X.indptr),
copy=False)
sums = X.sum(axis=0)
# Count the elements != 0
mask_non_zeros = sparse.csc_matrix(
(mask_valids.astype(np.float64),
X.indices,
X.indptr), copy=False)
s = mask_non_zeros.sum(axis=0)
n_non_missing = np.add(n_non_missing, s)
else:
sums = X.sum(axis=axis)
n_non_missing = np.diff(X.indptr)
# Ignore the error, columns with a np.nan statistics_
# are not an error at this point. These columns will
# be removed in transform
with np.errstate(all="ignore"):
return np.ravel(sums) / np.ravel(n_non_missing)
# Median + Most frequent
else:
# Remove the missing values, for each column
columns_all = np.hsplit(X.data, X.indptr[1:-1])
mask_missing_values = _get_mask(X.data, missing_values)
mask_valids = np.hsplit(np.logical_not(mask_missing_values),
X.indptr[1:-1])
# astype necessary for bug in numpy.hsplit before v1.9
columns = [col[astype(mask, bool, copy=False)]
for col, mask in zip(columns_all, mask_valids)]
# Median
if strategy == "median":
median = np.empty(len(columns))
for i, column in enumerate(columns):
median[i] = _get_median(column, n_zeros_axis[i])
return median
# Most frequent
elif strategy == "most_frequent":
most_frequent = np.empty(len(columns))
for i, column in enumerate(columns):
most_frequent[i] = _most_frequent(column,
0,
n_zeros_axis[i])
return most_frequent
def _dense_fit(self, X, strategy, missing_values, axis):
"""Fit the transformer on dense data."""
X = check_array(X, force_all_finite=False)
mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5):
                # In old versions of numpy, calling a median on an array
                # containing nans returns nan. This is different in
                # recent versions of numpy, which we want to mimic.
masked_X.mask = np.logical_or(masked_X.mask,
np.isnan(X))
median_masked = np.ma.median(masked_X, axis=axis)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
            # properly if the first element is masked and if its frequency
            # is equal to the frequency of the most frequent valid element
# See https://github.com/scipy/scipy/issues/2636
            # To be able to access the elements by column
if axis == 0:
X = X.transpose()
mask = mask.transpose()
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(np.bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The input data to complete.
"""
if self.axis == 0:
check_is_fitted(self, 'statistics_')
# Copy just once
X = as_float_array(X, copy=self.copy, force_all_finite=False)
# Since two different arrays can be provided in fit(X) and
# transform(X), the imputation data need to be recomputed
# when the imputation is done per sample
if self.axis == 1:
X = check_array(X, accept_sparse='csr', force_all_finite=False,
copy=False)
if sparse.issparse(X):
statistics = self._sparse_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
statistics = self._dense_fit(X,
self.strategy,
self.missing_values,
self.axis)
else:
X = check_array(X, accept_sparse='csc', force_all_finite=False,
copy=False)
statistics = self.statistics_
# Delete the invalid rows/columns
invalid_mask = np.isnan(statistics)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.where(valid_mask)[0]
missing = np.arange(X.shape[not self.axis])[invalid_mask]
if self.axis == 0 and invalid_mask.any():
if self.verbose:
warnings.warn("Deleting features without "
"observed values: %s" % missing)
X = X[:, valid_statistics_indexes]
elif self.axis == 1 and invalid_mask.any():
raise ValueError("Some rows only contain "
"missing values: %s" % missing)
# Do actual imputation
if sparse.issparse(X) and self.missing_values != 0:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=np.int),
np.diff(X.indptr))[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype)
else:
if sparse.issparse(X):
X = X.toarray()
mask = _get_mask(X, self.missing_values)
n_missing = np.sum(mask, axis=self.axis)
values = np.repeat(valid_statistics, n_missing)
if self.axis == 0:
coordinates = np.where(mask.transpose())[::-1]
else:
coordinates = mask
X[coordinates] = values
return X
| bsd-3-clause | -3,294,317,596,899,334,000 | 36.485333 | 79 | 0.531835 | false | 4.268752 | false | false | false | 0 |
guijomatos/SickRage | sickbeard/providers/hdbits.py

# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import datetime
import urllib
import generic
from sickbeard import classes
from sickbeard import logger, tvcache
from sickrage.helper.exceptions import AuthException
try:
import json
except ImportError:
import simplejson as json
class HDBitsProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "HDBits")
self.supportsBacklog = True
self.public = False
self.enabled = False
self.username = None
self.passkey = None
self.ratio = None
self.cache = HDBitsCache(self)
self.urls = {'base_url': 'https://hdbits.org',
'search': 'https://hdbits.org/api/torrents',
'rss': 'https://hdbits.org/api/torrents',
'download': 'https://hdbits.org/download.php?'
}
self.url = self.urls['base_url']
def isEnabled(self):
return self.enabled
def _checkAuth(self):
if not self.username or not self.passkey:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _checkAuthFromData(self, parsedJSON):
if 'status' in parsedJSON and 'message' in parsedJSON:
if parsedJSON.get('status') == 5:
logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['message'],
logger.DEBUG)
raise AuthException(
"Your authentication credentials for " + self.name + " are incorrect, check your config.")
return True
def _get_season_search_strings(self, ep_obj):
season_search_string = [self._make_post_data_JSON(show=ep_obj.show, season=ep_obj)]
return season_search_string
def _get_episode_search_strings(self, ep_obj, add_string=''):
episode_search_string = [self._make_post_data_JSON(show=ep_obj.show, episode=ep_obj)]
return episode_search_string
def _get_title_and_url(self, item):
title = item['name']
if title:
title = self._clean_title_from_provider(title)
url = self.urls['download'] + urllib.urlencode({'id': item['id'], 'passkey': self.passkey})
return (title, url)
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
self._checkAuth()
logger.log(u"Search url: " + self.urls['search'] + " search_params: " + search_params,
logger.DEBUG)
parsedJSON = self.getURL(self.urls['search'], post_data=search_params, json=True)
if not parsedJSON:
return []
if self._checkAuthFromData(parsedJSON):
if parsedJSON and 'data' in parsedJSON:
items = parsedJSON['data']
else:
logger.log(u"Resulting JSON from " + self.name + " isn't correct, not parsing it", logger.ERROR)
items = []
for item in items:
results.append(item)
return results
def findPropers(self, search_date=None):
results = []
search_terms = [' proper ', ' repack ']
for term in search_terms:
for item in self._doSearch(self._make_post_data_JSON(search_term=term)):
if item['utadded']:
try:
result_date = datetime.datetime.fromtimestamp(int(item['utadded']))
except:
result_date = None
if result_date:
if not search_date or result_date > search_date:
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, result_date, self.show))
return results
def _make_post_data_JSON(self, show=None, episode=None, season=None, search_term=None):
post_data = {
'username': self.username,
'passkey': self.passkey,
'category': [2],
# TV Category
}
if episode:
if show.air_by_date:
post_data['tvdb'] = {
'id': show.indexerid,
'episode': str(episode.airdate).replace('-', '|')
}
elif show.sports:
post_data['tvdb'] = {
'id': show.indexerid,
'episode': episode.airdate.strftime('%b')
}
elif show.anime:
post_data['tvdb'] = {
'id': show.indexerid,
'episode': "%i" % int(episode.scene_absolute_number)
}
else:
post_data['tvdb'] = {
'id': show.indexerid,
'season': episode.scene_season,
'episode': episode.scene_episode
}
if season:
if show.air_by_date or show.sports:
post_data['tvdb'] = {
'id': show.indexerid,
'season': str(season.airdate)[:7],
}
elif show.anime:
post_data['tvdb'] = {
'id': show.indexerid,
'season': "%d" % season.scene_absolute_number,
}
else:
post_data['tvdb'] = {
'id': show.indexerid,
'season': season.scene_season,
}
if search_term:
post_data['search'] = search_term
return json.dumps(post_data)
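    # For reference, a regular-show episode search serializes to JSON shaped
    # like the following (all field values are illustrative):
    #
    #     {"username": "user", "passkey": "key", "category": [2],
    #      "tvdb": {"id": 12345, "season": 1, "episode": 2}}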
def seedRatio(self):
return self.ratio
class HDBitsCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll HDBits every 15 minutes max
self.minTime = 15
def _getRSSData(self):
results = []
try:
parsedJSON = self.provider.getURL(self.provider.urls['rss'], post_data=self.provider._make_post_data_JSON(), json=True)
if self.provider._checkAuthFromData(parsedJSON):
results = parsedJSON['data']
except:
pass
return {'entries': results}
provider = HDBitsProvider()
| gpl-3.0 | -2,814,682,117,040,107,000 | 31.087558 | 131 | 0.548183 | false | 4.184495 | false | false | false | 0.002585 |
clairetang6/bokeh | bokeh/command/subcommands/html.py

'''
To generate a standalone HTML page for a Bokeh application from a single
Python script, pass the script name to ``bokeh html`` on the command
line:
.. code-block:: sh
bokeh html app_script.py
The generated HTML will be saved in the current working directory with
the name ``app_script.html``.
It is also possible to run the same command with Jupyter notebooks:
.. code-block:: sh
bokeh html app_notebook.ipynb
This will generate an HTML file named ``app_notebook.html`` just like
with a python script.
Applications can also be created from directories. The directory should
contain a ``main.py`` (and any other helper modules that are required) as
well as any additional assets (e.g., theme files). Pass the directory name
to ``bokeh html`` to generate the HTML:
.. code-block:: sh
bokeh html app_dir
It is possible to generate HTML pages for multiple applications at once:
.. code-block:: sh
bokeh html app_script.py app_dir
If you would like to automatically open a browser to display the HTML
page(s), you can pass the ``--show`` option on the command line:
.. code-block:: sh
bokeh html app_script.py app_dir --show
This will open two pages, for ``app_script.html`` and ``app_dir.html``,
respectively.
.. warning::
Applications that use ``on_change`` callbacks require using the Bokeh
server to execute the callback code. Though the application may render,
the callbacks will not function. See :ref:`userguide_cli_serve` for
more information on using ``bokeh serve``.
'''
from __future__ import absolute_import
from bokeh.resources import Resources
from bokeh.embed import standalone_html_page_for_models
from .file_output import FileOutputSubcommand
class HTML(FileOutputSubcommand):
''' Subcommand to output applications as standalone HTML files.
'''
name = "html"
extension = "html"
help = "Create standalone HTML files for one or more applications"
args = (
FileOutputSubcommand.files_arg("HTML"),
(
'--show', dict(
action='store_true',
help="Open generated file(s) in a browser"
)),
) + FileOutputSubcommand.other_args()
def after_write_file(self, args, filename, doc):
if args.show:
from bokeh.util.browser import view
view(filename)
def file_contents(self, args, doc):
resources = Resources(mode="cdn", root_dir=None)
return standalone_html_page_for_models(doc, resources=resources, title=None)
| bsd-3-clause | 1,447,480,294,361,713,700 | 26.725275 | 84 | 0.697582 | false | 3.966981 | false | false | false | 0.001982 |
phuongtg/zerorpc-python | zerorpc/socket.py

# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .context import Context
from .events import Events
class SocketBase(object):
def __init__(self, zmq_socket_type, context=None):
self._context = context or Context.get_instance()
self._events = Events(zmq_socket_type, context)
def close(self):
self._events.close()
def connect(self, endpoint, resolve=True):
return self._events.connect(endpoint, resolve)
def bind(self, endpoint, resolve=True):
return self._events.bind(endpoint, resolve)
| mit | -1,702,668,845,676,752,600 | 39.395349 | 81 | 0.739781 | false | 4.236585 | false | false | false | 0.001727 |
stefanaspect/microchat | srv.py

from bottle import app, route, request, response, run
import cgi
import time
queue = []
who = []
tmpl = None
MAX = 10
@route('/')
def index():
global tmpl
if tmpl is None:
with file('index.html', 'r') as fh:
tmpl = fh.read()
return tmpl
@route('/message', method=['get', 'post'])
def message():
global queue
if request.method == "POST":
data = request.json
ts = time.time()
date = time.ctime()
user = data['user']
mesg = data.get('mesg')
code = data.get('code')
# only store the last MAX items in the queue...
if len(queue) > MAX:
tmp = len(queue) - MAX
queue = queue[tmp:len(queue)]
queue.append(dict(user=user, ts=ts, date=date, mesg=mesg, code=code))
return {"queue": queue}
else:
return {"queue": queue}
@route('/who', method=['post'])
def updateWho():
global who
data = request.json
user = data['user']
ts = time.time()
ip = request.remote_addr
who.append(dict(user=user, ts=ts, ip=ip))
    # Purge users whose last update is older than 50s; drop duplicate user/ip entries.
for u in who:
lapse = time.time() - u['ts']
# print lapse, u
if lapse > 50:
who.remove(u)
else:
for uu in who:
if u != uu and u['user'] == uu['user'] and u['ip'] == uu['ip']:
who.remove(uu)
# print who
return {"who": who}
run(host='0.0.0.0', port=8085)
| mit | 6,278,180,846,241,621,000 | 23.086207 | 77 | 0.564782 | false | 3.083885 | false | false | false | 0.017895 |
sadleader/odoo | openerp/api.py

# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" This module provides the elements for managing two different API styles,
namely the "traditional" and "record" styles.
In the "traditional" style, parameters like the database cursor, user id,
context dictionary and record ids (usually denoted as ``cr``, ``uid``,
``context``, ``ids``) are passed explicitly to all methods. In the "record"
style, those parameters are hidden into model instances, which gives it a
more object-oriented feel.
For instance, the statements::
model = self.pool.get(MODEL)
ids = model.search(cr, uid, DOMAIN, context=context)
for rec in model.browse(cr, uid, ids, context=context):
print rec.name
model.write(cr, uid, ids, VALUES, context=context)
may also be written as::
env = Env(cr, uid, context) # cr, uid, context wrapped in env
recs = env[MODEL] # retrieve an instance of MODEL
recs = recs.search(DOMAIN) # search returns a recordset
for rec in recs: # iterate over the records
print rec.name
recs.write(VALUES) # update all records in recs
Methods written in the "traditional" style are automatically decorated,
following some heuristics based on parameter names.
"""
__all__ = [
'Environment',
'Meta', 'guess', 'noguess',
'model', 'multi', 'one',
'cr', 'cr_context', 'cr_uid', 'cr_uid_context',
'cr_uid_id', 'cr_uid_id_context', 'cr_uid_ids', 'cr_uid_ids_context',
'constrains', 'depends', 'onchange', 'returns',
]
import logging
import operator
from inspect import currentframe, getargspec
from collections import defaultdict, MutableMapping
from contextlib import contextmanager
from pprint import pformat
from weakref import WeakSet
from werkzeug.local import Local, release_local
from openerp.tools import frozendict
_logger = logging.getLogger(__name__)
# The following attributes are used, and reflected on wrapping methods:
# - method._constrains: set by @constrains, specifies constraint dependencies
# - method._depends: set by @depends, specifies compute dependencies
# - method._returns: set by @returns, specifies return model
# - method._onchange: set by @onchange, specifies onchange fields
# - method.clear_cache: set by @ormcache, used to clear the cache
#
# On wrapping method only:
# - method._api: decorator function, used for re-applying decorator
# - method._orig: original method
#
WRAPPED_ATTRS = ('__module__', '__name__', '__doc__', '_constrains',
'_depends', '_onchange', '_returns', 'clear_cache')
INHERITED_ATTRS = ('_returns',)
class Meta(type):
""" Metaclass that automatically decorates traditional-style methods by
guessing their API. It also implements the inheritance of the
:func:`returns` decorators.
"""
def __new__(meta, name, bases, attrs):
# dummy parent class to catch overridden methods decorated with 'returns'
parent = type.__new__(meta, name, bases, {})
for key, value in attrs.items():
if not key.startswith('__') and callable(value):
# make the method inherit from decorators
value = propagate(getattr(parent, key, None), value)
# guess calling convention if none is given
if not hasattr(value, '_api'):
try:
value = guess(value)
except TypeError:
pass
attrs[key] = value
return type.__new__(meta, name, bases, attrs)
identity = lambda x: x
def decorate(method, attr, value):
""" Decorate `method` or its original method. """
if getattr(method, '_api', False):
# decorate the original method, and re-apply the api decorator
setattr(method._orig, attr, value)
return method._api(method._orig)
else:
# simply decorate the method itself
setattr(method, attr, value)
return method
def propagate(from_method, to_method):
""" Propagate decorators from `from_method` to `to_method`, and return the
resulting method.
"""
if from_method:
for attr in INHERITED_ATTRS:
if hasattr(from_method, attr) and not hasattr(to_method, attr):
to_method = decorate(to_method, attr, getattr(from_method, attr))
return to_method
def constrains(*args):
""" Decorates a constraint checker. Each argument must be a field name
used in the check::
@api.one
@api.constrains('name', 'description')
def _check_description(self):
if self.name == self.description:
raise ValidationError("Fields name and description must be different")
Invoked on the records on which one of the named fields has been modified.
Should raise :class:`~openerp.exceptions.ValidationError` if the
validation failed.
"""
return lambda method: decorate(method, '_constrains', args)
def onchange(*args):
""" Return a decorator to decorate an onchange method for given fields.
Each argument must be a field name::
@api.onchange('partner_id')
def _onchange_partner(self):
self.message = "Dear %s" % (self.partner_id.name or "")
In the form views where the field appears, the method will be called
when one of the given fields is modified. The method is invoked on a
pseudo-record that contains the values present in the form. Field
assignments on that record are automatically sent back to the client.
"""
return lambda method: decorate(method, '_onchange', args)
def depends(*args):
""" Return a decorator that specifies the field dependencies of a "compute"
method (for new-style function fields). Each argument must be a string
that consists in a dot-separated sequence of field names::
pname = fields.Char(compute='_compute_pname')
@api.one
@api.depends('partner_id.name', 'partner_id.is_company')
def _compute_pname(self):
if self.partner_id.is_company:
self.pname = (self.partner_id.name or "").upper()
else:
self.pname = self.partner_id.name
One may also pass a single function as argument. In that case, the
dependencies are given by calling the function with the field's model.
"""
if args and callable(args[0]):
args = args[0]
elif any('id' in arg.split('.') for arg in args):
raise NotImplementedError("Compute method cannot depend on field 'id'.")
return lambda method: decorate(method, '_depends', args)
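# Hedged sketch of the callable form described above (field names are
# illustrative): the function receives the model and returns the dependency
# names, so dependencies can vary with model attributes such as _rec_name.
#
# @api.one
# @api.depends(lambda self: [self._rec_name] if self._rec_name else [])
# def _compute_label(self):
#     self.label = self[self._rec_name] if self._rec_name else ''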
def returns(model, downgrade=None):
""" Return a decorator for methods that return instances of `model`.
:param model: a model name, or ``'self'`` for the current model
:param downgrade: a function `downgrade(value)` to convert the
record-style `value` to a traditional-style output
The decorator adapts the method output to the api style: `id`, `ids` or
``False`` for the traditional style, and recordset for the record style::
@model
@returns('res.partner')
def find_partner(self, arg):
... # return some record
# output depends on call style: traditional vs record style
partner_id = model.find_partner(cr, uid, arg, context=context)
# recs = model.browse(cr, uid, ids, context)
partner_record = recs.find_partner(arg)
Note that the decorated method must satisfy that convention.
Those decorators are automatically *inherited*: a method that overrides
a decorated existing method will be decorated with the same
``@returns(model)``.
"""
return lambda method: decorate(method, '_returns', (model, downgrade))
def make_wrapper(decorator, method, old_api, new_api):
""" Return a wrapper method for `method`. """
def wrapper(self, *args, **kwargs):
# avoid hasattr(self, '_ids') because __getattr__() is overridden
if '_ids' in self.__dict__:
return new_api(self, *args, **kwargs)
else:
return old_api(self, *args, **kwargs)
# propagate specific openerp attributes from method to wrapper
for attr in WRAPPED_ATTRS:
if hasattr(method, attr):
setattr(wrapper, attr, getattr(method, attr))
wrapper._api = decorator
wrapper._orig = method
return wrapper
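# Dispatch sketch: the same wrapped method serves both call styles because
# record instances carry '_ids' in their __dict__ while bare model objects do
# not (names below are illustrative).
#
# recs.write({'name': 'x'}) # self has '_ids' -> new_api
# model.write(cr, uid, ids, {'name': 'x'}) # plain model -> old_api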
def get_downgrade(method):
""" Return a function `downgrade(value)` that adapts `value` from
record-style to traditional-style, following the convention of `method`.
"""
spec = getattr(method, '_returns', None)
if spec:
model, downgrade = spec
return downgrade or (lambda value: value.ids)
else:
return lambda value: value
def get_upgrade(method):
""" Return a function `upgrade(self, value)` that adapts `value` from
traditional-style to record-style, following the convention of `method`.
"""
spec = getattr(method, '_returns', None)
if spec:
model, downgrade = spec
if model == 'self':
return lambda self, value: self.browse(value)
else:
return lambda self, value: self.env[model].browse(value)
else:
return lambda self, value: value
def get_aggregate(method):
""" Return a function `aggregate(self, value)` that aggregates record-style
`value` for a method decorated with ``@one``.
"""
spec = getattr(method, '_returns', None)
if spec:
# value is a list of instances, concatenate them
model, downgrade = spec
if model == 'self':
return lambda self, value: sum(value, self.browse())
else:
return lambda self, value: sum(value, self.env[model].browse())
else:
return lambda self, value: value
def get_context_split(method):
""" Return a function `split` that extracts the context from a pair of
positional and keyword arguments::
context, args, kwargs = split(args, kwargs)
"""
pos = len(getargspec(method).args) - 1
def split(args, kwargs):
if pos < len(args):
return args[pos], args[:pos], kwargs
else:
return kwargs.pop('context', None), args, kwargs
return split
def model(method):
""" Decorate a record-style method where `self` is a recordset, but its
        contents are not relevant, only the model is. Such a method::
@api.model
def method(self, args):
...
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, uid, args, context=context)
Notice that no `ids` are passed to the method in the traditional style.
"""
split = get_context_split(method)
downgrade = get_downgrade(method)
def old_api(self, cr, uid, *args, **kwargs):
context, args, kwargs = split(args, kwargs)
recs = self.browse(cr, uid, [], context)
result = method(recs, *args, **kwargs)
return downgrade(result)
return make_wrapper(model, method, old_api, method)
def multi(method):
""" Decorate a record-style method where `self` is a recordset. The method
typically defines an operation on records. Such a method::
@api.multi
def method(self, args):
...
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, uid, ids, args, context=context)
"""
split = get_context_split(method)
downgrade = get_downgrade(method)
def old_api(self, cr, uid, ids, *args, **kwargs):
context, args, kwargs = split(args, kwargs)
recs = self.browse(cr, uid, ids, context)
result = method(recs, *args, **kwargs)
return downgrade(result)
return make_wrapper(multi, method, old_api, method)
def one(method):
""" Decorate a record-style method where `self` is expected to be a
singleton instance. The decorated method automatically loops on records,
and makes a list with the results. In case the method is decorated with
@returns, it concatenates the resulting instances. Such a method::
@api.one
def method(self, args):
return self.name
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
names = recs.method(args)
names = model.method(cr, uid, ids, args, context=context)
"""
split = get_context_split(method)
downgrade = get_downgrade(method)
aggregate = get_aggregate(method)
def old_api(self, cr, uid, ids, *args, **kwargs):
context, args, kwargs = split(args, kwargs)
recs = self.browse(cr, uid, ids, context)
result = new_api(recs, *args, **kwargs)
return downgrade(result)
def new_api(self, *args, **kwargs):
result = [method(rec, *args, **kwargs) for rec in self]
return aggregate(self, result)
return make_wrapper(one, method, old_api, new_api)
def cr(method):
""" Decorate a traditional-style method that takes `cr` as a parameter.
Such a method may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, args)
"""
upgrade = get_upgrade(method)
def new_api(self, *args, **kwargs):
cr, uid, context = self.env.args
result = method(self._model, cr, *args, **kwargs)
return upgrade(self, result)
return make_wrapper(cr, method, method, new_api)
def cr_context(method):
""" Decorate a traditional-style method that takes `cr`, `context` as parameters. """
upgrade = get_upgrade(method)
def new_api(self, *args, **kwargs):
cr, uid, context = self.env.args
kwargs['context'] = context
result = method(self._model, cr, *args, **kwargs)
return upgrade(self, result)
return make_wrapper(cr_context, method, method, new_api)
def cr_uid(method):
""" Decorate a traditional-style method that takes `cr`, `uid` as parameters. """
upgrade = get_upgrade(method)
def new_api(self, *args, **kwargs):
cr, uid, context = self.env.args
result = method(self._model, cr, uid, *args, **kwargs)
return upgrade(self, result)
return make_wrapper(cr_uid, method, method, new_api)
def cr_uid_context(method):
""" Decorate a traditional-style method that takes `cr`, `uid`, `context` as
parameters. Such a method may be called in both record and traditional
styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, uid, args, context=context)
"""
upgrade = get_upgrade(method)
def new_api(self, *args, **kwargs):
cr, uid, context = self.env.args
kwargs['context'] = context
result = method(self._model, cr, uid, *args, **kwargs)
return upgrade(self, result)
return make_wrapper(cr_uid_context, method, method, new_api)
def cr_uid_id(method):
""" Decorate a traditional-style method that takes `cr`, `uid`, `id` as
parameters. Such a method may be called in both record and traditional
styles. In the record style, the method automatically loops on records.
"""
upgrade = get_upgrade(method)
def new_api(self, *args, **kwargs):
cr, uid, context = self.env.args
result = [method(self._model, cr, uid, id, *args, **kwargs) for id in self.ids]
return upgrade(self, result)
return make_wrapper(cr_uid_id, method, method, new_api)
def cr_uid_id_context(method):
""" Decorate a traditional-style method that takes `cr`, `uid`, `id`,
`context` as parameters. Such a method::
@api.cr_uid_id
def method(self, cr, uid, id, args, context=None):
...
may be called in both record and traditional styles, like::
# rec = model.browse(cr, uid, id, context)
rec.method(args)
model.method(cr, uid, id, args, context=context)
"""
upgrade = get_upgrade(method)
def new_api(self, *args, **kwargs):
cr, uid, context = self.env.args
kwargs['context'] = context
result = [method(self._model, cr, uid, id, *args, **kwargs) for id in self.ids]
return upgrade(self, result)
return make_wrapper(cr_uid_id_context, method, method, new_api)
def cr_uid_ids(method):
""" Decorate a traditional-style method that takes `cr`, `uid`, `ids` as
parameters. Such a method may be called in both record and traditional
styles.
"""
upgrade = get_upgrade(method)
def new_api(self, *args, **kwargs):
cr, uid, context = self.env.args
result = method(self._model, cr, uid, self.ids, *args, **kwargs)
return upgrade(self, result)
return make_wrapper(cr_uid_ids, method, method, new_api)
def cr_uid_ids_context(method):
""" Decorate a traditional-style method that takes `cr`, `uid`, `ids`,
`context` as parameters. Such a method::
@api.cr_uid_ids_context
def method(self, cr, uid, ids, args, context=None):
...
may be called in both record and traditional styles, like::
# recs = model.browse(cr, uid, ids, context)
recs.method(args)
model.method(cr, uid, ids, args, context=context)
        Applying this decorator explicitly is generally not necessary; see :func:`guess`.
"""
upgrade = get_upgrade(method)
def new_api(self, *args, **kwargs):
cr, uid, context = self.env.args
kwargs['context'] = context
result = method(self._model, cr, uid, self.ids, *args, **kwargs)
return upgrade(self, result)
return make_wrapper(cr_uid_ids_context, method, method, new_api)
def v7(method_v7):
""" Decorate a method that supports the old-style api only. A new-style api
may be provided by redefining a method with the same name and decorated
with :func:`~.v8`::
@api.v7
def foo(self, cr, uid, ids, context=None):
...
@api.v8
def foo(self):
...
Note that the wrapper method uses the docstring of the first method.
"""
# retrieve method_v8 from the caller's frame
frame = currentframe().f_back
method = frame.f_locals.get(method_v7.__name__)
method_v8 = getattr(method, '_v8', method)
wrapper = make_wrapper(v7, method_v7, method_v7, method_v8)
wrapper._v7 = method_v7
wrapper._v8 = method_v8
return wrapper
def v8(method_v8):
""" Decorate a method that supports the new-style api only. An old-style api
may be provided by redefining a method with the same name and decorated
with :func:`~.v7`::
@api.v8
def foo(self):
...
@api.v7
def foo(self, cr, uid, ids, context=None):
...
Note that the wrapper method uses the docstring of the first method.
"""
# retrieve method_v7 from the caller's frame
frame = currentframe().f_back
method = frame.f_locals.get(method_v8.__name__)
method_v7 = getattr(method, '_v7', method)
wrapper = make_wrapper(v8, method_v8, method_v7, method_v8)
wrapper._v7 = method_v7
wrapper._v8 = method_v8
return wrapper
def noguess(method):
""" Decorate a method to prevent any effect from :func:`guess`. """
method._api = False
return method
def guess(method):
""" Decorate `method` to make it callable in both traditional and record
styles. This decorator is applied automatically by the model's
metaclass, and has no effect on already-decorated methods.
The API style is determined by heuristics on the parameter names: ``cr``
or ``cursor`` for the cursor, ``uid`` or ``user`` for the user id,
        ``id`` for a single record id, ``ids`` for a list of record ids, and ``context`` for the
context dictionary. If a traditional API is recognized, one of the
decorators :func:`cr`, :func:`cr_context`, :func:`cr_uid`,
:func:`cr_uid_context`, :func:`cr_uid_id`, :func:`cr_uid_id_context`,
:func:`cr_uid_ids`, :func:`cr_uid_ids_context` is applied on the method.
Method calls are considered traditional style when their first parameter
is a database cursor.
"""
if hasattr(method, '_api'):
return method
# introspection on argument names to determine api style
args, vname, kwname, defaults = getargspec(method)
names = tuple(args) + (None,) * 4
if names[0] == 'self':
if names[1] in ('cr', 'cursor'):
if names[2] in ('uid', 'user'):
if names[3] == 'ids':
if 'context' in names or kwname:
return cr_uid_ids_context(method)
else:
return cr_uid_ids(method)
elif names[3] == 'id':
if 'context' in names or kwname:
return cr_uid_id_context(method)
else:
return cr_uid_id(method)
elif 'context' in names or kwname:
return cr_uid_context(method)
else:
return cr_uid(method)
elif 'context' in names:
return cr_context(method)
else:
return cr(method)
# no wrapping by default
return noguess(method)
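# Illustrative signatures and the decorator guess() would pick for them
# (method names are hypothetical):
#
# def read_all(self, cr, uid, ids, context=None): ... # -> cr_uid_ids_context
# def read_one(self, cr, uid, id, context=None): ... # -> cr_uid_id_context
# def setup(self, cr, context=None): ...             # -> cr_context
# def helper(self, arg): ...                         # -> noguess (no 'cr' name)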
def expected(decorator, func):
""" Decorate `func` with `decorator` if `func` is not wrapped yet. """
return decorator(func) if not hasattr(func, '_api') else func
class Environment(object):
""" An environment wraps data for ORM records:
- :attr:`cr`, the current database cursor;
- :attr:`uid`, the current user id;
- :attr:`context`, the current context dictionary.
It also provides access to the registry, a cache for records, and a data
structure to manage recomputations.
"""
_local = Local()
@classmethod
@contextmanager
def manage(cls):
""" Context manager for a set of environments. """
if hasattr(cls._local, 'environments'):
yield
else:
try:
cls._local.environments = Environments()
yield
finally:
release_local(cls._local)
@classmethod
def reset(cls):
""" Clear the set of environments.
This may be useful when recreating a registry inside a transaction.
"""
cls._local.environments = Environments()
def __new__(cls, cr, uid, context):
assert context is not None
args = (cr, uid, context)
# if env already exists, return it
env, envs = None, cls._local.environments
for env in envs:
if env.args == args:
return env
# otherwise create environment, and add it in the set
self = object.__new__(cls)
self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context))
self.registry = RegistryManager.get(cr.dbname)
self.cache = defaultdict(dict) # {field: {id: value, ...}, ...}
self.prefetch = defaultdict(set) # {model_name: set(id), ...}
self.computed = defaultdict(set) # {field: set(id), ...}
self.dirty = defaultdict(set) # {record: set(field_name), ...}
self.all = envs
envs.add(self)
return self
def __getitem__(self, model_name):
""" return a given model """
return self.registry[model_name]._browse(self, ())
def __call__(self, cr=None, user=None, context=None):
""" Return an environment based on `self` with modified parameters.
:param cr: optional database cursor to change the current cursor
:param user: optional user/user id to change the current user
:param context: optional context dictionary to change the current context
"""
cr = self.cr if cr is None else cr
uid = self.uid if user is None else int(user)
context = self.context if context is None else context
return Environment(cr, uid, context)
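    # Sketch (names assumed): deriving a superuser environment with a tweaked
    # context; __new__ above returns a cached environment when an identical
    # (cr, uid, context) triple already exists in this request.
    #
    # admin_env = env(user=SUPERUSER_ID, context=dict(env.context, lang='en_US'))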
def ref(self, xml_id, raise_if_not_found=True):
""" return the record corresponding to the given `xml_id` """
return self['ir.model.data'].xmlid_to_object(xml_id, raise_if_not_found=raise_if_not_found)
@property
def user(self):
""" return the current user (as an instance) """
return self(user=SUPERUSER_ID)['res.users'].browse(self.uid)
@property
def lang(self):
""" return the current language code """
return self.context.get('lang')
@contextmanager
def _do_in_mode(self, mode):
if self.all.mode:
yield
else:
try:
self.all.mode = mode
yield
finally:
self.all.mode = False
self.dirty.clear()
def do_in_draft(self):
""" Context-switch to draft mode, where all field updates are done in
cache only.
"""
return self._do_in_mode(True)
@property
def in_draft(self):
""" Return whether we are in draft mode. """
return bool(self.all.mode)
def do_in_onchange(self):
""" Context-switch to 'onchange' draft mode, which is a specialized
draft mode used during execution of onchange methods.
"""
return self._do_in_mode('onchange')
@property
def in_onchange(self):
""" Return whether we are in 'onchange' draft mode. """
return self.all.mode == 'onchange'
def invalidate(self, spec):
""" Invalidate some fields for some records in the cache of all
environments.
:param spec: what to invalidate, a list of `(field, ids)` pair,
where `field` is a field object, and `ids` is a list of record
ids or ``None`` (to invalidate all records).
"""
if not spec:
return
for env in list(self.all):
c = env.cache
for field, ids in spec:
if ids is None:
if field in c:
del c[field]
else:
field_cache = c[field]
for id in ids:
field_cache.pop(id, None)
def invalidate_all(self):
""" Clear the cache of all environments. """
for env in list(self.all):
env.cache.clear()
env.prefetch.clear()
env.computed.clear()
env.dirty.clear()
def clear(self):
""" Clear all record caches, and discard all fields to recompute.
This may be useful when recovering from a failed ORM operation.
"""
self.invalidate_all()
self.all.todo.clear()
@contextmanager
def clear_upon_failure(self):
""" Context manager that clears the environments (caches and fields to
recompute) upon exception.
"""
try:
yield
except Exception:
self.clear()
raise
def field_todo(self, field):
""" Check whether `field` must be recomputed, and returns a recordset
with all records to recompute for `field`.
"""
if field in self.all.todo:
return reduce(operator.or_, self.all.todo[field])
def check_todo(self, field, record):
""" Check whether `field` must be recomputed on `record`, and if so,
returns the corresponding recordset to recompute.
"""
for recs in self.all.todo.get(field, []):
if recs & record:
return recs
def add_todo(self, field, records):
""" Mark `field` to be recomputed on `records`. """
recs_list = self.all.todo.setdefault(field, [])
# use user admin for accessing records without access rights issues
recs_list.append(records.sudo())
def remove_todo(self, field, records):
""" Mark `field` as recomputed on `records`. """
recs_list = [recs - records for recs in self.all.todo.pop(field, [])]
recs_list = filter(None, recs_list)
if recs_list:
self.all.todo[field] = recs_list
def has_todo(self):
""" Return whether some fields must be recomputed. """
return bool(self.all.todo)
def get_todo(self):
""" Return a pair `(field, records)` to recompute. """
for field, recs_list in self.all.todo.iteritems():
return field, recs_list[0]
def check_cache(self):
""" Check the cache consistency. """
# make a full copy of the cache, and invalidate it
cache_dump = dict(
(field, dict(field_cache))
for field, field_cache in self.cache.iteritems()
)
self.invalidate_all()
# re-fetch the records, and compare with their former cache
invalids = []
for field, field_dump in cache_dump.iteritems():
ids = filter(None, field_dump)
records = self[field.model_name].browse(ids)
for record in records:
try:
cached = field_dump[record.id]
fetched = record[field.name]
if fetched != cached:
info = {'cached': cached, 'fetched': fetched}
invalids.append((field, record, info))
except (AccessError, MissingError):
pass
if invalids:
raise Warning('Invalid cache for fields\n' + pformat(invalids))
class Environments(object):
""" A common object for all environments in a request. """
def __init__(self):
self.envs = WeakSet() # weak set of environments
self.todo = {} # recomputations {field: [records]}
self.mode = False # flag for draft/onchange
def add(self, env):
""" Add the environment `env`. """
self.envs.add(env)
def __iter__(self):
""" Iterate over environments. """
return iter(self.envs)
# keep those imports here in order to handle cyclic dependencies correctly
from openerp import SUPERUSER_ID
from openerp.exceptions import Warning, AccessError, MissingError
from openerp.modules.registry import RegistryManager
| agpl-3.0 | 52,575,901,038,153,500 | 34.063457 | 99 | 0.596231 | false | 4.141639 | false | false | false | 0.000905 |
vigilv/scikit-learn | examples/exercises/plot_cv_digits.py | 232 | 1206 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause | -4,896,713,396,852,368,000 | 26.409091 | 77 | 0.640962 | false | 2.948655 | false | false | false | 0.002488 |
matsprea/omim | 3party/protobuf/python/google/protobuf/reflection.py | 71 | 8012 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import api_implementation
from google.protobuf import descriptor as descriptor_mod
from google.protobuf import message
_FieldDescriptor = descriptor_mod.FieldDescriptor
if api_implementation.Type() == 'cpp':
if api_implementation.Version() == 2:
from google.protobuf.pyext import cpp_message
_NewMessage = cpp_message.NewMessage
_InitMessage = cpp_message.InitMessage
else:
from google.protobuf.internal import cpp_message
_NewMessage = cpp_message.NewMessage
_InitMessage = cpp_message.InitMessage
else:
from google.protobuf.internal import python_message
_NewMessage = python_message.NewMessage
_InitMessage = python_message.InitMessage
class GeneratedProtocolMessageType(type):
"""Metaclass for protocol message classes created at runtime from Descriptors.
We add implementations for all methods described in the Message class. We
also create properties to allow getting/setting all fields in the protocol
message. Finally, we create slots to prevent users from accidentally
"setting" nonexistent fields in the protocol message, which then wouldn't get
serialized / deserialized properly.
The protocol compiler currently uses this metaclass to create protocol
message classes at runtime. Clients can also manually create their own
classes at runtime, as in this example:
mydescriptor = Descriptor(.....)
class MyProtoClass(Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = mydescriptor
myproto_instance = MyProtoClass()
myproto.foo_field = 23
...
The above example will not work for nested types. If you wish to include them,
use reflection.MakeClass() instead of manually instantiating the class in
order to create the appropriate class structure.
"""
# Must be consistent with the protocol-compiler code in
# proto2/compiler/internal/generator.*.
_DESCRIPTOR_KEY = 'DESCRIPTOR'
def __new__(cls, name, bases, dictionary):
"""Custom allocation for runtime-generated class types.
We override __new__ because this is apparently the only place
where we can meaningfully set __slots__ on the class we're creating(?).
(The interplay between metaclasses and slots is not very well-documented).
Args:
name: Name of the class (ignored, but required by the
metaclass protocol).
bases: Base classes of the class we're constructing.
(Should be message.Message). We ignore this field, but
it's required by the metaclass protocol
dictionary: The class dictionary of the class we're
constructing. dictionary[_DESCRIPTOR_KEY] must contain
a Descriptor object describing this protocol message
type.
Returns:
Newly-allocated class.
"""
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
bases = _NewMessage(bases, descriptor, dictionary)
superclass = super(GeneratedProtocolMessageType, cls)
new_class = superclass.__new__(cls, name, bases, dictionary)
setattr(descriptor, '_concrete_class', new_class)
return new_class
def __init__(cls, name, bases, dictionary):
"""Here we perform the majority of our work on the class.
We add enum getters, an __init__ method, implementations
of all Message methods, and properties for all fields
in the protocol type.
Args:
name: Name of the class (ignored, but required by the
metaclass protocol).
bases: Base classes of the class we're constructing.
(Should be message.Message). We ignore this field, but
it's required by the metaclass protocol
dictionary: The class dictionary of the class we're
constructing. dictionary[_DESCRIPTOR_KEY] must contain
a Descriptor object describing this protocol message
type.
"""
descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
_InitMessage(descriptor, cls)
superclass = super(GeneratedProtocolMessageType, cls)
superclass.__init__(name, bases, dictionary)
def ParseMessage(descriptor, byte_str):
"""Generate a new Message instance from this Descriptor and a byte string.
Args:
descriptor: Protobuf Descriptor object
byte_str: Serialized protocol buffer byte string
Returns:
Newly created protobuf Message object.
"""
result_class = MakeClass(descriptor)
new_msg = result_class()
new_msg.ParseFromString(byte_str)
return new_msg
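# Hedged usage sketch (the descriptor and serialized bytes are assumed to come
# from elsewhere, e.g. descriptor.MakeDescriptor and a serialized message):
#
# msg = ParseMessage(msg_descriptor, serialized_bytes)
# print msg.ListFields()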
def MakeClass(descriptor):
"""Construct a class object for a protobuf described by descriptor.
Composite descriptors are handled by defining the new class as a member of the
parent class, recursing as deep as necessary.
This is the dynamic equivalent to:
class Parent(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor
class Child(message.Message):
__metaclass__ = GeneratedProtocolMessageType
DESCRIPTOR = descriptor.nested_types[0]
Sample usage:
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(proto2_string)
msg_descriptor = descriptor.MakeDescriptor(file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
Args:
descriptor: A descriptor.Descriptor object describing the protobuf.
Returns:
The Message class object described by the descriptor.
"""
attributes = {}
for name, nested_type in descriptor.nested_types_by_name.items():
attributes[name] = MakeClass(nested_type)
attributes[GeneratedProtocolMessageType._DESCRIPTOR_KEY] = descriptor
return GeneratedProtocolMessageType(str(descriptor.name), (message.Message,),
attributes)
| apache-2.0 | 3,486,283,460,033,361,000 | 38.082927 | 80 | 0.74638 | false | 4.644638 | false | false | false | 0.002996 |
ahmetcemturan/SFACT | fabmetheus_utilities/settings.py | 1 | 78895 | """
Settings is a collection of utilities to display, read & write the settings and position widgets.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
import cStringIO
import math
import os
import shutil
import sys
import traceback
import webbrowser
try:
import Tkinter
except:
	print('You do not have Tkinter, which is needed for the graphical interface; you will only be able to use the command line.')
print('Information on how to download Tkinter is at:\nwww.tcl.tk/software/tcltk/')
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = "$Date: 2008/23/04 $"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalRepositoryDialogListTable = {}
globalProfileSaveListenerListTable = {}
globalCloseListTables = [globalRepositoryDialogListTable, globalProfileSaveListenerListTable]
globalSettingReplacements = {
'Perimeter Width over Thickness (ratio):' : 'Edge Width over Height (ratio):',
'Layer Thickness (mm):' : 'Layer Height (mm):',
'Location Arrival X (mm):' : 'Arrival X (mm):',
'Location Arrival Y (mm):' : 'Arrival Y (mm):',
'Location Arrival Z (mm):' : 'Arrival Z (mm):',
'Location Departure X (mm):' : 'Departure X (mm):',
'Location Departure Y (mm):' : 'Departure Y (mm):',
'Location Departure Z (mm):' : 'Departure Z (mm):',
'Location Wipe X (mm):' : 'Wipe X (mm):',
'Location Wipe Y (mm):' : 'Wipe Y (mm):',
'Location Wipe Z (mm):' : 'Wipe Z (mm):'
}
globalSpreadsheetSeparator = '\t'
globalTemporaryOverrides = {}
def addAcceleratorCommand( acceleratorBinding, commandFunction, master, menu, text ):
"Add accelerator command."
acceleratorText = acceleratorBinding[1 : -1]
lastIndexOfMinus = acceleratorText.rfind('-')
if lastIndexOfMinus > - 1:
acceleratorText = acceleratorText[ : lastIndexOfMinus + 1 ] + acceleratorText[ lastIndexOfMinus + 1 : ].capitalize()
acceleratorText = acceleratorText.replace('KeyPress-', '')
acceleratorText = acceleratorText.replace('-', '+')
acceleratorText = acceleratorText.replace('Control', 'Ctrl')
acceleratorBinding = acceleratorBinding.replace('KeyPress', '')
menu.add_command( accelerator = acceleratorText, label = text, underline = 0, command = commandFunction )
master.bind( acceleratorBinding, commandFunction )
def addEmptyRow( gridPosition ):
"Add an empty row."
gridPosition.increment()
Tkinter.Label( gridPosition.master ).grid( row = gridPosition.row, column = gridPosition.column )
def addListsToRepository(fileNameHelp, repository):
'Add the value to the lists.'
addListsToRepositoryByFunction(fileNameHelp, None, repository)
def addListsToRepositoryByFunction(fileNameHelp, getProfileDirectory, repository):
'Add the value to the lists.'
repository.displayEntities = []
repository.executeTitle = None
repository.fileNameHelp = fileNameHelp
repository.fileNameInput = None
repository.lowerName = fileNameHelp.split('.')[-2]
repository.baseName = repository.lowerName + '.csv'
repository.baseNameSynonym = None
repository.baseNameSynonymDictionary = None
repository.capitalizedName = getEachWordCapitalized( repository.lowerName )
repository.getProfileDirectory = getProfileDirectory
repository.openLocalHelpPage = HelpPage().getOpenFromDocumentationSubName( repository.fileNameHelp )
repository.openWikiManualHelpPage = None
repository.preferences = []
repository.repositoryDialog = None
repository.saveListenerTable = {}
repository.title = repository.capitalizedName + ' Settings'
repository.menuEntities = []
repository.saveCloseTitle = 'Save and Close'
repository.windowPosition = WindowPosition().getFromValue( repository, '0+0')
for setting in repository.preferences:
setting.repository = repository
def addMenuEntitiesToMenu( menu, menuEntities ):
"Add the menu entities to the menu."
for menuEntity in menuEntities:
menuEntity.addToMenu( menu )
def addMenuEntitiesToMenuFrameable( menu, menuEntities ):
"Add the menu entities to the menu."
for menuEntity in menuEntities:
menuEntity.addToMenuFrameable( menu )
def addPluginsParentToMenu( directoryPath, menu, parentPath, pluginFileNames ):
"Add plugins and the parent to the menu."
ToolDialog().addPluginToMenu( menu, parentPath[ : parentPath.rfind('.') ] )
menu.add_separator()
addPluginsToMenu( directoryPath, menu, pluginFileNames )
def addPluginsToMenu( directoryPath, menu, pluginFileNames ):
"Add plugins to the menu."
for pluginFileName in pluginFileNames:
ToolDialog().addPluginToMenu( menu, os.path.join( directoryPath, pluginFileName ) )
def cancelRepository(repository):
"Read the repository then set all the entities to the read repository values."
getReadRepository(repository)
for setting in repository.displayEntities:
if setting in repository.preferences:
setting.setStateToValue()
def deleteDirectory( directory, subfolderName ):
"Delete the directory if it exists."
subDirectory = os.path.join( directory, subfolderName )
if os.path.isdir( subDirectory ):
shutil.rmtree( subDirectory )
def deleteMenuItems( menu ):
"Delete the menu items."
try:
lastMenuIndex = menu.index( Tkinter.END )
if lastMenuIndex != None:
menu.delete( 0, lastMenuIndex )
except:
print('this should never happen, the lastMenuIndex in deleteMenuItems in settings could not be determined.')
def getAlongWayHexadecimalColor( beginBrightness, colorWidth, difference, endColorTuple, wayLength ):
"Get a color along the way from begin brightness to the end color."
alongWay = 1.0
if wayLength != 0.0:
alongWay = 0.4 + 0.6 * min( 1.0, abs( float( difference ) / float( wayLength ) ) )
hexadecimalColor = '#'
oneMinusAlongWay = 1.0 - alongWay
for primaryIndex in xrange(3):
hexadecimalColor += getAlongWayHexadecimalPrimary( beginBrightness, oneMinusAlongWay, colorWidth, endColorTuple[ primaryIndex ], alongWay )
return hexadecimalColor
def getAlongWayHexadecimalPrimary( beginBrightness, beginRatio, colorWidth, endBrightness, endRatio ):
"Get a primary color along the way from grey to the end color."
brightness = beginRatio * float( beginBrightness ) + endRatio * float( endBrightness )
return getWidthHex( int( round( brightness ) ), colorWidth )
def getAlterationFile(fileName):
"Get the file from the fileName or the lowercase fileName in the alterations directories."
settingsAlterationsDirectory = archive.getSettingsPath('alterations')
archive.makeDirectory(settingsAlterationsDirectory)
fileInSettingsAlterationsDirectory = getFileInGivenDirectory(settingsAlterationsDirectory, fileName)
if fileInSettingsAlterationsDirectory != '':
return fileInSettingsAlterationsDirectory
alterationsDirectory = archive.getSkeinforgePath('alterations')
return getFileInGivenDirectory(alterationsDirectory, fileName)
def getAlterationFileLine(fileName):
"Get the alteration file line from the fileName."
lines = getAlterationLines(fileName)
if len(lines) == 0:
return []
return getAlterationFileLineBlindly(fileName)
def getAlterationFileLineBlindly(fileName):
"Get the alteration file line from the fileName."
return '(<alterationFile>) %s (</alterationFile>)' % fileName
def getAlterationFileLines(fileName):
'Get the alteration file line and the text lines from the fileName in the alterations directories.'
lines = getAlterationLines(fileName)
if len(lines) == 0:
return []
return [getAlterationFileLineBlindly(fileName)] + lines
def getAlterationLines(fileName):
"Get the text lines from the fileName in the alterations directories."
return archive.getTextLines(getAlterationFile(fileName))
def getDisplayedDialogFromConstructor(repository):
"Display the repository dialog."
try:
getReadRepository(repository)
return RepositoryDialog( repository, Tkinter.Tk() )
except:
print('this should never happen, getDisplayedDialogFromConstructor in settings could not open')
print(repository)
traceback.print_exc(file=sys.stdout)
return None
def getDisplayedDialogFromPath(path):
"Display the repository dialog."
pluginModule = archive.getModuleWithPath(path)
if pluginModule == None:
return None
return getDisplayedDialogFromConstructor( pluginModule.getNewRepository() )
def getDisplayToolButtonsRepository( directoryPath, importantFileNames, names, repository ):
"Get the display tool buttons."
displayToolButtons = []
for name in names:
displayToolButton = DisplayToolButton().getFromPath( name in importantFileNames, name, os.path.join( directoryPath, name ), repository )
displayToolButtons.append( displayToolButton )
return displayToolButtons
def getEachWordCapitalized( name ):
"Get the capitalized name."
withSpaces = name.lower().replace('_', ' ')
words = withSpaces.split(' ')
capitalizedStrings = []
for word in words:
capitalizedStrings.append( word.capitalize() )
return ' '.join( capitalizedStrings )
def getFileInGivenDirectory( directory, fileName ):
"Get the file from the fileName or the lowercase fileName in the given directory."
directoryListing = os.listdir(directory)
lowerFileName = fileName.lower()
for directoryFile in directoryListing:
if directoryFile.lower() == lowerFileName:
return getFileTextGivenDirectoryFileName( directory, directoryFile )
return ''
def getFileTextGivenDirectoryFileName( directory, fileName ):
"Get the entire text of a file with the given file name in the given directory."
absoluteFilePath = os.path.join( directory, fileName )
return archive.getFileText( absoluteFilePath )
def getFolders(directory):
"Get the folder list in a directory."
archive.makeDirectory(directory)
directoryListing = []
try:
directoryListing = os.listdir(directory)
except OSError:
print('Skeinforge can not list the directory:')
print(directory)
print('so give it read/write permission for that directory.')
folders = []
for fileName in directoryListing:
if os.path.isdir( os.path.join( directory, fileName ) ):
folders.append(fileName)
return folders
def getGlobalRepositoryDialogValues():
"Get the global repository dialog values."
global globalRepositoryDialogListTable
return euclidean.getListTableElements(globalRepositoryDialogListTable)
def getPathInFabmetheusFromFileNameHelp( fileNameHelp ):
"Get the directory path from file name help."
fabmetheusPath = archive.getFabmetheusPath()
splitFileNameHelps = fileNameHelp.split('.')
splitFileNameDirectoryNames = splitFileNameHelps[ : - 1 ]
for splitFileNameDirectoryName in splitFileNameDirectoryNames:
fabmetheusPath = os.path.join( fabmetheusPath, splitFileNameDirectoryName )
return fabmetheusPath
def getProfileBaseName(repository):
"Get the profile base file name."
return getProfileName(repository.baseName, repository)
def getProfilesDirectoryInAboveDirectory(subName=''):
"Get the profiles directory path in the above directory."
aboveProfilesDirectory = archive.getSkeinforgePath('profiles')
if subName == '':
return aboveProfilesDirectory
return os.path.join( aboveProfilesDirectory, subName )
def getProfileName(name, repository):
"Get the name, joined with the profile directory if there is one."
if repository.getProfileDirectory == None:
return name
return os.path.join(repository.getProfileDirectory(), name)
def getRadioPluginsAddPluginFrame( directoryPath, importantFileNames, names, repository ):
"Get the radio plugins and add the plugin frame."
repository.pluginFrame = PluginFrame()
radioPlugins = []
for name in names:
radioPlugin = RadioPlugin().getFromRadio( name in importantFileNames, repository.pluginFrame.latentStringVar, name, repository, name == importantFileNames[0] )
radioPlugin.updateFunction = repository.pluginFrame.update
radioPlugins.append( radioPlugin )
defaultRadioButton = getSelectedRadioPlugin( importantFileNames + [ radioPlugins[0].name ], radioPlugins )
repository.pluginFrame.getFromPath( defaultRadioButton, directoryPath, repository )
return radioPlugins
def getReadRepository(repository):
"Read and return settings from a file."
text = archive.getFileText(archive.getProfilesPath(getProfileBaseName(repository)), False)
if text == '':
if repository.baseNameSynonym != None:
text = archive.getFileText(archive.getProfilesPath(getProfileName(repository.baseNameSynonym, repository)), False)
if text == '':
print('The default %s will be written in the .skeinforge folder in the home directory.' % repository.title.lower() )
text = archive.getFileText(getProfilesDirectoryInAboveDirectory(getProfileBaseName(repository)), False)
if text != '':
readSettingsFromText(repository, text)
writeSettings(repository)
temporaryApplyOverrides(repository)
return repository
readSettingsFromText(repository, text)
temporaryApplyOverrides(repository)
return repository
def getRepositoryText(repository):
"Get the text representation of the repository."
repositoryWriter = getRepositoryWriter(repository.title.lower())
for setting in repository.preferences:
setting.writeToRepositoryWriter(repositoryWriter)
return repositoryWriter.getvalue()
def getRepositoryWriter(title):
"Get the repository writer for the title."
repositoryWriter = cStringIO.StringIO()
repositoryWriter.write('Format is tab separated %s.\n' % title)
repositoryWriter.write('_Name %sValue\n' % globalSpreadsheetSeparator)
return repositoryWriter
def getSelectedPluginModuleFromPath(filePath, plugins):
"Get the selected plugin module."
for plugin in plugins:
if plugin.value:
return gcodec.getModuleFromPath(plugin.name, filePath)
return None
def getSelectedPluginName( plugins ):
"Get the selected plugin name."
for plugin in plugins:
if plugin.value:
return plugin.name
return ''
def getSelectedRadioPlugin( names, radioPlugins ):
"Get the selected radio button if it exists, None otherwise."
for radioPlugin in radioPlugins:
if radioPlugin.value:
return radioPlugin
for name in names:
for radioPlugin in radioPlugins:
if radioPlugin.name == name:
radioPlugin.value = True
return radioPlugin
print('this should never happen, no getSelectedRadioPlugin in settings')
print(names)
	return radioPlugins[0]
def getShortestUniqueSettingName(settingName, settings):
"Get the shortest unique name in the settings."
for length in xrange(3, len(settingName)):
numberOfEquals = 0
shortName = settingName[: length]
for setting in settings:
if setting.name[: length] == shortName:
numberOfEquals += 1
if numberOfEquals < 2:
return shortName.lower()
return settingName.lower()
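# Worked example (hypothetical setting names): among 'Fill', 'Fillet' and
# 'Flow', 'Flow' shortens to 'flo', 'Fillet' needs five characters ('fille')
# to become unique, and 'Fill' falls through to its full lowercase name.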
def getSubfolderWithBasename( basename, directory ):
"Get the subfolder in the directory with the basename."
archive.makeDirectory(directory)
directoryListing = os.listdir(directory)
for fileName in directoryListing:
joinedFileName = os.path.join( directory, fileName )
if os.path.isdir(joinedFileName):
if basename == fileName:
return joinedFileName
return None
def getTitleFromName( title ):
"Get the title of this setting."
if title[-1] == ':':
title = title[ : - 1 ]
spaceBracketIndex = title.find(' (')
if spaceBracketIndex > - 1:
return title[ : spaceBracketIndex ]
return title
def getUntilFirstBracket(text):
'Get the text until the first bracket, if any.'
dotIndex = text.find('(')
if dotIndex < 0:
return text
return text[: dotIndex]
def getWidthHex( number, width ):
"Get the first width hexadecimal digits."
return ('0000%s' % hex(number)[ 2 : ] )[ - width : ]
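# For example: getWidthHex(255, 2) == 'ff' and getWidthHex(10, 4) == '000a',
# since hex() strips to 'ff' / 'a' and the '0000' prefix pads short values.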
def liftRepositoryDialogs( repositoryDialogs ):
"Lift the repository dialogs."
for repositoryDialog in repositoryDialogs:
repositoryDialog.root.withdraw() # the withdraw & deiconify trick is here because lift does not work properly on my linux computer
repositoryDialog.root.lift() # probably not necessary, here in case the withdraw & deiconify trick does not work on some other computer
repositoryDialog.root.deiconify()
repositoryDialog.root.lift() # probably not necessary, here in case the withdraw & deiconify trick does not work on some other computer
repositoryDialog.root.update_idletasks()
def openSVGPage( fileName, svgViewer ):
"Open svg page with an svg program."
if svgViewer == '':
return
if svgViewer == 'webbrowser':
openWebPage(fileName)
return
filePath = '"' + os.path.normpath(fileName) + '"' # " to send in file name with spaces
shellCommand = svgViewer + ' ' + filePath
commandResult = os.system(shellCommand)
if commandResult != 0:
print('It may be that the system could not find the %s program.' % svgViewer )
print('If so, try installing the %s program or look for another svg viewer, like Netscape which can be found at:' % svgViewer )
print('http://www.netscape.org/')
print('')
def openWebPage( webPagePath ):
"Open a web page in a browser."
if webPagePath.find('#') != - 1: # to get around # encode bug
redirectionText = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">\n<html>\n<head>\n'
redirectionText += '<meta http-equiv="REFRESH" content="0;url=%s"></head>\n</HTML>\n' % webPagePath
webPagePath = archive.getDocumentationPath('redirect.html')
archive.writeFileText( webPagePath, redirectionText )
webPagePath = '"%s"' % webPagePath # " to get around space in url bug
try: # " to get around using gnome-open or internet explorer for webbrowser default
webbrowserController = webbrowser.get('firefox')
except:
webbrowserController = webbrowser.get()
webbrowserName = webbrowserController.name
if webbrowserName == '':
try:
os.startfile( webPagePath )#this is available on some python environments, but not all
return
except:
pass
print('Skeinforge was not able to open the file in a web browser. To see the documentation, open the following file in a web browser:')
print(webPagePath)
return
else:
os.system(webbrowserName + ' ' + webPagePath)#used this instead of webbrowser.open() to workaround webbrowser open() bug
def printProgress(layerIndex, procedureName):
"Print layerIndex followed by a carriage return."
printProgressByString('%s layer count %s...' % (procedureName.capitalize(), layerIndex + 1))
def printProgressByNumber(layerIndex, numberOfLayers, procedureName):
"Print layerIndex and numberOfLayers followed by a carriage return."
printProgressByString('%s layer count %s of %s...' % (procedureName.capitalize(), layerIndex + 1, numberOfLayers))
def printProgressByString(progressString):
"Print progress string."
sys.stdout.write(progressString)
sys.stdout.write(chr(27) + '\r')
sys.stdout.flush()
def quitWindow(root):
"Quit a window."
try:
root.destroy()
except:
pass
def quitWindows( event=None ):
"Quit all windows."
global globalRepositoryDialogListTable
globalRepositoryDialogValues = euclidean.getListTableElements( globalRepositoryDialogListTable )
for globalRepositoryDialogValue in globalRepositoryDialogValues:
quitWindow(globalRepositoryDialogValue.root)
def readSettingsFromText(repository, text):
"Read settings from a text."
text = text.replace(('\nName %sValue\n' % globalSpreadsheetSeparator), ('\n_Name %sValue\n' % globalSpreadsheetSeparator))
lines = archive.getTextLines(text)
shortDictionary = {}
for setting in repository.preferences:
shortDictionary[getShortestUniqueSettingName(setting.name, repository.preferences)] = setting
if repository.baseNameSynonymDictionary != None:
synonymDictionaryCopy = repository.baseNameSynonymDictionary.copy()
for line in lines:
splitLine = line.split(globalSpreadsheetSeparator)
if len(splitLine) > 1:
if splitLine[0] in synonymDictionaryCopy:
del synonymDictionaryCopy[splitLine[0]]
for synonymDictionaryCopyKey in synonymDictionaryCopy.keys():
text = archive.getFileText(archive.getProfilesPath(getProfileName(synonymDictionaryCopy[synonymDictionaryCopyKey], repository)), False)
synonymLines = archive.getTextLines(text)
for synonymLine in synonymLines:
splitLine = synonymLine.split(globalSpreadsheetSeparator)
if len(splitLine) > 1:
if splitLine[0] == synonymDictionaryCopyKey:
lines.append(synonymLine)
for lineIndex in xrange(len(lines)):
setRepositoryToLine(lineIndex, lines, shortDictionary)
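# The tab separated profile text parsed above looks like this sketch (the
# setting name and value are illustrative; the header lines come from
# getRepositoryWriter):
#
# Format is tab separated craft settings.
# _Name<tab>Value
# Layer Height (mm):<tab>0.4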
def saveAll():
"Save all the dialogs."
for globalRepositoryDialogValue in getGlobalRepositoryDialogValues():
globalRepositoryDialogValue.save()
def saveRepository(repository):
"Set the entities to the dialog then write them."
for setting in repository.preferences:
setting.setToDisplay()
writeSettingsPrintMessage(repository)
for saveListener in repository.saveListenerTable.values():
saveListener()
def setButtonFontWeightString( button, isBold ):
"Set button font weight given isBold."
try:
weightString = 'normal'
if isBold:
weightString = 'bold'
splitFont = button['font'].split()
button['font'] = ( splitFont[0], splitFont[1], weightString )
except:
pass
def setEntryText(entry, value):
"Set the entry text."
if entry == None:
return
entry.delete(0, Tkinter.END)
entry.insert(0, str(value))
def setIntegerValueToString( integerSetting, valueString ):
"Set the integer to the string."
dotIndex = valueString.find('.')
if dotIndex > - 1:
valueString = valueString[: dotIndex]
try:
integerSetting.value = int( valueString )
return
except:
print('Warning, can not read integer ' + integerSetting.name + ' ' + valueString )
print('Will try reading as a boolean, which might be a mistake.')
integerSetting.value = 0
if valueString.lower() == 'true':
integerSetting.value = 1
def setRepositoryToLine(lineIndex, lines, shortDictionary):
"Set setting dictionary to a setting line.globalSettingReplacements"
line = lines[lineIndex]
splitLine = line.split(globalSpreadsheetSeparator)
if len(splitLine) < 2:
return
fileSettingName = splitLine[0]
if fileSettingName in globalSettingReplacements:
fileSettingName = globalSettingReplacements[fileSettingName]
shortDictionaryKeys = shortDictionary.keys()
	shortDictionaryKeys.sort(key=len, reverse=True) # so that a short word like fill is not overridden by a longer word like fillet
for shortDictionaryKey in shortDictionaryKeys:
if fileSettingName[: len(shortDictionaryKey)].lower() == shortDictionaryKey:
shortDictionary[shortDictionaryKey].setValueToSplitLine(lineIndex, lines, splitLine)
return
def setSpinColor( setting ):
"Set the spin box color to the value, yellow if it is lower than the default and blue if it is higher."
if setting.entry == None:
return
if setting.backgroundColor == None:
setting.backgroundColor = setting.entry['background']
if setting.backgroundColor[0] != '#':
setting.backgroundColor = '#ffffff'
setting.colorWidth = len( setting.backgroundColor ) / 3
setting.grey = int( setting.backgroundColor[ 1 : 1 + setting.colorWidth ], 16 )
setting.white = int('f' * setting.colorWidth, 16 )
if abs( setting.value - setting.defaultValue ) <= 0.75 * setting.increment:
setting.entry['background'] = setting.backgroundColor
return
difference = setting.value - setting.defaultValue
if difference > 0.0:
wayLength = setting.to - setting.defaultValue
setting.entry['background'] = getAlongWayHexadecimalColor( setting.grey, setting.colorWidth, difference, ( 0, setting.white, setting.white ), wayLength )
return
wayLength = setting.from_ - setting.defaultValue
setting.entry['background'] = getAlongWayHexadecimalColor( setting.grey, setting.colorWidth, difference, ( setting.white, setting.white, 0 ), wayLength )
def startMainLoopFromConstructor(repository):
"Display the repository dialog and start the main loop."
try:
import Tkinter
except:
return
displayedDialogFromConstructor = getDisplayedDialogFromConstructor(repository)
if displayedDialogFromConstructor == None:
print('Warning, displayedDialogFromConstructor in settings is none, so the window will not be displayed.')
else:
displayedDialogFromConstructor.root.mainloop()
def startMainLoopFromWindow(window):
'Display the tableau window and start the main loop.'
if window == None:
return
if window.root == None:
print('Warning, window.root in startMainLoopFromWindow in settings is none, so the window will not be displayed.')
return
window.root.mainloop()
def temporaryAddPreferenceOverride(module, name, value):
global globalTemporaryOverrides
if not module in globalTemporaryOverrides:
globalTemporaryOverrides[module] = {}
globalTemporaryOverrides[module][name] = value
print('OVERRIDE %s %s %s' % (module, name, value))
print(globalTemporaryOverrides[module])
def temporaryApplyOverrides(repository):
'Apply any overrides that have been set at the command line.'
# The override dictionary is a mapping of repository names to
# key-value mappings.
global globalTemporaryOverrides
if repository.baseName in globalTemporaryOverrides:
settingTable = {}
for setting in repository.preferences:
settingTable[ setting.name ] = setting
for (name, value) in globalTemporaryOverrides[repository.baseName].items():
if name in settingTable:
settingTable[name].setValueToString(value)
else:
print('Override not applied for: %s, %s' % (name,value))
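# Usage sketch (hypothetical module and setting names): a command line wrapper
# could call temporaryAddPreferenceOverride('carve', 'Layer Height (mm)', '0.3')
# before the repositories are read; temporaryApplyOverrides then copies that
# value onto the matching setting via setValueToString when the 'carve'
# repository is constructed.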
def writeSettings(repository):
"Write the settings to a file."
profilesDirectoryPath = archive.getProfilesPath(getProfileBaseName(repository))
archive.makeDirectory(os.path.dirname(profilesDirectoryPath))
archive.writeFileText(profilesDirectoryPath, getRepositoryText(repository))
for setting in repository.preferences:
setting.updateSaveListeners()
def writeSettingsPrintMessage(repository):
"Set the settings to the dialog then write them."
writeSettings(repository)
print(repository.title.lower().capitalize() + ' have been saved.')
def writeValueListToRepositoryWriter( repositoryWriter, setting ):
"Write tab separated name and list to the repository writer."
repositoryWriter.write( setting.name )
for item in setting.value:
if item != '[]':
repositoryWriter.write(globalSpreadsheetSeparator)
repositoryWriter.write( item )
repositoryWriter.write('\n')
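# Output sketch: assuming globalSpreadsheetSeparator is a tab, a list setting
# named 'FrameList' holding ['Scale', 'Skew'] is written as the single line
# 'FrameList\tScale\tSkew\n'; any '[]' placeholder items are skipped.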
class StringSetting:
"A class to display, read & write a string."
def __init__(self):
"Set the update function to none."
self.entry = None
self.updateFunction = None
def __repr__(self):
"Get the string representation of this StringSetting."
return str(self.__dict__)
def addToDialog( self, gridPosition ):
"Add this to the dialog."
gridPosition.increment()
self.label = Tkinter.Label( gridPosition.master, text = self.name )
self.label.grid( row = gridPosition.row, column = 0, columnspan = 3, sticky = Tkinter.W )
self.createEntry( gridPosition.master )
self.setStateToValue()
self.entry.grid( row = gridPosition.row, column = 3, columnspan = 2, sticky = Tkinter.W )
self.bindEntry()
LabelHelp( self.repository.fileNameHelp, gridPosition.master, self.name, self.label )
def addToMenu( self, repositoryMenu ):
"Do nothing because this should only be added to a frameable repository menu."
pass
def addToMenuFrameable( self, repositoryMenu ):
"Add this to the frameable repository menu."
titleFromName = getTitleFromName( self.name )
helpWindowMenu = Tkinter.Menu( repositoryMenu, tearoff = 0 )
repositoryMenu.add_cascade( label = titleFromName, menu = helpWindowMenu, underline = 0 )
if self.name in self.repository.frameList.value:
helpWindowMenu.add_command( label = 'Remove from Window', command = self.removeFromWindow )
else:
helpWindowMenu.add_command( label = 'Add to Window', command = self.addToWindow )
helpWindowMenu.add_separator()
helpWindowMenu.add_command( label = 'Help', command = HelpPage().getOpenFromDocumentationSubName( self.repository.fileNameHelp + '#' + titleFromName ) )
def addToWindow(self):
"Add this to the repository frame list."
self.repository.frameList.addToList( self.name )
def bindEntry(self):
"Bind the entry to the update function."
if self.updateFunction != None:
self.entry.bind('<Return>', self.updateFunction )
def createEntry( self, root ):
"Create the entry."
self.entry = Tkinter.Entry( root )
def getFromValue( self, name, repository, value ):
"Initialize."
return self.getFromValueOnlyAddToRepository( name, repository, value )
def getFromValueOnly( self, name, repository, value ):
"Initialize."
self.defaultValue = value
self.name = name
self.repository = repository
self.value = value
return self
def getFromValueOnlyAddToRepository( self, name, repository, value ):
"Initialize."
repository.displayEntities.append(self)
repository.menuEntities.append(self)
repository.preferences.append(self)
return self.getFromValueOnly( name, repository, value )
def removeFromWindow(self):
"Remove this from the repository frame list."
self.repository.frameList.removeFromList( self.name )
def setStateToValue(self):
"Set the entry to the value."
setEntryText(self.entry, self.value)
def setToDisplay(self):
"Set the string to the entry field."
try:
valueString = self.entry.get()
self.setValueToString( valueString )
except:
pass
def setUpdateFunction( self, updateFunction ):
"Set the update function."
self.updateFunction = updateFunction
def setValueToSplitLine( self, lineIndex, lines, splitLine ):
"Set the value to the second word of a split line."
self.setValueToString(splitLine[1])
def setValueToString( self, valueString ):
"Set the value to the value string."
self.value = valueString
def updateSaveListeners(self):
"Update save listeners if any."
pass
def writeToRepositoryWriter( self, repositoryWriter ):
"Write tab separated name and value to the repository writer."
repositoryWriter.write('%s%s%s\n' % ( self.name, globalSpreadsheetSeparator, self.value ) )
class BooleanSetting( StringSetting ):
"A class to display, read & write a boolean."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
gridPosition.increment()
self.checkbutton = Tkinter.Checkbutton( gridPosition.master, command = self.toggleCheckbutton, text = self.name )
# toggleCheckbutton is being used instead of a Tkinter IntVar because there is a weird bug where it doesn't work properly if this setting is not on the first window.
self.checkbutton.grid( row = gridPosition.row, columnspan = 5, sticky = Tkinter.W )
self.setStateToValue()
LabelHelp( self.repository.fileNameHelp, gridPosition.master, self.name, self.checkbutton )
def addToMenu( self, repositoryMenu ):
"Add this to the repository menu."
self.activateToggleMenuCheckbutton = False
# activateToggleMenuCheckbutton is used instead of setting the command afterwards, because add_checkbutton does not return a checkbutton.
repositoryMenu.add_checkbutton( label = getTitleFromName( self.name ), command = self.toggleMenuCheckbutton )
if self.value:
repositoryMenu.invoke( repositoryMenu.index( Tkinter.END ) )
self.activateToggleMenuCheckbutton = True
def addToMenuFrameable( self, repositoryMenu ):
"Add this to the frameable repository menu."
titleFromName = getTitleFromName( self.name )
helpWindowMenu = Tkinter.Menu( repositoryMenu, tearoff = 0 )
repositoryMenu.add_cascade( label = titleFromName, menu = helpWindowMenu, underline = 0 )
self.addToMenu( helpWindowMenu )
helpWindowMenu.add_separator()
helpWindowMenu.add_command( label = 'Help', command = HelpPage().getOpenFromDocumentationSubName( self.repository.fileNameHelp + '#' + titleFromName ) )
def setStateToValue(self):
"Set the checkbutton to the boolean."
try:
if self.value:
self.checkbutton.select()
else:
self.checkbutton.deselect()
except:
pass
def setToDisplay(self):
"Do nothing because toggleCheckbutton is handling the value."
pass
def setValueToString( self, valueString ):
"Set the boolean to the string."
self.value = ( valueString.lower() == 'true')
def toggleCheckbutton(self):
"Workaround for Tkinter bug, toggle the value."
self.value = not self.value
self.setStateToValue()
if self.updateFunction != None:
self.updateFunction()
def toggleMenuCheckbutton(self):
"Workaround for Tkinter bug, toggle the value."
if self.activateToggleMenuCheckbutton:
self.value = not self.value
if self.updateFunction != None:
self.updateFunction()
class CloseListener:
"A class to listen to link a window to the global repository dialog list table."
def __init__( self, window, closeFunction = None ):
"Add the window to the global repository dialog list table."
self.closeFunction = closeFunction
self.window = window
self.shouldWasClosedBeBound = True
global globalRepositoryDialogListTable
euclidean.addElementToListDictionaryIfNotThere( window, window, globalRepositoryDialogListTable )
def listenToWidget( self, widget ):
"Listen to the destroy message of the widget."
if self.shouldWasClosedBeBound:
self.shouldWasClosedBeBound = False
widget.bind('<Destroy>', self.wasClosed )
def wasClosed(self, event):
"The dialog was closed."
global globalCloseListTables
for globalCloseListTable in globalCloseListTables:
if self.window in globalCloseListTable:
del globalCloseListTable[ self.window ]
if self.closeFunction != None:
self.closeFunction()
class DisplayToolButton:
"A class to display the tool dialog button, in a two column wide table."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
self.displayButton = Tkinter.Button( gridPosition.master, activebackground = 'black', activeforeground = 'white', text = getEachWordCapitalized( self.name ), command = self.displayDialog )
setButtonFontWeightString( self.displayButton, self.important )
gridPosition.incrementGivenNumberOfColumns(2)
self.displayButton.grid( row = gridPosition.row, column = gridPosition.column, columnspan = 2 )
def displayDialog(self):
"Display function."
ToolDialog().getFromPath( self.path ).display()
def getFromPath( self, important, name, path, repository ):
"Initialize."
self.important = important
self.name = name
self.path = path
self.repository = repository
repository.displayEntities.append(self)
return self
class FileHelpMenuBar:
def __init__( self, root ):
"Create a menu bar with a file and help menu."
self.underlineLetters = []
self.menuBar = Tkinter.Menu( root )
self.root = root
root.config( menu = self.menuBar )
self.fileMenu = Tkinter.Menu( self.menuBar, tearoff = 0 )
self.menuBar.add_cascade( label = "File", menu = self.fileMenu, underline = 0 )
self.underlineLetters.append('f')
def addMenuToMenuBar( self, labelText, menu ):
"Add a menu to the menu bar."
lowerLabelText = labelText.lower()
for underlineLetterIndex in xrange( len( lowerLabelText ) ):
underlineLetter = lowerLabelText[ underlineLetterIndex ]
if underlineLetter not in self.underlineLetters:
self.underlineLetters.append( underlineLetter )
self.menuBar.add_cascade( label = labelText, menu = menu, underline = underlineLetterIndex )
return
self.menuBar.add_cascade( label = labelText, menu = menu )
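# Underline sketch (illustrative labels): 'File' claims 'f', so a later menu
# labeled 'Fabricate' would be underlined at its second letter 'a', the first
# letter in its label that no earlier menu has claimed; if every letter is
# already taken, the menu is added without an accelerator underline.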
def addPluginToMenuBar( self, modulePath, repository, window ):
"Add a menu to the menu bar from a tool."
pluginModule = archive.getModuleWithPath( modulePath )
if pluginModule == None:
print('this should never happen, pluginModule in addPluginToMenuBar in settings is None.')
return None
repositoryMenu = Tkinter.Menu( self.menuBar, tearoff = 0 )
labelText = getEachWordCapitalized( os.path.basename( modulePath ) )
self.addMenuToMenuBar( labelText, repositoryMenu )
pluginModule.addToMenu( self.root, repositoryMenu, repository, window )
def completeMenu(self, closeFunction, repository, saveFunction, window):
"Complete the menu."
self.closeFunction = closeFunction
self.saveFunction = saveFunction
addAcceleratorCommand('<Control-KeyPress-s>', saveFunction, self.root, self.fileMenu, 'Save')
self.fileMenu.add_command(label = "Save and Close", command = self.saveClose)
addAcceleratorCommand('<Control-KeyPress-w>', closeFunction, self.root, self.fileMenu, 'Close')
self.fileMenu.add_separator()
addAcceleratorCommand('<Control-KeyPress-q>', quitWindows, self.root, self.fileMenu, 'Quit')
skeinforgePluginsPath = archive.getSkeinforgePath('skeinforge_plugins')
pluginFileNames = archive.getPluginFileNamesFromDirectoryPath(skeinforgePluginsPath)
for pluginFileName in pluginFileNames:
self.addPluginToMenuBar(os.path.join(skeinforgePluginsPath, pluginFileName), repository, window)
def saveClose(self):
"Call the save function then the close function."
self.saveFunction()
self.closeFunction()
class FileNameInput( StringSetting ):
"A class to display, read & write a fileName."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
self.gridPosition = gridPosition
gridPosition.executables.append(self)
def execute(self):
"Open the file picker."
self.wasCancelled = False
parent = self.gridPosition.master
try:
import tkFileDialog
summarized = archive.getSummarizedFileName(self.value)
initialDirectory = os.path.dirname( summarized )
if len( initialDirectory ) > 0:
initialDirectory += os.sep
else:
initialDirectory = "."
fileName = tkFileDialog.askopenfilename( filetypes = self.getFileNameFirstTypes(), initialdir = initialDirectory, initialfile = os.path.basename( summarized ), parent = parent, title = self.name )
self.setCancelledValue(fileName)
return
except:
print('Could not get the old directory in settings, so the file picker will be opened in the default directory.')
try:
fileName = tkFileDialog.askopenfilename( filetypes = self.getFileNameFirstTypes(), initialdir = '.', initialfile = '', parent = parent, title = self.name )
self.setCancelledValue(fileName)
except:
print('Error in execute in FileNameInput in settings, ' + self.name )
def getFileNameFirstTypes(self):
"Get the file types with the file type of the fileName moved to the front of the list."
allFiles = [ ('All', '*.*') ]
try:
basename = os.path.basename(self.value)
splitFile = basename.split('.')
allReadables = []
if len( self.fileTypes ) > 1:
for fileType in self.fileTypes:
allReadable = ( ('All Readable', fileType[1] ) )
allReadables.append( allReadable )
if len( splitFile ) < 1:
return allReadables + allFiles + self.fileTypes
baseExtension = splitFile[-1]
for fileType in self.fileTypes:
fileExtension = fileType[1].split('.')[-1]
if fileExtension == baseExtension:
fileNameFirstTypes = self.fileTypes[:]
fileNameFirstTypes.remove( fileType )
return [ fileType ] + allReadables + allFiles + fileNameFirstTypes
return allReadables + allFiles + self.fileTypes
except:
return allFiles
def getFromFileName( self, fileTypes, name, repository, value ):
"Initialize."
self.getFromValueOnly( name, repository, value )
self.fileTypes = fileTypes
self.wasCancelled = False
repository.displayEntities.append(self)
repository.preferences.append(self)
return self
def setCancelledValue( self, fileName ):
"Set the value to the file name and wasCancelled true if a file was not picked."
if ( str(fileName) == '()' or str(fileName) == ''):
self.wasCancelled = True
else:
self.value = fileName
def setToDisplay(self):
"Do nothing because the file dialog is handling the value."
pass
class FloatSetting( StringSetting ):
"A class to display, read & write a float."
def setValueToString( self, valueString ):
"Set the float to the string."
try:
self.value = float( valueString )
except:
print('Warning, cannot read float ' + self.name + ' ' + valueString )
class FloatSpin( FloatSetting ):
"A class to display, read & write an float in a spin box."
def addToMenuFrameable( self, repositoryMenu ):
"Add this to the frameable repository menu."
titleFromName = getTitleFromName( self.name )
helpWindowMenu = Tkinter.Menu( repositoryMenu, tearoff = 0 )
repositoryMenu.add_cascade( label = titleFromName, menu = helpWindowMenu, underline = 0 )
if self.name in self.repository.frameList.value:
helpWindowMenu.add_command( label = 'Remove from Window', command = self.removeFromWindow )
else:
helpWindowMenu.add_command( label = 'Add to Window', command = self.addToWindow )
helpWindowMenu.add_separator()
changeString = ' by %s' % self.increment
helpWindowMenu.add_command( label = 'Increase' + changeString, command = self.increase )
helpWindowMenu.add_command( label = 'Decrease' + changeString, command = self.decrease )
helpWindowMenu.add_separator()
helpWindowMenu.add_command( label = 'Help', command = HelpPage().getOpenFromDocumentationSubName( self.repository.fileNameHelp + '#' + titleFromName ) )
def bindEntry(self):
"Bind the entry to the update function."
self.entry.bind('<Return>', self.entryUpdated )
self.setColor()
def createEntry( self, root ):
"Create the entry."
self.entry = Tkinter.Spinbox( root, command = self.setColorToDisplay, from_ = self.from_, increment = self.increment, to = self.to )
def decrease(self):
"Decrease the value then set the state and color to the value."
self.value -= self.increment
self.setStateUpdateColor()
def entryUpdated(self, event=None):
"Create the entry."
self.setColorToDisplay()
if self.updateFunction != None:
self.updateFunction(event)
def getFromValue(self, from_, name, repository, to, value):
"Initialize."
self.backgroundColor = None
self.from_ = from_
self.minimumWidth = min(value - from_, to - value)
rank = euclidean.getRank(0.05 * (to - from_))
self.increment = euclidean.getIncrementFromRank(rank)
self.to = to
return self.getFromValueOnlyAddToRepository(name, repository, value)
def increase(self):
"Increase the value then set the state and color to the value."
self.value += self.increment
self.setStateUpdateColor()
def setColor(self, event=None):
"Set the color to the value, yellow if it is lower than the default and blue if it is higher."
setSpinColor(self)
def setColorToDisplay(self, event=None):
"Set the color to the value, yellow if it is lower than the default and blue if it is higher."
self.setToDisplay()
self.setColor()
def setStateToValue(self):
"Set the entry to the value."
setEntryText( self.entry, self.value )
self.setColor()
def setStateUpdateColor(self):
"Set the state to the value, call the update function, then set the color."
self.setStateToValue()
if self.updateFunction != None:
self.updateFunction()
class FloatSpinNotOnMenu( FloatSpin ):
"A class to display, read & write an float in a spin box, which is not to be added to a menu."
def getFromValueOnlyAddToRepository( self, name, repository, value ):
"Initialize."
repository.displayEntities.append(self)
repository.preferences.append(self)
return self.getFromValueOnly( name, repository, value )
class FloatSpinUpdate( FloatSpin ):
"A class to display, read, update & write an float in a spin box."
def createEntry( self, root ):
"Create the entry."
self.entry = Tkinter.Spinbox( root, command = self.entryUpdated, from_ = self.from_, increment = self.increment, to = self.to )
class FrameList:
"A class to list the frames."
def addToList(self, word):
"Add the word to the sorted list."
self.value.append(word)
self.value.sort()
self.repository.window.redisplayWindowUpdate()
def getFromValue( self, name, repository, value ):
"Initialize."
repository.preferences.append(self)
self.name = name
self.repository = repository
self.value = value
return self
def removeFromList(self, word):
"Remove the word from the sorted list."
self.value.remove(word)
self.value.sort()
self.repository.window.redisplayWindowUpdate()
def setToDisplay(self):
"Do nothing because frame list does not have a display."
pass
def setValueToSplitLine( self, lineIndex, lines, splitLine ):
"Set the value to the second and later words of a split line."
self.value = splitLine[1 :]
def updateSaveListeners(self):
"Update save listeners if any."
pass
def writeToRepositoryWriter( self, repositoryWriter ):
"Write tab separated name and list to the repository writer."
writeValueListToRepositoryWriter( repositoryWriter, self )
class GridHorizontal:
"A class to place elements horizontally on a grid."
def __init__( self, column, row ):
"Initialize the column and row."
self.column = column
self.columnStart = column
self.row = row
def getCopy(self):
"Get a copy."
copy = GridHorizontal( self.column, self.row )
copy.columnStart = self.columnStart
return copy
def increment(self):
"Increment the position horizontally."
self.column += 1
class GridVertical:
"A class to place elements vertically on a grid."
def __init__( self, column, row ):
"Initialize the column and row."
self.column = column
self.columnOffset = column
self.columnStart = column
self.row = row
self.rowStart = row
def execute(self):
"The execute button was clicked."
for executable in self.executables:
executable.execute()
saveAll()
self.repository.execute()
def getCopy(self):
"Get a copy."
copy = GridVertical( self.column, self.row )
copy.columnOffset = self.columnOffset
copy.columnStart = self.columnStart
copy.rowStart = self.rowStart
return copy
def increment(self):
"Increment the position vertically."
self.column = self.columnStart
self.columnOffset = self.columnStart
self.row += 1
def incrementGivenNumberOfColumns( self, numberOfColumns ):
"Increment the position vertically and offset it horizontally by the given number of columns."
self.column = self.columnOffset
if self.columnOffset == self.columnStart:
self.columnOffset = self.columnStart + 1
self.row += 1
return
if self.columnOffset < self.columnStart + numberOfColumns - 1:
self.columnOffset += 1
return
self.columnOffset = self.columnStart
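# Layout sketch: with columnStart 0 and numberOfColumns 3, successive calls
# place elements at (row 1, col 0), (row 1, col 1), (row 1, col 2),
# (row 2, col 0), ... i.e. the position walks left to right and wraps to a
# new row after every third element.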
def setExecutablesRepository( self, repository ):
"Set the executables to an empty list and set the repository."
self.executables = []
self.repository = repository
class HelpPage:
"A class to open a help page."
def __init__(self):
"Initialize column."
self.column = 3
def addToDialog( self, gridPosition ):
"Add this to the dialog."
capitalizedName = getEachWordCapitalized( self.name )
self.displayButton = Tkinter.Button( gridPosition.master, activebackground = 'black', activeforeground = 'white', command = self.openPage, text = capitalizedName )
if len( capitalizedName ) < 12:
self.displayButton['width'] = 10
self.displayButton.grid( row = gridPosition.row, column = self.column, columnspan = 2 )
def addToMenu( self, repositoryMenu ):
"Add this to the repository menu."
repositoryMenu.add_command( label = getTitleFromName( self.name ), command = self.openPage )
def addToMenuFrameable( self, repositoryMenu ):
"Add this to the frameable repository menu."
self.addToMenu( repositoryMenu )
def getFromNameAfterHTTP( self, afterHTTP, name, repository ):
"Initialize."
self.setToNameRepository( name, repository )
self.hypertextAddress = 'http://' + afterHTTP
return self
def getFromNameAfterWWW( self, afterWWW, name, repository ):
"Initialize."
self.setToNameRepository( name, repository )
self.hypertextAddress = 'http://www.' + afterWWW
return self
def getFromNameSubName( self, name, repository, subName=''):
"Initialize."
self.setToNameRepository( name, repository )
self.hypertextAddress = archive.getDocumentationPath( subName )
return self
def getOpenFromAbsolute( self, hypertextAddress ):
"Get the open help page function from the hypertext address."
self.hypertextAddress = hypertextAddress
return self.openPage
def getOpenFromAfterHTTP( self, afterHTTP ):
"Get the open help page function from the part of the address after the HTTP."
self.hypertextAddress = 'http://' + afterHTTP
return self.openPage
def getOpenFromAfterWWW( self, afterWWW ):
"Get the open help page function from the afterWWW of the address after the www."
self.hypertextAddress = 'http://www.' + afterWWW
return self.openPage
def getOpenFromDocumentationSubName( self, subName=''):
"Get the open help page function from the afterWWW of the address after the www."
self.hypertextAddress = archive.getDocumentationPath( subName )
return self.openPage
def openPage(self, event=None):
"Open the browser to the hypertext address."
openWebPage( self.hypertextAddress )
def setToNameRepository( self, name, repository ):
"Set to the name and repository."
self.name = name
self.repository = repository
repository.displayEntities.append(self)
repository.menuEntities.append(self)
class HelpPageRepository:
"A class to open a repository help page."
def __init__( self, repository ):
"Add this to the dialog."
self.repository = repository
def openPage(self, event=None):
"Open the browser to the repository help page."
if self.repository.openWikiManualHelpPage == None:
self.repository.openLocalHelpPage()
return
from skeinforge_application.skeinforge_utilities import skeinforge_help
helpRepository = getReadRepository( skeinforge_help.HelpRepository() )
if helpRepository.wikiManualPrimary.value:
self.repository.openWikiManualHelpPage()
return
self.repository.openLocalHelpPage()
class IntSetting( FloatSetting ):
"A class to display, read & write an int."
def setValueToString( self, valueString ):
"Set the integer to the string."
setIntegerValueToString( self, valueString )
class IntSpin(FloatSpin):
"A class to display, read & write an int in a spin box."
def getFromValue(self, from_, name, repository, to, value):
"Initialize."
self.backgroundColor = None
self.from_ = from_
rank = euclidean.getRank(0.05 * (to - from_))
self.increment = max(1, int(euclidean.getIncrementFromRank(rank)))
self.minimumWidth = min(value - from_, to - value)
self.to = to
return self.getFromValueOnlyAddToRepository(name, repository, value)
def getSingleIncrementFromValue( self, from_, name, repository, to, value ):
"Initialize."
self.backgroundColor = None
self.from_ = from_
self.increment = 1
self.minimumWidth = min(value - from_, to - value)
self.to = to
return self.getFromValueOnlyAddToRepository( name, repository, value )
def setValueToString( self, valueString ):
"Set the integer to the string."
setIntegerValueToString( self, valueString )
class IntSpinNotOnMenu( IntSpin ):
"A class to display, read & write an integer in a spin box, which is not to be added to a menu."
def getFromValueOnlyAddToRepository( self, name, repository, value ):
"Initialize."
repository.displayEntities.append(self)
repository.preferences.append(self)
return self.getFromValueOnly( name, repository, value )
class IntSpinUpdate( IntSpin ):
"A class to display, read, update & write an int in a spin box."
def createEntry( self, root ):
"Create the entry."
self.entry = Tkinter.Spinbox( root, command = self.entryUpdated, from_ = self.from_, increment = self.increment, to = self.to )
class LabelDisplay:
"A class to add a label."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
gridPosition.increment()
self.label = Tkinter.Label( gridPosition.master, text = self.name )
self.label.grid( row = gridPosition.row, column = 0, columnspan = self.columnspan, sticky = Tkinter.W )
LabelHelp( self.repository.fileNameHelp, gridPosition.master, self.name, self.label )
def getFromName( self, name, repository ):
"Initialize."
self.columnspan = 3
self.name = name
self.repository = repository
repository.displayEntities.append(self)
return self
class LabelHelp:
"A class to add help to a widget."
def __init__( self, fileNameHelp, master, name, widget ):
"Add menu to the widget."
if len( name ) < 1:
return
self.popupMenu = Tkinter.Menu( master, tearoff = 0 )
titleFromName = getTitleFromName( name.replace('- ', '').replace(' -', '') )
self.popupMenu.add_command( label = 'Help', command = HelpPage().getOpenFromDocumentationSubName( fileNameHelp + '#' + titleFromName ) )
widget.bind('<Button-1>', self.unpostPopupMenu )
widget.bind('<Button-2>', self.unpostPopupMenu )
widget.bind('<Button-3>', self.displayPopupMenu )
def displayPopupMenu(self, event=None):
'Display the popup menu when the button is right clicked.'
try:
self.popupMenu.tk_popup( event.x_root + 30, event.y_root, 0 )
finally:
self.popupMenu.grab_release()
def unpostPopupMenu(self, event=None):
'Unpost the popup menu.'
self.popupMenu.unpost()
class LabelSeparator:
"A class to add a label and menu separator."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
gridPosition.increment()
self.label = Tkinter.Label( gridPosition.master, text='')
self.label.grid( row = gridPosition.row, column = 0, columnspan = 3, sticky = Tkinter.W )
def addToMenu( self, repositoryMenu ):
"Add this to the repository menu."
repositoryMenu.add_separator()
def addToMenuFrameable( self, repositoryMenu ):
"Add this to the frameable repository menu."
self.addToMenu( repositoryMenu )
def getFromRepository( self, repository ):
"Initialize."
self.name = ''
self.repository = repository
repository.displayEntities.append(self)
repository.menuEntities.append(self)
return self
class LatentStringVar:
"A class to provide a StringVar when needed."
def __init__(self):
"Set the string var."
self.stringVar = None
def getString(self):
"Get the string."
return self.getVar().get()
def getVar(self):
"Get the string var."
if self.stringVar == None:
self.stringVar = Tkinter.StringVar()
return self.stringVar
def setString(self, word):
"Set the string."
self.getVar().set(word)
class LayerCount:
'A class to handle the layerIndex.'
def __init__(self):
'Initialize.'
self.layerIndex = -1
def __repr__(self):
'Get the string representation of this LayerCount.'
return str(self.layerIndex)
def printProgressIncrement(self, procedureName):
'Print progress then increment layerIndex.'
self.layerIndex += 1
printProgress(self.layerIndex, procedureName)
class MenuButtonDisplay:
"A class to add a menu button."
def addRadiosToDialog( self, gridPosition ):
"Add the menu radios to the dialog."
for menuRadio in self.menuRadios:
menuRadio.addToDialog( gridPosition )
def addToMenu( self, repositoryMenu ):
"Add this to the repository menu."
if len( self.menuRadios ) < 1:
print('The MenuButtonDisplay in settings should have menu items.')
print(self.name)
return
self.menu = Tkinter.Menu( repositoryMenu, tearoff = 0 )
repositoryMenu.add_cascade( label = getTitleFromName( self.name ), menu = self.menu )
self.setRadioVarToName( self.menuRadios[0].name )
def addToMenuFrameable( self, repositoryMenu ):
"Add this to the frameable repository menu."
titleFromName = getTitleFromName( self.name )
self.addToMenu( repositoryMenu )
self.menu.add_command( label = 'Help', command = HelpPage().getOpenFromDocumentationSubName( self.repository.fileNameHelp + '#' + titleFromName ) )
self.menu.add_separator()
def getFromName( self, name, repository ):
"Initialize."
self.columnspan = 2
self.menuRadios = []
self.name = name
self.radioVar = None
self.repository = repository
repository.menuEntities.append(self)
return self
def removeMenus(self):
"Remove all menus."
deleteMenuItems( self.menu )
self.menuRadios = []
def setRadioVarToName(self, name):
"Get the menu button."
self.optionList = [name]
self.radioVar = Tkinter.StringVar()
self.radioVar.set( self.optionList[0] )
def setToNameAddToDialog( self, name, gridPosition ):
"Get the menu button."
if self.radioVar != None:
return
gridPosition.increment()
self.setRadioVarToName( name )
self.label = Tkinter.Label( gridPosition.master, text = self.name )
self.label.grid( row = gridPosition.row, column = 0, columnspan = 3, sticky = Tkinter.W )
self.menuButton = Tkinter.OptionMenu( gridPosition.master, self.radioVar, self.optionList )
self.menuButton.grid( row = gridPosition.row, column = 3, columnspan = self.columnspan, sticky = Tkinter.W )
self.menuButton.menu = Tkinter.Menu( self.menuButton, tearoff = 0 )
self.menu = self.menuButton.menu
self.menuButton['menu'] = self.menu
LabelHelp( self.repository.fileNameHelp, gridPosition.master, self.name, self.label )
class MenuRadio( BooleanSetting ):
"A class to display, read & write a boolean with associated menu radio button."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
self.menuButtonDisplay.setToNameAddToDialog( self.name, gridPosition )
self.addToSubmenu()
def addToMenu( self, repositoryMenu ):
"Add this to the submenu set by MenuButtonDisplay, the repository menu is ignored"
self.addToSubmenu()
def addToMenuFrameable( self, repositoryMenu ):
"Add this to the frameable repository menu."
self.addToMenu( repositoryMenu )
def addToSubmenu(self):
"Add this to the submenu."
self.activate = False
menu = self.menuButtonDisplay.menu
menu.add_radiobutton( label = self.name, command = self.clickRadio, value = self.name, variable = self.menuButtonDisplay.radioVar )
self.menuLength = menu.index( Tkinter.END )
if self.value:
self.menuButtonDisplay.radioVar.set( self.name )
self.invoke()
self.activate = True
def clickRadio(self):
"Workaround for Tkinter bug, invoke and set the value when clicked."
if not self.activate:
return
self.menuButtonDisplay.radioVar.set( self.name )
if self.updateFunction != None:
self.updateFunction()
def getFromMenuButtonDisplay( self, menuButtonDisplay, name, repository, value ):
"Initialize."
self.getFromValueOnlyAddToRepository( name, repository, value )
self.menuButtonDisplay = menuButtonDisplay
self.menuButtonDisplay.menuRadios.append(self)
return self
def invoke(self):
"Workaround for Tkinter bug, invoke to set the value when changed."
self.menuButtonDisplay.menu.invoke( self.menuLength )
def setStateToValue(self):
"Set the checkbutton to the boolean."
try:
if self.value:
self.invoke()
except:
pass
def setToDisplay(self):
"Set the boolean to the checkbutton."
if self.menuButtonDisplay.radioVar != None:
self.value = ( self.menuButtonDisplay.radioVar.get() == self.name )
class PluginFrame:
"A class to display the plugins in a frame."
def __init__(self):
"Initialize."
self.gridTable = {}
self.latentStringVar = LatentStringVar()
self.oldLatentString = ''
def addToDialog( self, gridPosition ):
"Add this to the dialog."
gridPosition.increment()
self.gridPosition = gridPosition.getCopy()
self.gridPosition.master = gridPosition.master
self.createFrame( gridPosition )
def createFrame( self, gridPosition ):
"Create the frame."
gridVertical = GridVertical( 0, 0 )
gridVertical.master = Tkinter.LabelFrame( gridPosition.master, borderwidth = 3, relief = 'raised')
gridVertical.master.grid( row = gridPosition.row, column = gridPosition.column, columnspan = 12, sticky = Tkinter.E + Tkinter.W + Tkinter.N + Tkinter.S )
gridPosition.master.grid_rowconfigure( gridPosition.row, weight = 1 )
gridPosition.master.grid_columnconfigure( gridPosition.column + 11, weight = 1 )
if self.latentStringVar.getString() == '':
self.defaultRadioButton.setSelect()
self.gridTable[ self.latentStringVar.getString() ] = gridVertical
path = os.path.join( self.directoryPath, self.latentStringVar.getString() )
pluginModule = archive.getModuleWithPath(path)
if pluginModule == None:
print('this should never happen, pluginModule in createFrame in PluginFrame in settings is None')
print(path)
return
gridVertical.repository = getReadRepository( pluginModule.getNewRepository() )
gridVertical.frameGridVertical = GridVertical( 0, 0 )
gridVertical.frameGridVertical.setExecutablesRepository( gridVertical.repository )
executeTitle = gridVertical.repository.executeTitle
if executeTitle != None:
executeButton = Tkinter.Button( gridVertical.master, activebackground = 'black', activeforeground = 'blue', text = executeTitle, command = gridVertical.frameGridVertical.execute )
executeButton.grid( row = gridVertical.row, column = gridVertical.column, sticky = Tkinter.W )
gridVertical.column += 1
self.helpButton = Tkinter.Button( gridVertical.master, activebackground = 'black', activeforeground = 'white', text = "?", command = HelpPageRepository( gridVertical.repository ).openPage )
self.helpButton.grid( row = gridVertical.row, column = gridVertical.column, sticky = Tkinter.W )
addEmptyRow( gridVertical )
gridVertical.increment()
from fabmetheus_utilities.hidden_scrollbar import HiddenScrollbar
gridVertical.xScrollbar = HiddenScrollbar( gridVertical.master, orient = Tkinter.HORIZONTAL )
gridVertical.xScrollbar.grid( row = gridVertical.row + 1, column = gridVertical.column, columnspan = 11, sticky = Tkinter.E + Tkinter.W )
gridVertical.yScrollbar = HiddenScrollbar( gridVertical.master )
gridVertical.yScrollbar.grid( row = gridVertical.row, column = gridVertical.column + 12, sticky = Tkinter.N + Tkinter.S )
canvasHeight = min( 1000, gridPosition.master.winfo_screenheight() - 540 ) - 6 - int( gridVertical.xScrollbar['width'] )
canvasWidth = min( 650, gridPosition.master.winfo_screenwidth() - 100 ) - 6 - int( gridVertical.yScrollbar['width'] )
gridVertical.canvas = Tkinter.Canvas( gridVertical.master, height = canvasHeight, highlightthickness = 0, width = canvasWidth )
gridVertical.frameGridVertical.master = Tkinter.Frame( gridVertical.canvas )
for setting in gridVertical.repository.displayEntities:
setting.addToDialog( gridVertical.frameGridVertical )
addEmptyRow( gridVertical.frameGridVertical )
gridVertical.frameGridVertical.master.update_idletasks()
gridVertical.xScrollbar.config( command = gridVertical.canvas.xview )
gridVertical.canvas['xscrollcommand'] = gridVertical.xScrollbar.set
gridVertical.yScrollbar.config( command = gridVertical.canvas.yview )
gridVertical.canvas['yscrollcommand'] = gridVertical.yScrollbar.set
gridVertical.canvas.create_window( 0, 0, anchor = Tkinter.NW, window = gridVertical.frameGridVertical.master )
gridVertical.canvas['scrollregion'] = gridVertical.frameGridVertical.master.grid_bbox()
gridVertical.canvas.grid( row = gridVertical.row, column = gridVertical.column, columnspan = 12, sticky = Tkinter.E + Tkinter.W + Tkinter.N + Tkinter.S )
gridVertical.master.grid_rowconfigure( gridVertical.row, weight = 1 )
gridVertical.master.grid_columnconfigure( gridVertical.column + 11, weight = 1 )
gridVertical.frameGridVertical.master.lift()
self.oldLatentString = self.latentStringVar.getString()
def focusSetMaster( self, gridPosition ):
"Set the focus to the plugin master."
gridPosition.frameGridVertical.master.focus_set()
def getFromPath( self, defaultRadioButton, directoryPath, repository ):
"Initialize."
self.defaultRadioButton = defaultRadioButton
self.directoryPath = directoryPath
self.name = 'PluginFrame'
self.repository = repository
repository.displayEntities.append(self)
repository.preferences.append(self)
return self
def setStateToValue(self):
"Set the state of all the plugins to the value."
for gridTableValue in self.gridTable.values():
cancelRepository( gridTableValue.repository )
def setToDisplay(self):
"Set the plugins to the display."
pass
def update(self):
"Update the frame."
if len(self.gridTable) < 1:
return
if self.oldLatentString == self.latentStringVar.getString():
return
self.oldLatentString = self.latentStringVar.getString()
self.repository.preferences.remove(self)
for setting in self.repository.preferences:
setting.setToDisplay()
writeSettingsPrintMessage(self.repository)
self.repository.preferences.append(self)
if self.latentStringVar.getString() in self.gridTable:
gridPosition = self.gridTable[self.latentStringVar.getString()]
gridPosition.master.lift()
self.focusSetMaster(gridPosition)
return
self.createFrame(self.gridPosition)
def updateSaveListeners(self):
"Update save listeners if any."
gridTableKeys = self.gridTable.keys()
gridTableKeys.sort()
for gridTableKey in gridTableKeys:
saveRepository( self.gridTable[ gridTableKey ].repository )
def writeToRepositoryWriter( self, repositoryWriter ):
"Write tab separated name and value to the repository writer."
pass
class PluginGroupFrame( PluginFrame ):
"A class to display the plugin groups in a frame."
def createFrame( self, gridPosition ):
"Create the frame."
gridVertical = GridVertical( 0, 0 )
gridVertical.master = Tkinter.LabelFrame( gridPosition.master, borderwidth = 3, relief = 'raised')
gridVertical.master.grid( row = gridPosition.row, column = gridPosition.column, columnspan = 11, sticky = Tkinter.E + Tkinter.W + Tkinter.N + Tkinter.S )
gridPosition.master.grid_rowconfigure( gridPosition.row, weight = 1 )
gridPosition.master.grid_columnconfigure( gridPosition.column + 10, weight = 1 )
if self.latentStringVar.getString() == '':
self.defaultRadioButton.setSelect()
self.gridTable[ self.latentStringVar.getString() ] = gridVertical
path = os.path.join( self.directoryPath, self.latentStringVar.getString() )
pluginModule = archive.getModuleWithPath(path)
if pluginModule == None:
print('this should never happen, pluginModule in createFrame in PluginGroupFrame in settings is None')
print(path)
return
gridVertical.repository = getReadRepository( pluginModule.getNewRepository() )
gridVertical.setExecutablesRepository( gridVertical.repository )
executeTitle = gridVertical.repository.executeTitle
if executeTitle != None:
executeButton = Tkinter.Button( gridVertical.master, activebackground = 'black', activeforeground = 'blue', text = executeTitle, command = gridVertical.execute )
executeButton.grid( row = gridVertical.row, column = gridVertical.column, sticky = Tkinter.W )
gridVertical.column += 1
self.helpButton = Tkinter.Button( gridVertical.master, activebackground = 'black', activeforeground = 'white', text = "?", command = HelpPageRepository( gridVertical.repository ).openPage )
self.helpButton.grid( row = gridVertical.row, column = gridVertical.column, sticky = Tkinter.W )
addEmptyRow( gridVertical )
gridVertical.increment()
for setting in gridVertical.repository.displayEntities:
setting.addToDialog( gridVertical )
gridVertical.master.update_idletasks()
gridVertical.master.lift()
self.oldLatentString = self.latentStringVar.getString()
def focusSetMaster( self, gridPosition ):
"Set the focus to the plugin master."
gridPosition.master.focus_set()
class Radio( BooleanSetting ):
"A class to display, read & write a boolean with associated radio button."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
gridPosition.increment()
self.createRadioButton( gridPosition )
self.radiobutton.grid( row = gridPosition.row, column = 0, columnspan = 3, sticky = Tkinter.W )
self.setStateToValue()
def clickRadio(self):
"Workaround for Tkinter bug, set the value."
self.latentStringVar.setString( self.radiobutton['value'] )
if self.updateFunction != None:
self.updateFunction()
def createRadioButton( self, gridPosition ):
"Create the radio button."
self.radiobutton = Tkinter.Radiobutton( gridPosition.master, command = self.clickRadio, text = self.name, value = self.name, variable = self.latentStringVar.getVar() )
LabelHelp( self.repository.fileNameHelp, gridPosition.master, self.name, self.radiobutton )
def getFromRadio( self, latentStringVar, name, repository, value ):
"Initialize."
self.getFromValueOnly( name, repository, value )
self.latentStringVar = latentStringVar
repository.displayEntities.append(self)
repository.preferences.append(self)
# when addToMenu is added to this entity, the line below should be uncommented
# repository.menuEntities.append(self)
return self
def setSelect(self):
"Set the int var and select the radio button."
oldLatentStringValue = self.latentStringVar.getString()
self.latentStringVar.setString( self.radiobutton['value'] )
self.radiobutton.select()
if oldLatentStringValue == '':
return False
return oldLatentStringValue != self.latentStringVar.getString()
def setStateToValue(self):
"Set the checkbutton to the boolean."
if self.value:
if self.setSelect():
if self.updateFunction != None:
self.updateFunction()
def setToDisplay(self):
"Set the boolean to the checkbutton."
self.value = ( self.latentStringVar.getString() == self.radiobutton['value'] )
class RadioCapitalized( Radio ):
"A class to display, read & write a boolean with associated radio button."
def createRadioButton( self, gridPosition ):
"Create the radio button."
capitalizedName = getEachWordCapitalized( self.name )
self.radiobutton = Tkinter.Radiobutton( gridPosition.master, command = self.clickRadio, text = capitalizedName, value = self.name, variable = self.latentStringVar.getVar() )
class RadioCapitalizedButton( Radio ):
"A class to display, read & write a boolean with associated radio button."
def createRadioButton( self, gridPosition ):
"Create the radio button."
capitalizedName = getEachWordCapitalized( self.name )
self.radiobutton = Tkinter.Radiobutton( gridPosition.master, command = self.clickRadio, text = capitalizedName, value = self.name, variable = self.latentStringVar.getVar() )
self.displayButton = Tkinter.Button( gridPosition.master, activebackground = 'black', activeforeground = 'white', text = capitalizedName, command = self.displayDialog )
self.displayButton.grid( row = gridPosition.row, column = 3, columnspan = 2 )
def displayDialog(self):
"Display function."
ToolDialog().getFromPath( self.path ).display()
self.setSelect()
def getFromPath( self, latentStringVar, name, path, repository, value ):
"Initialize."
self.getFromRadio( latentStringVar, name, repository, value )
self.path = path
return self
class RadioPlugin( RadioCapitalized ):
"A class to display, read & write a boolean with associated radio button."
def addToDialog( self, gridPosition ):
"Add this to the dialog."
self.createRadioButton( gridPosition )
self.radiobutton['activeforeground'] = 'magenta'
self.radiobutton['selectcolor'] = 'white'
self.radiobutton['borderwidth'] = 3
self.radiobutton['indicatoron'] = 0
setButtonFontWeightString( self.radiobutton, self.important )
self.incrementGridPosition( gridPosition )
self.setStateToValue()
def getFromRadio( self, important, latentStringVar, name, repository, value ):
"Initialize."
self.important = important
return RadioCapitalized.getFromRadio( self, latentStringVar, name, repository, value )
def incrementGridPosition( self, gridPosition ):
"Increment the grid position."
gridPosition.incrementGivenNumberOfColumns( 10 )
self.radiobutton.grid( row = gridPosition.row, column = gridPosition.column, sticky = Tkinter.W )
class TextSetting( StringSetting ):
"A class to display, read & write a text."
def __init__(self):
"Set the update function to none."
self.tokenConversions = [
TokenConversion(),
TokenConversion('carriageReturn', '\r'),
TokenConversion('doubleQuote', '"'),
TokenConversion('newline', '\n'),
TokenConversion('semicolon', ';'),
TokenConversion('singleQuote', "'" ),
TokenConversion('tab', '\t') ]
self.updateFunction = None
def addToDialog( self, gridPosition ):
"Add this to the dialog."
gridPosition.increment()
self.label = Tkinter.Label( gridPosition.master, text = self.name )
self.label.grid( row = gridPosition.row, column = 0, columnspan = 3, sticky = Tkinter.W )
gridPosition.increment()
self.entry = Tkinter.Text( gridPosition.master )
self.setStateToValue()
self.entry.grid( row = gridPosition.row, column = 0, columnspan = 5, sticky = Tkinter.W )
LabelHelp( self.repository.fileNameHelp, gridPosition.master, self.name, self.label )
def getFromValue( self, name, repository, value ):
"Initialize."
self.getFromValueOnly( name, repository, value )
repository.displayEntities.append(self)
repository.preferences.append(self)
return self
def setStateToValue(self):
"Set the entry to the value."
try:
self.entry.delete( 1.0, Tkinter.END )
self.entry.insert( Tkinter.INSERT, self.value )
except:
pass
def setToDisplay(self):
"Set the string to the entry field."
valueString = self.entry.get( 1.0, Tkinter.END )
self.setValueToString( valueString )
def setValueToSplitLine( self, lineIndex, lines, splitLine ):
"Set the value to the second word of a split line."
replacedValue = splitLine[1]
for tokenConversion in reversed( self.tokenConversions ):
replacedValue = tokenConversion.getTokenizedString( replacedValue )
self.setValueToString( replacedValue )
def writeToRepositoryWriter( self, repositoryWriter ):
"Write tab separated name and value to the repository writer."
replacedValue = self.value
for tokenConversion in self.tokenConversions:
replacedValue = tokenConversion.getNamedString( replacedValue )
repositoryWriter.write('%s%s%s\n' % ( self.name, globalSpreadsheetSeparator, replacedValue ) )
class TokenConversion:
"A class to convert tokens in a string."
def __init__( self, name = 'replaceToken', token = '___replaced___'):
"Set the name and token."
self.replacedName = '___replaced___' + name
self.token = token
def getNamedString( self, text ):
"Get a string with the tokens changed to names."
return text.replace( self.token, self.replacedName )
def getTokenizedString( self, text ):
"Get a string with the names changed to tokens."
return text.replace( self.replacedName, self.token )
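# Round trip sketch: TokenConversion('newline', '\n').getNamedString('a\nb')
# yields 'a___replaced___newlineb', and getTokenizedString on that result
# restores 'a\nb'; TextSetting applies the conversions in one order when
# writing and in reversed order when reading, so multi-line values survive
# the one-line-per-setting file format.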
class ToolDialog:
"A class to display the tool repository dialog."
def addPluginToMenu( self, menu, path ):
"Add the display command to the menu."
name = os.path.basename(path)
self.path = path
menu.add_command( label = getEachWordCapitalized( name ) + '...', command = self.display )
def display(self):
"Display the tool repository dialog."
global globalRepositoryDialogListTable
for repositoryDialog in globalRepositoryDialogListTable:
if getPathInFabmetheusFromFileNameHelp( repositoryDialog.repository.fileNameHelp ) == self.path:
liftRepositoryDialogs( globalRepositoryDialogListTable[ repositoryDialog ] )
return
self.repositoryDialog = getDisplayedDialogFromPath( self.path )
def getFromPath( self, path ):
"Initialize and return display function."
self.path = path
return self
class WindowPosition( StringSetting ):
"A class to display, read & write a window position."
def addToDialog( self, gridPosition ):
"Set the root to later get the geometry."
self.root = gridPosition.master
self.setToDisplay()
def getFromValue( self, repository, value ):
"Initialize."
self.getFromValueOnly('WindowPosition', repository, value )
repository.displayEntities.append(self)
repository.preferences.append(self)
return self
def setToDisplay(self):
"Set the string to the window position."
try:
geometryString = self.root.geometry()
except:
return
if geometryString == '1x1+0+0':
return
firstPlusIndexPlusOne = geometryString.find('+') + 1
self.value = geometryString[ firstPlusIndexPlusOne : ]
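# Geometry sketch (illustrative values): a Tkinter geometry string such as
# '650x500+120+80' keeps only the part after the first '+', so value becomes
# '120+80'; the default '1x1+0+0' reported before the window is mapped is
# ignored.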
def setWindowPosition(self):
"Set the window position."
movedGeometryString = '%sx%s+%s' % ( self.root.winfo_reqwidth(), self.root.winfo_reqheight(), self.value )
self.root.geometry( movedGeometryString )
class RepositoryDialog:
def __init__( self, repository, root ):
"Add entities to the dialog."
self.isFirst = ( len( globalRepositoryDialogListTable.keys() ) == 0 )
self.closeListener = CloseListener(self)
self.repository = repository
self.gridPosition = GridVertical( 0, - 1 )
self.gridPosition.setExecutablesRepository(repository)
self.gridPosition.master = root
self.root = root
self.openDialogListeners = []
repository.repositoryDialog = self
root.withdraw()
title = repository.title
if repository.fileNameInput != None:
title = os.path.basename( repository.fileNameInput.value ) + ' - ' + title
root.title( title )
fileHelpMenuBar = FileHelpMenuBar( root )
fileHelpMenuBar.completeMenu( self.close, repository, self.save, self )
for setting in repository.displayEntities:
setting.addToDialog( self.gridPosition )
if self.gridPosition.row < 20:
addEmptyRow( self.gridPosition )
self.addButtons( repository, root )
root.update_idletasks()
self.setWindowPositionDeiconify()
root.deiconify()
for openDialogListener in self.openDialogListeners:
openDialogListener.openDialog()
def __repr__(self):
"Get the string representation of this RepositoryDialog."
return self.repository.title
def addButtons( self, repository, root ):
"Add buttons to the dialog."
columnIndex = 0
self.gridPosition.increment()
saveCommand = self.save
saveText = 'Save'
if self.isFirst:
saveCommand = saveAll
saveText = 'Save All'
if repository.executeTitle != None:
executeButton = Tkinter.Button( root, activebackground = 'black', activeforeground = 'blue', text = repository.executeTitle, command = self.gridPosition.execute )
executeButton.grid( row = self.gridPosition.row, column = columnIndex, columnspan = 2, sticky = Tkinter.W )
columnIndex += 2
self.helpButton = Tkinter.Button( root, activebackground = 'black', activeforeground = 'white', text = "?", command = HelpPageRepository(self.repository).openPage )
self.helpButton.grid( row = self.gridPosition.row, column = columnIndex, sticky = Tkinter.W )
self.closeListener.listenToWidget( self.helpButton )
columnIndex += 6
cancelButton = Tkinter.Button( root, activebackground = 'black', activeforeground = 'orange', command = self.cancel, fg = 'orange', text = 'Cancel')
cancelButton.grid( row = self.gridPosition.row, column = columnIndex )
columnIndex += 1
self.saveButton = Tkinter.Button( root, activebackground = 'black', activeforeground = 'darkgreen', command = saveCommand, fg = 'darkgreen', text = saveText )
self.saveButton.grid( row = self.gridPosition.row, column = columnIndex )
def cancel(self, event=None):
"Set all entities to their saved state."
cancelRepository(self.repository)
def close(self, event=None):
"The dialog was closed."
try:
self.root.destroy()
except:
pass
def save(self, event=None):
"Set the entities to the dialog then write them."
saveRepository(self.repository)
def setWindowPositionDeiconify(self):
"Set the window position if that setting exists."
for setting in self.repository.preferences:
if setting.name == 'WindowPosition':
setting.setWindowPosition()
return
| agpl-3.0 | -6,720,858,821,804,479,000 | 37.560606 | 199 | 0.752367 | false | 3.538527 | false | false | false | 0.044122 |
wolverineav/horizon | openstack_dashboard/dashboards/admin/routers/urls.py | 8 | 1134 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.routers import views
ROUTER_URL = r'^(?P<router_id>[^/]+)/%s'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(ROUTER_URL % '$',
views.DetailView.as_view(),
name='detail'),
url(ROUTER_URL % 'update',
views.UpdateView.as_view(),
name='update'),
url(r'^(?P<l3_agent_id>[^/]+)/l3_agent_list',
views.L3AgentView.as_view(),
name='l3_agent_list'),
]
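# Usage sketch (illustrative, not part of this module): with this urls.py
# mounted under the admin dashboard, the detail page for a router can be
# reversed by name, assuming the standard horizon:admin:routers namespace:
# reverse('horizon:admin:routers:detail', args=[router_id])
# where ROUTER_URL's <router_id> group supplies the argument.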
| apache-2.0 | 6,355,232,451,531,995,000 | 32.352941 | 78 | 0.65873 | false | 3.53271 | false | false | false | 0 |
Serag8/Bachelor | google_appengine/google/appengine/datastore/entity_pb.py | 1 | 137845 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
class PropertyValue_ReferenceValuePathElement(ProtocolBuffer.ProtocolMessage):
has_type_ = 0
type_ = ""
has_id_ = 0
id_ = 0
has_name_ = 0
name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def type(self): return self.type_
def set_type(self, x):
self.has_type_ = 1
self.type_ = x
def clear_type(self):
if self.has_type_:
self.has_type_ = 0
self.type_ = ""
def has_type(self): return self.has_type_
def id(self): return self.id_
def set_id(self, x):
self.has_id_ = 1
self.id_ = x
def clear_id(self):
if self.has_id_:
self.has_id_ = 0
self.id_ = 0
def has_id(self): return self.has_id_
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_type()): self.set_type(x.type())
if (x.has_id()): self.set_id(x.id())
if (x.has_name()): self.set_name(x.name())
def Equals(self, x):
if x is self: return 1
if self.has_type_ != x.has_type_: return 0
if self.has_type_ and self.type_ != x.type_: return 0
if self.has_id_ != x.has_id_: return 0
if self.has_id_ and self.id_ != x.id_: return 0
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: type not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.type_))
if (self.has_id_): n += 2 + self.lengthVarInt64(self.id_)
if (self.has_name_): n += 2 + self.lengthString(len(self.name_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_type_):
n += 1
n += self.lengthString(len(self.type_))
if (self.has_id_): n += 2 + self.lengthVarInt64(self.id_)
if (self.has_name_): n += 2 + self.lengthString(len(self.name_))
return n
def Clear(self):
self.clear_type()
self.clear_id()
self.clear_name()
def OutputUnchecked(self, out):
out.putVarInt32(122)
out.putPrefixedString(self.type_)
if (self.has_id_):
out.putVarInt32(128)
out.putVarInt64(self.id_)
if (self.has_name_):
out.putVarInt32(138)
out.putPrefixedString(self.name_)
def OutputPartial(self, out):
if (self.has_type_):
out.putVarInt32(122)
out.putPrefixedString(self.type_)
if (self.has_id_):
out.putVarInt32(128)
out.putVarInt64(self.id_)
if (self.has_name_):
out.putVarInt32(138)
out.putPrefixedString(self.name_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 116: break
if tt == 122:
self.set_type(d.getPrefixedString())
continue
if tt == 128:
self.set_id(d.getVarInt64())
continue
if tt == 138:
self.set_name(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatString(self.type_))
if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatInt64(self.id_))
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
return res
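# PropertyValue_PointValue: a geographic point stored as two required
# doubles.  On the wire it is group 5 of PropertyValue (start tag 43, end
# tag 44), with x and y written as fixed 8-byte doubles under tags 49 and
# 57 -- which is why ByteSize() below is the constant n + 18.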
class PropertyValue_PointValue(ProtocolBuffer.ProtocolMessage):
has_x_ = 0
x_ = 0.0
has_y_ = 0
y_ = 0.0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def x(self): return self.x_
def set_x(self, x):
self.has_x_ = 1
self.x_ = x
def clear_x(self):
if self.has_x_:
self.has_x_ = 0
self.x_ = 0.0
def has_x(self): return self.has_x_
def y(self): return self.y_
def set_y(self, x):
self.has_y_ = 1
self.y_ = x
def clear_y(self):
if self.has_y_:
self.has_y_ = 0
self.y_ = 0.0
def has_y(self): return self.has_y_
def MergeFrom(self, x):
assert x is not self
if (x.has_x()): self.set_x(x.x())
if (x.has_y()): self.set_y(x.y())
def Equals(self, x):
if x is self: return 1
if self.has_x_ != x.has_x_: return 0
if self.has_x_ and self.x_ != x.x_: return 0
if self.has_y_ != x.has_y_: return 0
if self.has_y_ and self.y_ != x.y_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_x_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: x not set.')
if (not self.has_y_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: y not set.')
return initialized
def ByteSize(self):
n = 0
return n + 18
def ByteSizePartial(self):
n = 0
if (self.has_x_):
n += 9
if (self.has_y_):
n += 9
return n
def Clear(self):
self.clear_x()
self.clear_y()
def OutputUnchecked(self, out):
out.putVarInt32(49)
out.putDouble(self.x_)
out.putVarInt32(57)
out.putDouble(self.y_)
def OutputPartial(self, out):
if (self.has_x_):
out.putVarInt32(49)
out.putDouble(self.x_)
if (self.has_y_):
out.putVarInt32(57)
out.putDouble(self.y_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 44: break
if tt == 49:
self.set_x(d.getDouble())
continue
if tt == 57:
self.set_y(d.getDouble())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_x_: res+=prefix+("x: %s\n" % self.DebugFormat(self.x_))
if self.has_y_: res+=prefix+("y: %s\n" % self.DebugFormat(self.y_))
return res
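# PropertyValue_UserValue: a user value stored inline as group 8 of
# PropertyValue (start tag 67, end tag 68).  email, auth_domain and gaiaid
# are required; nickname, obfuscated_gaiaid and the federated-login fields
# are optional.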
class PropertyValue_UserValue(ProtocolBuffer.ProtocolMessage):
has_email_ = 0
email_ = ""
has_auth_domain_ = 0
auth_domain_ = ""
has_nickname_ = 0
nickname_ = ""
has_gaiaid_ = 0
gaiaid_ = 0
has_obfuscated_gaiaid_ = 0
obfuscated_gaiaid_ = ""
has_federated_identity_ = 0
federated_identity_ = ""
has_federated_provider_ = 0
federated_provider_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def email(self): return self.email_
def set_email(self, x):
self.has_email_ = 1
self.email_ = x
def clear_email(self):
if self.has_email_:
self.has_email_ = 0
self.email_ = ""
def has_email(self): return self.has_email_
def auth_domain(self): return self.auth_domain_
def set_auth_domain(self, x):
self.has_auth_domain_ = 1
self.auth_domain_ = x
def clear_auth_domain(self):
if self.has_auth_domain_:
self.has_auth_domain_ = 0
self.auth_domain_ = ""
def has_auth_domain(self): return self.has_auth_domain_
def nickname(self): return self.nickname_
def set_nickname(self, x):
self.has_nickname_ = 1
self.nickname_ = x
def clear_nickname(self):
if self.has_nickname_:
self.has_nickname_ = 0
self.nickname_ = ""
def has_nickname(self): return self.has_nickname_
def gaiaid(self): return self.gaiaid_
def set_gaiaid(self, x):
self.has_gaiaid_ = 1
self.gaiaid_ = x
def clear_gaiaid(self):
if self.has_gaiaid_:
self.has_gaiaid_ = 0
self.gaiaid_ = 0
def has_gaiaid(self): return self.has_gaiaid_
def obfuscated_gaiaid(self): return self.obfuscated_gaiaid_
def set_obfuscated_gaiaid(self, x):
self.has_obfuscated_gaiaid_ = 1
self.obfuscated_gaiaid_ = x
def clear_obfuscated_gaiaid(self):
if self.has_obfuscated_gaiaid_:
self.has_obfuscated_gaiaid_ = 0
self.obfuscated_gaiaid_ = ""
def has_obfuscated_gaiaid(self): return self.has_obfuscated_gaiaid_
def federated_identity(self): return self.federated_identity_
def set_federated_identity(self, x):
self.has_federated_identity_ = 1
self.federated_identity_ = x
def clear_federated_identity(self):
if self.has_federated_identity_:
self.has_federated_identity_ = 0
self.federated_identity_ = ""
def has_federated_identity(self): return self.has_federated_identity_
def federated_provider(self): return self.federated_provider_
def set_federated_provider(self, x):
self.has_federated_provider_ = 1
self.federated_provider_ = x
def clear_federated_provider(self):
if self.has_federated_provider_:
self.has_federated_provider_ = 0
self.federated_provider_ = ""
def has_federated_provider(self): return self.has_federated_provider_
def MergeFrom(self, x):
assert x is not self
if (x.has_email()): self.set_email(x.email())
if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
if (x.has_nickname()): self.set_nickname(x.nickname())
if (x.has_gaiaid()): self.set_gaiaid(x.gaiaid())
if (x.has_obfuscated_gaiaid()): self.set_obfuscated_gaiaid(x.obfuscated_gaiaid())
if (x.has_federated_identity()): self.set_federated_identity(x.federated_identity())
if (x.has_federated_provider()): self.set_federated_provider(x.federated_provider())
def Equals(self, x):
if x is self: return 1
if self.has_email_ != x.has_email_: return 0
if self.has_email_ and self.email_ != x.email_: return 0
if self.has_auth_domain_ != x.has_auth_domain_: return 0
if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
if self.has_nickname_ != x.has_nickname_: return 0
if self.has_nickname_ and self.nickname_ != x.nickname_: return 0
if self.has_gaiaid_ != x.has_gaiaid_: return 0
if self.has_gaiaid_ and self.gaiaid_ != x.gaiaid_: return 0
if self.has_obfuscated_gaiaid_ != x.has_obfuscated_gaiaid_: return 0
if self.has_obfuscated_gaiaid_ and self.obfuscated_gaiaid_ != x.obfuscated_gaiaid_: return 0
if self.has_federated_identity_ != x.has_federated_identity_: return 0
if self.has_federated_identity_ and self.federated_identity_ != x.federated_identity_: return 0
if self.has_federated_provider_ != x.has_federated_provider_: return 0
if self.has_federated_provider_ and self.federated_provider_ != x.federated_provider_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_email_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: email not set.')
if (not self.has_auth_domain_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: auth_domain not set.')
if (not self.has_gaiaid_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: gaiaid not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.email_))
n += self.lengthString(len(self.auth_domain_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
n += self.lengthVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_): n += 2 + self.lengthString(len(self.obfuscated_gaiaid_))
if (self.has_federated_identity_): n += 2 + self.lengthString(len(self.federated_identity_))
if (self.has_federated_provider_): n += 2 + self.lengthString(len(self.federated_provider_))
return n + 4
def ByteSizePartial(self):
n = 0
if (self.has_email_):
n += 1
n += self.lengthString(len(self.email_))
if (self.has_auth_domain_):
n += 1
n += self.lengthString(len(self.auth_domain_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
if (self.has_gaiaid_):
n += 2
n += self.lengthVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_): n += 2 + self.lengthString(len(self.obfuscated_gaiaid_))
if (self.has_federated_identity_): n += 2 + self.lengthString(len(self.federated_identity_))
if (self.has_federated_provider_): n += 2 + self.lengthString(len(self.federated_provider_))
return n
def Clear(self):
self.clear_email()
self.clear_auth_domain()
self.clear_nickname()
self.clear_gaiaid()
self.clear_obfuscated_gaiaid()
self.clear_federated_identity()
self.clear_federated_provider()
def OutputUnchecked(self, out):
out.putVarInt32(74)
out.putPrefixedString(self.email_)
out.putVarInt32(82)
out.putPrefixedString(self.auth_domain_)
if (self.has_nickname_):
out.putVarInt32(90)
out.putPrefixedString(self.nickname_)
out.putVarInt32(144)
out.putVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_):
out.putVarInt32(154)
out.putPrefixedString(self.obfuscated_gaiaid_)
if (self.has_federated_identity_):
out.putVarInt32(170)
out.putPrefixedString(self.federated_identity_)
if (self.has_federated_provider_):
out.putVarInt32(178)
out.putPrefixedString(self.federated_provider_)
def OutputPartial(self, out):
if (self.has_email_):
out.putVarInt32(74)
out.putPrefixedString(self.email_)
if (self.has_auth_domain_):
out.putVarInt32(82)
out.putPrefixedString(self.auth_domain_)
if (self.has_nickname_):
out.putVarInt32(90)
out.putPrefixedString(self.nickname_)
if (self.has_gaiaid_):
out.putVarInt32(144)
out.putVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_):
out.putVarInt32(154)
out.putPrefixedString(self.obfuscated_gaiaid_)
if (self.has_federated_identity_):
out.putVarInt32(170)
out.putPrefixedString(self.federated_identity_)
if (self.has_federated_provider_):
out.putVarInt32(178)
out.putPrefixedString(self.federated_provider_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 68: break
if tt == 74:
self.set_email(d.getPrefixedString())
continue
if tt == 82:
self.set_auth_domain(d.getPrefixedString())
continue
if tt == 90:
self.set_nickname(d.getPrefixedString())
continue
if tt == 144:
self.set_gaiaid(d.getVarInt64())
continue
if tt == 154:
self.set_obfuscated_gaiaid(d.getPrefixedString())
continue
if tt == 170:
self.set_federated_identity(d.getPrefixedString())
continue
if tt == 178:
self.set_federated_provider(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_email_: res+=prefix+("email: %s\n" % self.DebugFormatString(self.email_))
if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
if self.has_nickname_: res+=prefix+("nickname: %s\n" % self.DebugFormatString(self.nickname_))
if self.has_gaiaid_: res+=prefix+("gaiaid: %s\n" % self.DebugFormatInt64(self.gaiaid_))
if self.has_obfuscated_gaiaid_: res+=prefix+("obfuscated_gaiaid: %s\n" % self.DebugFormatString(self.obfuscated_gaiaid_))
if self.has_federated_identity_: res+=prefix+("federated_identity: %s\n" % self.DebugFormatString(self.federated_identity_))
if self.has_federated_provider_: res+=prefix+("federated_provider: %s\n" % self.DebugFormatString(self.federated_provider_))
return res
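# PropertyValue_ReferenceValue: a datastore key stored inline as group 12 of
# PropertyValue (start tag 99, end tag 100): a required app id, an optional
# name_space, and a repeated PathElement group (group 14, tags 115/116)
# holding the key path.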
class PropertyValue_ReferenceValue(ProtocolBuffer.ProtocolMessage):
has_app_ = 0
app_ = ""
has_name_space_ = 0
name_space_ = ""
def __init__(self, contents=None):
self.pathelement_ = []
if contents is not None: self.MergeFromString(contents)
def app(self): return self.app_
def set_app(self, x):
self.has_app_ = 1
self.app_ = x
def clear_app(self):
if self.has_app_:
self.has_app_ = 0
self.app_ = ""
def has_app(self): return self.has_app_
def name_space(self): return self.name_space_
def set_name_space(self, x):
self.has_name_space_ = 1
self.name_space_ = x
def clear_name_space(self):
if self.has_name_space_:
self.has_name_space_ = 0
self.name_space_ = ""
def has_name_space(self): return self.has_name_space_
def pathelement_size(self): return len(self.pathelement_)
def pathelement_list(self): return self.pathelement_
def pathelement(self, i):
return self.pathelement_[i]
def mutable_pathelement(self, i):
return self.pathelement_[i]
def add_pathelement(self):
x = PropertyValue_ReferenceValuePathElement()
self.pathelement_.append(x)
return x
def clear_pathelement(self):
self.pathelement_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_app()): self.set_app(x.app())
if (x.has_name_space()): self.set_name_space(x.name_space())
for i in xrange(x.pathelement_size()): self.add_pathelement().CopyFrom(x.pathelement(i))
def Equals(self, x):
if x is self: return 1
if self.has_app_ != x.has_app_: return 0
if self.has_app_ and self.app_ != x.app_: return 0
if self.has_name_space_ != x.has_name_space_: return 0
if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
if len(self.pathelement_) != len(x.pathelement_): return 0
for e1, e2 in zip(self.pathelement_, x.pathelement_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app not set.')
for p in self.pathelement_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
n += 2 * len(self.pathelement_)
for i in xrange(len(self.pathelement_)): n += self.pathelement_[i].ByteSize()
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_app_):
n += 1
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
n += 2 * len(self.pathelement_)
for i in xrange(len(self.pathelement_)): n += self.pathelement_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_app()
self.clear_name_space()
self.clear_pathelement()
def OutputUnchecked(self, out):
out.putVarInt32(106)
out.putPrefixedString(self.app_)
for i in xrange(len(self.pathelement_)):
out.putVarInt32(115)
self.pathelement_[i].OutputUnchecked(out)
out.putVarInt32(116)
if (self.has_name_space_):
out.putVarInt32(162)
out.putPrefixedString(self.name_space_)
def OutputPartial(self, out):
if (self.has_app_):
out.putVarInt32(106)
out.putPrefixedString(self.app_)
for i in xrange(len(self.pathelement_)):
out.putVarInt32(115)
self.pathelement_[i].OutputPartial(out)
out.putVarInt32(116)
if (self.has_name_space_):
out.putVarInt32(162)
out.putPrefixedString(self.name_space_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 100: break
if tt == 106:
self.set_app(d.getPrefixedString())
continue
if tt == 115:
self.add_pathelement().TryMerge(d)
continue
if tt == 162:
self.set_name_space(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
cnt=0
for e in self.pathelement_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("PathElement%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
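# PropertyValue: the tagged union at the heart of a stored property.  By
# convention at most one of the scalar fields (int64, boolean, string,
# double) or group sub-messages (PointValue, UserValue, ReferenceValue) is
# set; nothing in the generated code enforces that.  The sub-messages are
# allocated lazily under lazy_init_lock_ so empty values stay cheap.
#
# Construction sketch (all names are accessors defined below):
#
#   pv = PropertyValue()
#   pv.mutable_pointvalue().set_x(1.0)
#   pv.mutable_pointvalue().set_y(2.0)
#   assert pv.has_pointvalue()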
class PropertyValue(ProtocolBuffer.ProtocolMessage):
has_int64value_ = 0
int64value_ = 0
has_booleanvalue_ = 0
booleanvalue_ = 0
has_stringvalue_ = 0
stringvalue_ = ""
has_doublevalue_ = 0
doublevalue_ = 0.0
has_pointvalue_ = 0
pointvalue_ = None
has_uservalue_ = 0
uservalue_ = None
has_referencevalue_ = 0
referencevalue_ = None
def __init__(self, contents=None):
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def int64value(self): return self.int64value_
def set_int64value(self, x):
self.has_int64value_ = 1
self.int64value_ = x
def clear_int64value(self):
if self.has_int64value_:
self.has_int64value_ = 0
self.int64value_ = 0
def has_int64value(self): return self.has_int64value_
def booleanvalue(self): return self.booleanvalue_
def set_booleanvalue(self, x):
self.has_booleanvalue_ = 1
self.booleanvalue_ = x
def clear_booleanvalue(self):
if self.has_booleanvalue_:
self.has_booleanvalue_ = 0
self.booleanvalue_ = 0
def has_booleanvalue(self): return self.has_booleanvalue_
def stringvalue(self): return self.stringvalue_
def set_stringvalue(self, x):
self.has_stringvalue_ = 1
self.stringvalue_ = x
def clear_stringvalue(self):
if self.has_stringvalue_:
self.has_stringvalue_ = 0
self.stringvalue_ = ""
def has_stringvalue(self): return self.has_stringvalue_
def doublevalue(self): return self.doublevalue_
def set_doublevalue(self, x):
self.has_doublevalue_ = 1
self.doublevalue_ = x
def clear_doublevalue(self):
if self.has_doublevalue_:
self.has_doublevalue_ = 0
self.doublevalue_ = 0.0
def has_doublevalue(self): return self.has_doublevalue_
def pointvalue(self):
if self.pointvalue_ is None:
self.lazy_init_lock_.acquire()
try:
if self.pointvalue_ is None: self.pointvalue_ = PropertyValue_PointValue()
finally:
self.lazy_init_lock_.release()
return self.pointvalue_
def mutable_pointvalue(self): self.has_pointvalue_ = 1; return self.pointvalue()
def clear_pointvalue(self):
if self.has_pointvalue_:
self.has_pointvalue_ = 0;
if self.pointvalue_ is not None: self.pointvalue_.Clear()
def has_pointvalue(self): return self.has_pointvalue_
def uservalue(self):
if self.uservalue_ is None:
self.lazy_init_lock_.acquire()
try:
if self.uservalue_ is None: self.uservalue_ = PropertyValue_UserValue()
finally:
self.lazy_init_lock_.release()
return self.uservalue_
def mutable_uservalue(self): self.has_uservalue_ = 1; return self.uservalue()
def clear_uservalue(self):
if self.has_uservalue_:
self.has_uservalue_ = 0;
if self.uservalue_ is not None: self.uservalue_.Clear()
def has_uservalue(self): return self.has_uservalue_
def referencevalue(self):
if self.referencevalue_ is None:
self.lazy_init_lock_.acquire()
try:
if self.referencevalue_ is None: self.referencevalue_ = PropertyValue_ReferenceValue()
finally:
self.lazy_init_lock_.release()
return self.referencevalue_
def mutable_referencevalue(self): self.has_referencevalue_ = 1; return self.referencevalue()
def clear_referencevalue(self):
if self.has_referencevalue_:
self.has_referencevalue_ = 0;
if self.referencevalue_ is not None: self.referencevalue_.Clear()
def has_referencevalue(self): return self.has_referencevalue_
def MergeFrom(self, x):
assert x is not self
if (x.has_int64value()): self.set_int64value(x.int64value())
if (x.has_booleanvalue()): self.set_booleanvalue(x.booleanvalue())
if (x.has_stringvalue()): self.set_stringvalue(x.stringvalue())
if (x.has_doublevalue()): self.set_doublevalue(x.doublevalue())
if (x.has_pointvalue()): self.mutable_pointvalue().MergeFrom(x.pointvalue())
if (x.has_uservalue()): self.mutable_uservalue().MergeFrom(x.uservalue())
if (x.has_referencevalue()): self.mutable_referencevalue().MergeFrom(x.referencevalue())
def Equals(self, x):
if x is self: return 1
if self.has_int64value_ != x.has_int64value_: return 0
if self.has_int64value_ and self.int64value_ != x.int64value_: return 0
if self.has_booleanvalue_ != x.has_booleanvalue_: return 0
if self.has_booleanvalue_ and self.booleanvalue_ != x.booleanvalue_: return 0
if self.has_stringvalue_ != x.has_stringvalue_: return 0
if self.has_stringvalue_ and self.stringvalue_ != x.stringvalue_: return 0
if self.has_doublevalue_ != x.has_doublevalue_: return 0
if self.has_doublevalue_ and self.doublevalue_ != x.doublevalue_: return 0
if self.has_pointvalue_ != x.has_pointvalue_: return 0
if self.has_pointvalue_ and self.pointvalue_ != x.pointvalue_: return 0
if self.has_uservalue_ != x.has_uservalue_: return 0
if self.has_uservalue_ and self.uservalue_ != x.uservalue_: return 0
if self.has_referencevalue_ != x.has_referencevalue_: return 0
if self.has_referencevalue_ and self.referencevalue_ != x.referencevalue_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (self.has_pointvalue_ and not self.pointvalue_.IsInitialized(debug_strs)): initialized = 0
if (self.has_uservalue_ and not self.uservalue_.IsInitialized(debug_strs)): initialized = 0
if (self.has_referencevalue_ and not self.referencevalue_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
if (self.has_int64value_): n += 1 + self.lengthVarInt64(self.int64value_)
if (self.has_booleanvalue_): n += 2
if (self.has_stringvalue_): n += 1 + self.lengthString(len(self.stringvalue_))
if (self.has_doublevalue_): n += 9
if (self.has_pointvalue_): n += 2 + self.pointvalue_.ByteSize()
if (self.has_uservalue_): n += 2 + self.uservalue_.ByteSize()
if (self.has_referencevalue_): n += 2 + self.referencevalue_.ByteSize()
return n
def ByteSizePartial(self):
n = 0
if (self.has_int64value_): n += 1 + self.lengthVarInt64(self.int64value_)
if (self.has_booleanvalue_): n += 2
if (self.has_stringvalue_): n += 1 + self.lengthString(len(self.stringvalue_))
if (self.has_doublevalue_): n += 9
if (self.has_pointvalue_): n += 2 + self.pointvalue_.ByteSizePartial()
if (self.has_uservalue_): n += 2 + self.uservalue_.ByteSizePartial()
if (self.has_referencevalue_): n += 2 + self.referencevalue_.ByteSizePartial()
return n
def Clear(self):
self.clear_int64value()
self.clear_booleanvalue()
self.clear_stringvalue()
self.clear_doublevalue()
self.clear_pointvalue()
self.clear_uservalue()
self.clear_referencevalue()
def OutputUnchecked(self, out):
if (self.has_int64value_):
out.putVarInt32(8)
out.putVarInt64(self.int64value_)
if (self.has_booleanvalue_):
out.putVarInt32(16)
out.putBoolean(self.booleanvalue_)
if (self.has_stringvalue_):
out.putVarInt32(26)
out.putPrefixedString(self.stringvalue_)
if (self.has_doublevalue_):
out.putVarInt32(33)
out.putDouble(self.doublevalue_)
if (self.has_pointvalue_):
out.putVarInt32(43)
self.pointvalue_.OutputUnchecked(out)
out.putVarInt32(44)
if (self.has_uservalue_):
out.putVarInt32(67)
self.uservalue_.OutputUnchecked(out)
out.putVarInt32(68)
if (self.has_referencevalue_):
out.putVarInt32(99)
self.referencevalue_.OutputUnchecked(out)
out.putVarInt32(100)
def OutputPartial(self, out):
if (self.has_int64value_):
out.putVarInt32(8)
out.putVarInt64(self.int64value_)
if (self.has_booleanvalue_):
out.putVarInt32(16)
out.putBoolean(self.booleanvalue_)
if (self.has_stringvalue_):
out.putVarInt32(26)
out.putPrefixedString(self.stringvalue_)
if (self.has_doublevalue_):
out.putVarInt32(33)
out.putDouble(self.doublevalue_)
if (self.has_pointvalue_):
out.putVarInt32(43)
self.pointvalue_.OutputPartial(out)
out.putVarInt32(44)
if (self.has_uservalue_):
out.putVarInt32(67)
self.uservalue_.OutputPartial(out)
out.putVarInt32(68)
if (self.has_referencevalue_):
out.putVarInt32(99)
self.referencevalue_.OutputPartial(out)
out.putVarInt32(100)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_int64value(d.getVarInt64())
continue
if tt == 16:
self.set_booleanvalue(d.getBoolean())
continue
if tt == 26:
self.set_stringvalue(d.getPrefixedString())
continue
if tt == 33:
self.set_doublevalue(d.getDouble())
continue
if tt == 43:
self.mutable_pointvalue().TryMerge(d)
continue
if tt == 67:
self.mutable_uservalue().TryMerge(d)
continue
if tt == 99:
self.mutable_referencevalue().TryMerge(d)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_int64value_: res+=prefix+("int64Value: %s\n" % self.DebugFormatInt64(self.int64value_))
if self.has_booleanvalue_: res+=prefix+("booleanValue: %s\n" % self.DebugFormatBool(self.booleanvalue_))
if self.has_stringvalue_: res+=prefix+("stringValue: %s\n" % self.DebugFormatString(self.stringvalue_))
if self.has_doublevalue_: res+=prefix+("doubleValue: %s\n" % self.DebugFormat(self.doublevalue_))
if self.has_pointvalue_:
res+=prefix+"PointValue {\n"
res+=self.pointvalue_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_uservalue_:
res+=prefix+"UserValue {\n"
res+=self.uservalue_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
if self.has_referencevalue_:
res+=prefix+"ReferenceValue {\n"
res+=self.referencevalue_.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kint64Value = 1
kbooleanValue = 2
kstringValue = 3
kdoubleValue = 4
kPointValueGroup = 5
kPointValuex = 6
kPointValuey = 7
kUserValueGroup = 8
kUserValueemail = 9
kUserValueauth_domain = 10
kUserValuenickname = 11
kUserValuegaiaid = 18
kUserValueobfuscated_gaiaid = 19
kUserValuefederated_identity = 21
kUserValuefederated_provider = 22
kReferenceValueGroup = 12
kReferenceValueapp = 13
kReferenceValuename_space = 20
kReferenceValuePathElementGroup = 14
kReferenceValuePathElementtype = 15
kReferenceValuePathElementid = 16
kReferenceValuePathElementname = 17
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "int64Value",
2: "booleanValue",
3: "stringValue",
4: "doubleValue",
5: "PointValue",
6: "x",
7: "y",
8: "UserValue",
9: "email",
10: "auth_domain",
11: "nickname",
12: "ReferenceValue",
13: "app",
14: "PathElement",
15: "type",
16: "id",
17: "name",
18: "gaiaid",
19: "obfuscated_gaiaid",
20: "name_space",
21: "federated_identity",
22: "federated_provider",
}, 22)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.DOUBLE,
5: ProtocolBuffer.Encoder.STARTGROUP,
6: ProtocolBuffer.Encoder.DOUBLE,
7: ProtocolBuffer.Encoder.DOUBLE,
8: ProtocolBuffer.Encoder.STARTGROUP,
9: ProtocolBuffer.Encoder.STRING,
10: ProtocolBuffer.Encoder.STRING,
11: ProtocolBuffer.Encoder.STRING,
12: ProtocolBuffer.Encoder.STARTGROUP,
13: ProtocolBuffer.Encoder.STRING,
14: ProtocolBuffer.Encoder.STARTGROUP,
15: ProtocolBuffer.Encoder.STRING,
16: ProtocolBuffer.Encoder.NUMERIC,
17: ProtocolBuffer.Encoder.STRING,
18: ProtocolBuffer.Encoder.NUMERIC,
19: ProtocolBuffer.Encoder.STRING,
20: ProtocolBuffer.Encoder.STRING,
21: ProtocolBuffer.Encoder.STRING,
22: ProtocolBuffer.Encoder.STRING,
}, 22, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.PropertyValue'
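# Property: one named property of an entity.  'meaning' is an enum hint for
# how to interpret the value (BLOB, TEXT, GEORSS_POINT, BLOBKEY, ...), with
# NO_MEANING as the default; 'multiple' marks the property as one element of
# a repeated (list) property.  name, value and multiple are all required.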
class Property(ProtocolBuffer.ProtocolMessage):
NO_MEANING = 0
BLOB = 14
TEXT = 15
BYTESTRING = 16
ATOM_CATEGORY = 1
ATOM_LINK = 2
ATOM_TITLE = 3
ATOM_CONTENT = 4
ATOM_SUMMARY = 5
ATOM_AUTHOR = 6
GD_WHEN = 7
GD_EMAIL = 8
GEORSS_POINT = 9
GD_IM = 10
GD_PHONENUMBER = 11
GD_POSTALADDRESS = 12
GD_RATING = 13
BLOBKEY = 17
ENTITY_PROTO = 19
INDEX_VALUE = 18
EMPTY_LIST = 24
_Meaning_NAMES = {
0: "NO_MEANING",
14: "BLOB",
15: "TEXT",
16: "BYTESTRING",
1: "ATOM_CATEGORY",
2: "ATOM_LINK",
3: "ATOM_TITLE",
4: "ATOM_CONTENT",
5: "ATOM_SUMMARY",
6: "ATOM_AUTHOR",
7: "GD_WHEN",
8: "GD_EMAIL",
9: "GEORSS_POINT",
10: "GD_IM",
11: "GD_PHONENUMBER",
12: "GD_POSTALADDRESS",
13: "GD_RATING",
17: "BLOBKEY",
19: "ENTITY_PROTO",
18: "INDEX_VALUE",
24: "EMPTY_LIST",
}
def Meaning_Name(cls, x): return cls._Meaning_NAMES.get(x, "")
Meaning_Name = classmethod(Meaning_Name)
has_meaning_ = 0
meaning_ = 0
has_meaning_uri_ = 0
meaning_uri_ = ""
has_name_ = 0
name_ = ""
has_value_ = 0
has_multiple_ = 0
multiple_ = 0
has_embedded_ = 0
embedded_ = 0
def __init__(self, contents=None):
self.value_ = PropertyValue()
if contents is not None: self.MergeFromString(contents)
def meaning(self): return self.meaning_
def set_meaning(self, x):
self.has_meaning_ = 1
self.meaning_ = x
def clear_meaning(self):
if self.has_meaning_:
self.has_meaning_ = 0
self.meaning_ = 0
def has_meaning(self): return self.has_meaning_
def meaning_uri(self): return self.meaning_uri_
def set_meaning_uri(self, x):
self.has_meaning_uri_ = 1
self.meaning_uri_ = x
def clear_meaning_uri(self):
if self.has_meaning_uri_:
self.has_meaning_uri_ = 0
self.meaning_uri_ = ""
def has_meaning_uri(self): return self.has_meaning_uri_
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def value(self): return self.value_
def mutable_value(self): self.has_value_ = 1; return self.value_
def clear_value(self):self.has_value_ = 0; self.value_.Clear()
def has_value(self): return self.has_value_
def multiple(self): return self.multiple_
def set_multiple(self, x):
self.has_multiple_ = 1
self.multiple_ = x
def clear_multiple(self):
if self.has_multiple_:
self.has_multiple_ = 0
self.multiple_ = 0
def has_multiple(self): return self.has_multiple_
def embedded(self): return self.embedded_
def set_embedded(self, x):
self.has_embedded_ = 1
self.embedded_ = x
def clear_embedded(self):
if self.has_embedded_:
self.has_embedded_ = 0
self.embedded_ = 0
def has_embedded(self): return self.has_embedded_
def MergeFrom(self, x):
assert x is not self
if (x.has_meaning()): self.set_meaning(x.meaning())
if (x.has_meaning_uri()): self.set_meaning_uri(x.meaning_uri())
if (x.has_name()): self.set_name(x.name())
if (x.has_value()): self.mutable_value().MergeFrom(x.value())
if (x.has_multiple()): self.set_multiple(x.multiple())
if (x.has_embedded()): self.set_embedded(x.embedded())
def Equals(self, x):
if x is self: return 1
if self.has_meaning_ != x.has_meaning_: return 0
if self.has_meaning_ and self.meaning_ != x.meaning_: return 0
if self.has_meaning_uri_ != x.has_meaning_uri_: return 0
if self.has_meaning_uri_ and self.meaning_uri_ != x.meaning_uri_: return 0
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
if self.has_multiple_ != x.has_multiple_: return 0
if self.has_multiple_ and self.multiple_ != x.multiple_: return 0
if self.has_embedded_ != x.has_embedded_: return 0
if self.has_embedded_ and self.embedded_ != x.embedded_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
elif not self.value_.IsInitialized(debug_strs): initialized = 0
if (not self.has_multiple_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: multiple not set.')
return initialized
def ByteSize(self):
n = 0
if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_)
if (self.has_meaning_uri_): n += 1 + self.lengthString(len(self.meaning_uri_))
n += self.lengthString(len(self.name_))
n += self.lengthString(self.value_.ByteSize())
if (self.has_embedded_): n += 2
return n + 4
def ByteSizePartial(self):
n = 0
if (self.has_meaning_): n += 1 + self.lengthVarInt64(self.meaning_)
if (self.has_meaning_uri_): n += 1 + self.lengthString(len(self.meaning_uri_))
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_value_):
n += 1
n += self.lengthString(self.value_.ByteSizePartial())
if (self.has_multiple_):
n += 2
if (self.has_embedded_): n += 2
return n
def Clear(self):
self.clear_meaning()
self.clear_meaning_uri()
self.clear_name()
self.clear_value()
self.clear_multiple()
self.clear_embedded()
def OutputUnchecked(self, out):
if (self.has_meaning_):
out.putVarInt32(8)
out.putVarInt32(self.meaning_)
if (self.has_meaning_uri_):
out.putVarInt32(18)
out.putPrefixedString(self.meaning_uri_)
out.putVarInt32(26)
out.putPrefixedString(self.name_)
out.putVarInt32(32)
out.putBoolean(self.multiple_)
out.putVarInt32(42)
out.putVarInt32(self.value_.ByteSize())
self.value_.OutputUnchecked(out)
if (self.has_embedded_):
out.putVarInt32(48)
out.putBoolean(self.embedded_)
def OutputPartial(self, out):
if (self.has_meaning_):
out.putVarInt32(8)
out.putVarInt32(self.meaning_)
if (self.has_meaning_uri_):
out.putVarInt32(18)
out.putPrefixedString(self.meaning_uri_)
if (self.has_name_):
out.putVarInt32(26)
out.putPrefixedString(self.name_)
if (self.has_multiple_):
out.putVarInt32(32)
out.putBoolean(self.multiple_)
if (self.has_value_):
out.putVarInt32(42)
out.putVarInt32(self.value_.ByteSizePartial())
self.value_.OutputPartial(out)
if (self.has_embedded_):
out.putVarInt32(48)
out.putBoolean(self.embedded_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_meaning(d.getVarInt32())
continue
if tt == 18:
self.set_meaning_uri(d.getPrefixedString())
continue
if tt == 26:
self.set_name(d.getPrefixedString())
continue
if tt == 32:
self.set_multiple(d.getBoolean())
continue
if tt == 42:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_value().TryMerge(tmp)
continue
if tt == 48:
self.set_embedded(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_meaning_: res+=prefix+("meaning: %s\n" % self.DebugFormatInt32(self.meaning_))
if self.has_meaning_uri_: res+=prefix+("meaning_uri: %s\n" % self.DebugFormatString(self.meaning_uri_))
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_value_:
res+=prefix+"value <\n"
res+=self.value_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_multiple_: res+=prefix+("multiple: %s\n" % self.DebugFormatBool(self.multiple_))
if self.has_embedded_: res+=prefix+("embedded: %s\n" % self.DebugFormatBool(self.embedded_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kmeaning = 1
kmeaning_uri = 2
kname = 3
kvalue = 5
kmultiple = 4
kembedded = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "meaning",
2: "meaning_uri",
3: "name",
4: "multiple",
5: "value",
6: "embedded",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.NUMERIC,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Property'
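# Path_Element: one step of a key path as a standalone message -- the same
# shape as PropertyValue_ReferenceValuePathElement (required kind 'type',
# plus either numeric 'id' or string 'name') but numbered 2/3/4 inside
# Path's Element group.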
class Path_Element(ProtocolBuffer.ProtocolMessage):
has_type_ = 0
type_ = ""
has_id_ = 0
id_ = 0
has_name_ = 0
name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def type(self): return self.type_
def set_type(self, x):
self.has_type_ = 1
self.type_ = x
def clear_type(self):
if self.has_type_:
self.has_type_ = 0
self.type_ = ""
def has_type(self): return self.has_type_
def id(self): return self.id_
def set_id(self, x):
self.has_id_ = 1
self.id_ = x
def clear_id(self):
if self.has_id_:
self.has_id_ = 0
self.id_ = 0
def has_id(self): return self.has_id_
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_type()): self.set_type(x.type())
if (x.has_id()): self.set_id(x.id())
if (x.has_name()): self.set_name(x.name())
def Equals(self, x):
if x is self: return 1
if self.has_type_ != x.has_type_: return 0
if self.has_type_ and self.type_ != x.type_: return 0
if self.has_id_ != x.has_id_: return 0
if self.has_id_ and self.id_ != x.id_: return 0
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: type not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.type_))
if (self.has_id_): n += 1 + self.lengthVarInt64(self.id_)
if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_type_):
n += 1
n += self.lengthString(len(self.type_))
if (self.has_id_): n += 1 + self.lengthVarInt64(self.id_)
if (self.has_name_): n += 1 + self.lengthString(len(self.name_))
return n
def Clear(self):
self.clear_type()
self.clear_id()
self.clear_name()
def OutputUnchecked(self, out):
out.putVarInt32(18)
out.putPrefixedString(self.type_)
if (self.has_id_):
out.putVarInt32(24)
out.putVarInt64(self.id_)
if (self.has_name_):
out.putVarInt32(34)
out.putPrefixedString(self.name_)
def OutputPartial(self, out):
if (self.has_type_):
out.putVarInt32(18)
out.putPrefixedString(self.type_)
if (self.has_id_):
out.putVarInt32(24)
out.putVarInt64(self.id_)
if (self.has_name_):
out.putVarInt32(34)
out.putPrefixedString(self.name_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 12: break
if tt == 18:
self.set_type(d.getPrefixedString())
continue
if tt == 24:
self.set_id(d.getVarInt64())
continue
if tt == 34:
self.set_name(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_type_: res+=prefix+("type: %s\n" % self.DebugFormatString(self.type_))
if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatInt64(self.id_))
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
return res
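# Path: a complete key path -- a repeated Element group (tags 11/12),
# ordered from the root ancestor down to the entity itself.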
class Path(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.element_ = []
if contents is not None: self.MergeFromString(contents)
def element_size(self): return len(self.element_)
def element_list(self): return self.element_
def element(self, i):
return self.element_[i]
def mutable_element(self, i):
return self.element_[i]
def add_element(self):
x = Path_Element()
self.element_.append(x)
return x
def clear_element(self):
self.element_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.element_size()): self.add_element().CopyFrom(x.element(i))
def Equals(self, x):
if x is self: return 1
if len(self.element_) != len(x.element_): return 0
for e1, e2 in zip(self.element_, x.element_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.element_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += 2 * len(self.element_)
for i in xrange(len(self.element_)): n += self.element_[i].ByteSize()
return n
def ByteSizePartial(self):
n = 0
n += 2 * len(self.element_)
for i in xrange(len(self.element_)): n += self.element_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_element()
def OutputUnchecked(self, out):
for i in xrange(len(self.element_)):
out.putVarInt32(11)
self.element_[i].OutputUnchecked(out)
out.putVarInt32(12)
def OutputPartial(self, out):
for i in xrange(len(self.element_)):
out.putVarInt32(11)
self.element_[i].OutputPartial(out)
out.putVarInt32(12)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 11:
self.add_element().TryMerge(d)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.element_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Element%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kElementGroup = 1
kElementtype = 2
kElementid = 3
kElementname = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "Element",
2: "type",
3: "id",
4: "name",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STARTGROUP,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Path'
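# Reference: a fully qualified datastore key: required app id and Path,
# optional name_space.  The sparse field numbers (13, 14, 20) line up with
# the app/path-group/name_space numbering of PropertyValue's inline
# ReferenceValue above.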
class Reference(ProtocolBuffer.ProtocolMessage):
has_app_ = 0
app_ = ""
has_name_space_ = 0
name_space_ = ""
has_path_ = 0
def __init__(self, contents=None):
self.path_ = Path()
if contents is not None: self.MergeFromString(contents)
def app(self): return self.app_
def set_app(self, x):
self.has_app_ = 1
self.app_ = x
def clear_app(self):
if self.has_app_:
self.has_app_ = 0
self.app_ = ""
def has_app(self): return self.has_app_
def name_space(self): return self.name_space_
def set_name_space(self, x):
self.has_name_space_ = 1
self.name_space_ = x
def clear_name_space(self):
if self.has_name_space_:
self.has_name_space_ = 0
self.name_space_ = ""
def has_name_space(self): return self.has_name_space_
def path(self): return self.path_
def mutable_path(self): self.has_path_ = 1; return self.path_
def clear_path(self):self.has_path_ = 0; self.path_.Clear()
def has_path(self): return self.has_path_
def MergeFrom(self, x):
assert x is not self
if (x.has_app()): self.set_app(x.app())
if (x.has_name_space()): self.set_name_space(x.name_space())
if (x.has_path()): self.mutable_path().MergeFrom(x.path())
def Equals(self, x):
if x is self: return 1
if self.has_app_ != x.has_app_: return 0
if self.has_app_ and self.app_ != x.app_: return 0
if self.has_name_space_ != x.has_name_space_: return 0
if self.has_name_space_ and self.name_space_ != x.name_space_: return 0
if self.has_path_ != x.has_path_: return 0
if self.has_path_ and self.path_ != x.path_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app not set.')
if (not self.has_path_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: path not set.')
elif not self.path_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
n += self.lengthString(self.path_.ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_app_):
n += 1
n += self.lengthString(len(self.app_))
if (self.has_name_space_): n += 2 + self.lengthString(len(self.name_space_))
if (self.has_path_):
n += 1
n += self.lengthString(self.path_.ByteSizePartial())
return n
def Clear(self):
self.clear_app()
self.clear_name_space()
self.clear_path()
def OutputUnchecked(self, out):
out.putVarInt32(106)
out.putPrefixedString(self.app_)
out.putVarInt32(114)
out.putVarInt32(self.path_.ByteSize())
self.path_.OutputUnchecked(out)
if (self.has_name_space_):
out.putVarInt32(162)
out.putPrefixedString(self.name_space_)
def OutputPartial(self, out):
if (self.has_app_):
out.putVarInt32(106)
out.putPrefixedString(self.app_)
if (self.has_path_):
out.putVarInt32(114)
out.putVarInt32(self.path_.ByteSizePartial())
self.path_.OutputPartial(out)
if (self.has_name_space_):
out.putVarInt32(162)
out.putPrefixedString(self.name_space_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 106:
self.set_app(d.getPrefixedString())
continue
if tt == 114:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_path().TryMerge(tmp)
continue
if tt == 162:
self.set_name_space(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_: res+=prefix+("app: %s\n" % self.DebugFormatString(self.app_))
if self.has_name_space_: res+=prefix+("name_space: %s\n" % self.DebugFormatString(self.name_space_))
if self.has_path_:
res+=prefix+"path <\n"
res+=self.path_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp = 13
kname_space = 20
kpath = 14
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
13: "app",
14: "path",
20: "name_space",
}, 20)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
13: ProtocolBuffer.Encoder.STRING,
14: ProtocolBuffer.Encoder.STRING,
20: ProtocolBuffer.Encoder.STRING,
}, 20, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Reference'
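# User: the standalone form of a stored user, field-for-field the same as
# PropertyValue_UserValue but with compact field numbers 1-7.  email,
# auth_domain and gaiaid are required here as well.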
class User(ProtocolBuffer.ProtocolMessage):
has_email_ = 0
email_ = ""
has_auth_domain_ = 0
auth_domain_ = ""
has_nickname_ = 0
nickname_ = ""
has_gaiaid_ = 0
gaiaid_ = 0
has_obfuscated_gaiaid_ = 0
obfuscated_gaiaid_ = ""
has_federated_identity_ = 0
federated_identity_ = ""
has_federated_provider_ = 0
federated_provider_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def email(self): return self.email_
def set_email(self, x):
self.has_email_ = 1
self.email_ = x
def clear_email(self):
if self.has_email_:
self.has_email_ = 0
self.email_ = ""
def has_email(self): return self.has_email_
def auth_domain(self): return self.auth_domain_
def set_auth_domain(self, x):
self.has_auth_domain_ = 1
self.auth_domain_ = x
def clear_auth_domain(self):
if self.has_auth_domain_:
self.has_auth_domain_ = 0
self.auth_domain_ = ""
def has_auth_domain(self): return self.has_auth_domain_
def nickname(self): return self.nickname_
def set_nickname(self, x):
self.has_nickname_ = 1
self.nickname_ = x
def clear_nickname(self):
if self.has_nickname_:
self.has_nickname_ = 0
self.nickname_ = ""
def has_nickname(self): return self.has_nickname_
def gaiaid(self): return self.gaiaid_
def set_gaiaid(self, x):
self.has_gaiaid_ = 1
self.gaiaid_ = x
def clear_gaiaid(self):
if self.has_gaiaid_:
self.has_gaiaid_ = 0
self.gaiaid_ = 0
def has_gaiaid(self): return self.has_gaiaid_
def obfuscated_gaiaid(self): return self.obfuscated_gaiaid_
def set_obfuscated_gaiaid(self, x):
self.has_obfuscated_gaiaid_ = 1
self.obfuscated_gaiaid_ = x
def clear_obfuscated_gaiaid(self):
if self.has_obfuscated_gaiaid_:
self.has_obfuscated_gaiaid_ = 0
self.obfuscated_gaiaid_ = ""
def has_obfuscated_gaiaid(self): return self.has_obfuscated_gaiaid_
def federated_identity(self): return self.federated_identity_
def set_federated_identity(self, x):
self.has_federated_identity_ = 1
self.federated_identity_ = x
def clear_federated_identity(self):
if self.has_federated_identity_:
self.has_federated_identity_ = 0
self.federated_identity_ = ""
def has_federated_identity(self): return self.has_federated_identity_
def federated_provider(self): return self.federated_provider_
def set_federated_provider(self, x):
self.has_federated_provider_ = 1
self.federated_provider_ = x
def clear_federated_provider(self):
if self.has_federated_provider_:
self.has_federated_provider_ = 0
self.federated_provider_ = ""
def has_federated_provider(self): return self.has_federated_provider_
def MergeFrom(self, x):
assert x is not self
if (x.has_email()): self.set_email(x.email())
if (x.has_auth_domain()): self.set_auth_domain(x.auth_domain())
if (x.has_nickname()): self.set_nickname(x.nickname())
if (x.has_gaiaid()): self.set_gaiaid(x.gaiaid())
if (x.has_obfuscated_gaiaid()): self.set_obfuscated_gaiaid(x.obfuscated_gaiaid())
if (x.has_federated_identity()): self.set_federated_identity(x.federated_identity())
if (x.has_federated_provider()): self.set_federated_provider(x.federated_provider())
def Equals(self, x):
if x is self: return 1
if self.has_email_ != x.has_email_: return 0
if self.has_email_ and self.email_ != x.email_: return 0
if self.has_auth_domain_ != x.has_auth_domain_: return 0
if self.has_auth_domain_ and self.auth_domain_ != x.auth_domain_: return 0
if self.has_nickname_ != x.has_nickname_: return 0
if self.has_nickname_ and self.nickname_ != x.nickname_: return 0
if self.has_gaiaid_ != x.has_gaiaid_: return 0
if self.has_gaiaid_ and self.gaiaid_ != x.gaiaid_: return 0
if self.has_obfuscated_gaiaid_ != x.has_obfuscated_gaiaid_: return 0
if self.has_obfuscated_gaiaid_ and self.obfuscated_gaiaid_ != x.obfuscated_gaiaid_: return 0
if self.has_federated_identity_ != x.has_federated_identity_: return 0
if self.has_federated_identity_ and self.federated_identity_ != x.federated_identity_: return 0
if self.has_federated_provider_ != x.has_federated_provider_: return 0
if self.has_federated_provider_ and self.federated_provider_ != x.federated_provider_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_email_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: email not set.')
if (not self.has_auth_domain_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: auth_domain not set.')
if (not self.has_gaiaid_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: gaiaid not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.email_))
n += self.lengthString(len(self.auth_domain_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
n += self.lengthVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_): n += 1 + self.lengthString(len(self.obfuscated_gaiaid_))
if (self.has_federated_identity_): n += 1 + self.lengthString(len(self.federated_identity_))
if (self.has_federated_provider_): n += 1 + self.lengthString(len(self.federated_provider_))
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_email_):
n += 1
n += self.lengthString(len(self.email_))
if (self.has_auth_domain_):
n += 1
n += self.lengthString(len(self.auth_domain_))
if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
if (self.has_gaiaid_):
n += 1
n += self.lengthVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_): n += 1 + self.lengthString(len(self.obfuscated_gaiaid_))
if (self.has_federated_identity_): n += 1 + self.lengthString(len(self.federated_identity_))
if (self.has_federated_provider_): n += 1 + self.lengthString(len(self.federated_provider_))
return n
def Clear(self):
self.clear_email()
self.clear_auth_domain()
self.clear_nickname()
self.clear_gaiaid()
self.clear_obfuscated_gaiaid()
self.clear_federated_identity()
self.clear_federated_provider()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.email_)
out.putVarInt32(18)
out.putPrefixedString(self.auth_domain_)
if (self.has_nickname_):
out.putVarInt32(26)
out.putPrefixedString(self.nickname_)
out.putVarInt32(32)
out.putVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_):
out.putVarInt32(42)
out.putPrefixedString(self.obfuscated_gaiaid_)
if (self.has_federated_identity_):
out.putVarInt32(50)
out.putPrefixedString(self.federated_identity_)
if (self.has_federated_provider_):
out.putVarInt32(58)
out.putPrefixedString(self.federated_provider_)
def OutputPartial(self, out):
if (self.has_email_):
out.putVarInt32(10)
out.putPrefixedString(self.email_)
if (self.has_auth_domain_):
out.putVarInt32(18)
out.putPrefixedString(self.auth_domain_)
if (self.has_nickname_):
out.putVarInt32(26)
out.putPrefixedString(self.nickname_)
if (self.has_gaiaid_):
out.putVarInt32(32)
out.putVarInt64(self.gaiaid_)
if (self.has_obfuscated_gaiaid_):
out.putVarInt32(42)
out.putPrefixedString(self.obfuscated_gaiaid_)
if (self.has_federated_identity_):
out.putVarInt32(50)
out.putPrefixedString(self.federated_identity_)
if (self.has_federated_provider_):
out.putVarInt32(58)
out.putPrefixedString(self.federated_provider_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_email(d.getPrefixedString())
continue
if tt == 18:
self.set_auth_domain(d.getPrefixedString())
continue
if tt == 26:
self.set_nickname(d.getPrefixedString())
continue
if tt == 32:
self.set_gaiaid(d.getVarInt64())
continue
if tt == 42:
self.set_obfuscated_gaiaid(d.getPrefixedString())
continue
if tt == 50:
self.set_federated_identity(d.getPrefixedString())
continue
if tt == 58:
self.set_federated_provider(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_email_: res+=prefix+("email: %s\n" % self.DebugFormatString(self.email_))
if self.has_auth_domain_: res+=prefix+("auth_domain: %s\n" % self.DebugFormatString(self.auth_domain_))
if self.has_nickname_: res+=prefix+("nickname: %s\n" % self.DebugFormatString(self.nickname_))
if self.has_gaiaid_: res+=prefix+("gaiaid: %s\n" % self.DebugFormatInt64(self.gaiaid_))
if self.has_obfuscated_gaiaid_: res+=prefix+("obfuscated_gaiaid: %s\n" % self.DebugFormatString(self.obfuscated_gaiaid_))
if self.has_federated_identity_: res+=prefix+("federated_identity: %s\n" % self.DebugFormatString(self.federated_identity_))
if self.has_federated_provider_: res+=prefix+("federated_provider: %s\n" % self.DebugFormatString(self.federated_provider_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kemail = 1
kauth_domain = 2
knickname = 3
kgaiaid = 4
kobfuscated_gaiaid = 5
kfederated_identity = 6
kfederated_provider = 7
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "email",
2: "auth_domain",
3: "nickname",
4: "gaiaid",
5: "obfuscated_gaiaid",
6: "federated_identity",
7: "federated_provider",
}, 7)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
6: ProtocolBuffer.Encoder.STRING,
7: ProtocolBuffer.Encoder.STRING,
}, 7, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.User'
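# EntityProto: a whole stored entity.  'key' (a Reference) and
# 'entity_group' (the Path of the group's root entity) are required; 'owner'
# is a lazily allocated User; 'property' holds the indexed properties and
# 'raw_property' conventionally the unindexed ones.  'kind'/'kind_uri' are
# optional hints (GD_CONTACT, GD_EVENT, GD_MESSAGE).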
class EntityProto(ProtocolBuffer.ProtocolMessage):
GD_CONTACT = 1
GD_EVENT = 2
GD_MESSAGE = 3
_Kind_NAMES = {
1: "GD_CONTACT",
2: "GD_EVENT",
3: "GD_MESSAGE",
}
def Kind_Name(cls, x): return cls._Kind_NAMES.get(x, "")
Kind_Name = classmethod(Kind_Name)
has_key_ = 0
has_entity_group_ = 0
has_owner_ = 0
owner_ = None
has_kind_ = 0
kind_ = 0
has_kind_uri_ = 0
kind_uri_ = ""
def __init__(self, contents=None):
self.key_ = Reference()
self.entity_group_ = Path()
self.property_ = []
self.raw_property_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def key(self): return self.key_
def mutable_key(self): self.has_key_ = 1; return self.key_
def clear_key(self):self.has_key_ = 0; self.key_.Clear()
def has_key(self): return self.has_key_
def entity_group(self): return self.entity_group_
def mutable_entity_group(self): self.has_entity_group_ = 1; return self.entity_group_
def clear_entity_group(self):self.has_entity_group_ = 0; self.entity_group_.Clear()
def has_entity_group(self): return self.has_entity_group_
def owner(self):
if self.owner_ is None:
self.lazy_init_lock_.acquire()
try:
if self.owner_ is None: self.owner_ = User()
finally:
self.lazy_init_lock_.release()
return self.owner_
def mutable_owner(self): self.has_owner_ = 1; return self.owner()
def clear_owner(self):
if self.has_owner_:
self.has_owner_ = 0;
if self.owner_ is not None: self.owner_.Clear()
def has_owner(self): return self.has_owner_
def kind(self): return self.kind_
def set_kind(self, x):
self.has_kind_ = 1
self.kind_ = x
def clear_kind(self):
if self.has_kind_:
self.has_kind_ = 0
self.kind_ = 0
def has_kind(self): return self.has_kind_
def kind_uri(self): return self.kind_uri_
def set_kind_uri(self, x):
self.has_kind_uri_ = 1
self.kind_uri_ = x
def clear_kind_uri(self):
if self.has_kind_uri_:
self.has_kind_uri_ = 0
self.kind_uri_ = ""
def has_kind_uri(self): return self.has_kind_uri_
def property_size(self): return len(self.property_)
def property_list(self): return self.property_
def property(self, i):
return self.property_[i]
def mutable_property(self, i):
return self.property_[i]
def add_property(self):
x = Property()
self.property_.append(x)
return x
def clear_property(self):
self.property_ = []
def raw_property_size(self): return len(self.raw_property_)
def raw_property_list(self): return self.raw_property_
def raw_property(self, i):
return self.raw_property_[i]
def mutable_raw_property(self, i):
return self.raw_property_[i]
def add_raw_property(self):
x = Property()
self.raw_property_.append(x)
return x
def clear_raw_property(self):
self.raw_property_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.mutable_key().MergeFrom(x.key())
if (x.has_entity_group()): self.mutable_entity_group().MergeFrom(x.entity_group())
if (x.has_owner()): self.mutable_owner().MergeFrom(x.owner())
if (x.has_kind()): self.set_kind(x.kind())
if (x.has_kind_uri()): self.set_kind_uri(x.kind_uri())
for i in xrange(x.property_size()): self.add_property().CopyFrom(x.property(i))
for i in xrange(x.raw_property_size()): self.add_raw_property().CopyFrom(x.raw_property(i))
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_entity_group_ != x.has_entity_group_: return 0
if self.has_entity_group_ and self.entity_group_ != x.entity_group_: return 0
if self.has_owner_ != x.has_owner_: return 0
if self.has_owner_ and self.owner_ != x.owner_: return 0
if self.has_kind_ != x.has_kind_: return 0
if self.has_kind_ and self.kind_ != x.kind_: return 0
if self.has_kind_uri_ != x.has_kind_uri_: return 0
if self.has_kind_uri_ and self.kind_uri_ != x.kind_uri_: return 0
if len(self.property_) != len(x.property_): return 0
for e1, e2 in zip(self.property_, x.property_):
if e1 != e2: return 0
if len(self.raw_property_) != len(x.raw_property_): return 0
for e1, e2 in zip(self.raw_property_, x.raw_property_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: key not set.')
elif not self.key_.IsInitialized(debug_strs): initialized = 0
if (not self.has_entity_group_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: entity_group not set.')
elif not self.entity_group_.IsInitialized(debug_strs): initialized = 0
if (self.has_owner_ and not self.owner_.IsInitialized(debug_strs)): initialized = 0
for p in self.property_:
if not p.IsInitialized(debug_strs): initialized=0
for p in self.raw_property_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.key_.ByteSize())
n += self.lengthString(self.entity_group_.ByteSize())
if (self.has_owner_): n += 2 + self.lengthString(self.owner_.ByteSize())
if (self.has_kind_): n += 1 + self.lengthVarInt64(self.kind_)
if (self.has_kind_uri_): n += 1 + self.lengthString(len(self.kind_uri_))
n += 1 * len(self.property_)
for i in xrange(len(self.property_)): n += self.lengthString(self.property_[i].ByteSize())
n += 1 * len(self.raw_property_)
for i in xrange(len(self.raw_property_)): n += self.lengthString(self.raw_property_[i].ByteSize())
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_key_):
n += 1
n += self.lengthString(self.key_.ByteSizePartial())
if (self.has_entity_group_):
n += 2
n += self.lengthString(self.entity_group_.ByteSizePartial())
if (self.has_owner_): n += 2 + self.lengthString(self.owner_.ByteSizePartial())
if (self.has_kind_): n += 1 + self.lengthVarInt64(self.kind_)
if (self.has_kind_uri_): n += 1 + self.lengthString(len(self.kind_uri_))
n += 1 * len(self.property_)
for i in xrange(len(self.property_)): n += self.lengthString(self.property_[i].ByteSizePartial())
n += 1 * len(self.raw_property_)
for i in xrange(len(self.raw_property_)): n += self.lengthString(self.raw_property_[i].ByteSizePartial())
return n
def Clear(self):
self.clear_key()
self.clear_entity_group()
self.clear_owner()
self.clear_kind()
self.clear_kind_uri()
self.clear_property()
self.clear_raw_property()
def OutputUnchecked(self, out):
if (self.has_kind_):
out.putVarInt32(32)
out.putVarInt32(self.kind_)
if (self.has_kind_uri_):
out.putVarInt32(42)
out.putPrefixedString(self.kind_uri_)
out.putVarInt32(106)
out.putVarInt32(self.key_.ByteSize())
self.key_.OutputUnchecked(out)
for i in xrange(len(self.property_)):
out.putVarInt32(114)
out.putVarInt32(self.property_[i].ByteSize())
self.property_[i].OutputUnchecked(out)
for i in xrange(len(self.raw_property_)):
out.putVarInt32(122)
out.putVarInt32(self.raw_property_[i].ByteSize())
self.raw_property_[i].OutputUnchecked(out)
out.putVarInt32(130)
out.putVarInt32(self.entity_group_.ByteSize())
self.entity_group_.OutputUnchecked(out)
if (self.has_owner_):
out.putVarInt32(138)
out.putVarInt32(self.owner_.ByteSize())
self.owner_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_kind_):
out.putVarInt32(32)
out.putVarInt32(self.kind_)
if (self.has_kind_uri_):
out.putVarInt32(42)
out.putPrefixedString(self.kind_uri_)
if (self.has_key_):
out.putVarInt32(106)
out.putVarInt32(self.key_.ByteSizePartial())
self.key_.OutputPartial(out)
for i in xrange(len(self.property_)):
out.putVarInt32(114)
out.putVarInt32(self.property_[i].ByteSizePartial())
self.property_[i].OutputPartial(out)
for i in xrange(len(self.raw_property_)):
out.putVarInt32(122)
out.putVarInt32(self.raw_property_[i].ByteSizePartial())
self.raw_property_[i].OutputPartial(out)
if (self.has_entity_group_):
out.putVarInt32(130)
out.putVarInt32(self.entity_group_.ByteSizePartial())
self.entity_group_.OutputPartial(out)
if (self.has_owner_):
out.putVarInt32(138)
out.putVarInt32(self.owner_.ByteSizePartial())
self.owner_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 32:
self.set_kind(d.getVarInt32())
continue
if tt == 42:
self.set_kind_uri(d.getPrefixedString())
continue
if tt == 106:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_key().TryMerge(tmp)
continue
if tt == 114:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_property().TryMerge(tmp)
continue
if tt == 122:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_raw_property().TryMerge(tmp)
continue
if tt == 130:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_entity_group().TryMerge(tmp)
continue
if tt == 138:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_owner().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_:
res+=prefix+"key <\n"
res+=self.key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_entity_group_:
res+=prefix+"entity_group <\n"
res+=self.entity_group_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_owner_:
res+=prefix+"owner <\n"
res+=self.owner_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_kind_: res+=prefix+("kind: %s\n" % self.DebugFormatInt32(self.kind_))
if self.has_kind_uri_: res+=prefix+("kind_uri: %s\n" % self.DebugFormatString(self.kind_uri_))
cnt=0
for e in self.property_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("property%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
cnt=0
for e in self.raw_property_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("raw_property%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kkey = 13
kentity_group = 16
kowner = 17
kkind = 4
kkind_uri = 5
kproperty = 14
kraw_property = 15
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
4: "kind",
5: "kind_uri",
13: "key",
14: "property",
15: "raw_property",
16: "entity_group",
17: "owner",
}, 17)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.STRING,
13: ProtocolBuffer.Encoder.STRING,
14: ProtocolBuffer.Encoder.STRING,
15: ProtocolBuffer.Encoder.STRING,
16: ProtocolBuffer.Encoder.STRING,
17: ProtocolBuffer.Encoder.STRING,
}, 17, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.EntityProto'
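# EntityMetadata carries two optional version counters. Both default to 0,
# the message is always "initialized", and an empty instance serializes to
# zero bytes. Size accounting sketch (comments only):
#
#   m = EntityMetadata()
#   m.set_created_version(5)
#   m.set_updated_version(7)
#   m.ByteSize()                # 4: each set field costs a 1-byte tag plus
#                               # a 1-byte varint for these small values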
class EntityMetadata(ProtocolBuffer.ProtocolMessage):
has_created_version_ = 0
created_version_ = 0
has_updated_version_ = 0
updated_version_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def created_version(self): return self.created_version_
def set_created_version(self, x):
self.has_created_version_ = 1
self.created_version_ = x
def clear_created_version(self):
if self.has_created_version_:
self.has_created_version_ = 0
self.created_version_ = 0
def has_created_version(self): return self.has_created_version_
def updated_version(self): return self.updated_version_
def set_updated_version(self, x):
self.has_updated_version_ = 1
self.updated_version_ = x
def clear_updated_version(self):
if self.has_updated_version_:
self.has_updated_version_ = 0
self.updated_version_ = 0
def has_updated_version(self): return self.has_updated_version_
def MergeFrom(self, x):
assert x is not self
if (x.has_created_version()): self.set_created_version(x.created_version())
if (x.has_updated_version()): self.set_updated_version(x.updated_version())
def Equals(self, x):
if x is self: return 1
if self.has_created_version_ != x.has_created_version_: return 0
if self.has_created_version_ and self.created_version_ != x.created_version_: return 0
if self.has_updated_version_ != x.has_updated_version_: return 0
if self.has_updated_version_ and self.updated_version_ != x.updated_version_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_created_version_): n += 1 + self.lengthVarInt64(self.created_version_)
if (self.has_updated_version_): n += 1 + self.lengthVarInt64(self.updated_version_)
return n
def ByteSizePartial(self):
n = 0
if (self.has_created_version_): n += 1 + self.lengthVarInt64(self.created_version_)
if (self.has_updated_version_): n += 1 + self.lengthVarInt64(self.updated_version_)
return n
def Clear(self):
self.clear_created_version()
self.clear_updated_version()
def OutputUnchecked(self, out):
if (self.has_created_version_):
out.putVarInt32(8)
out.putVarInt64(self.created_version_)
if (self.has_updated_version_):
out.putVarInt32(16)
out.putVarInt64(self.updated_version_)
def OutputPartial(self, out):
if (self.has_created_version_):
out.putVarInt32(8)
out.putVarInt64(self.created_version_)
if (self.has_updated_version_):
out.putVarInt32(16)
out.putVarInt64(self.updated_version_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_created_version(d.getVarInt64())
continue
if tt == 16:
self.set_updated_version(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_created_version_: res+=prefix+("created_version: %s\n" % self.DebugFormatInt64(self.created_version_))
if self.has_updated_version_: res+=prefix+("updated_version: %s\n" % self.DebugFormatInt64(self.updated_version_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kcreated_version = 1
kupdated_version = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "created_version",
2: "updated_version",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.NUMERIC,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.EntityMetadata'
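# CompositeProperty pairs a required index_id with a repeated list of opaque
# string values, presumably the precomputed row entries for one composite
# index (an assumption; only the wire shape is visible here):
#
#   cp = CompositeProperty()
#   cp.set_index_id(42)
#   cp.add_value('ab')
#   cp.value_size()             # 1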
class CompositeProperty(ProtocolBuffer.ProtocolMessage):
has_index_id_ = 0
index_id_ = 0
def __init__(self, contents=None):
self.value_ = []
if contents is not None: self.MergeFromString(contents)
def index_id(self): return self.index_id_
def set_index_id(self, x):
self.has_index_id_ = 1
self.index_id_ = x
def clear_index_id(self):
if self.has_index_id_:
self.has_index_id_ = 0
self.index_id_ = 0
def has_index_id(self): return self.has_index_id_
def value_size(self): return len(self.value_)
def value_list(self): return self.value_
def value(self, i):
return self.value_[i]
def set_value(self, i, x):
self.value_[i] = x
def add_value(self, x):
self.value_.append(x)
def clear_value(self):
self.value_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_index_id()): self.set_index_id(x.index_id())
for i in xrange(x.value_size()): self.add_value(x.value(i))
def Equals(self, x):
if x is self: return 1
if self.has_index_id_ != x.has_index_id_: return 0
if self.has_index_id_ and self.index_id_ != x.index_id_: return 0
if len(self.value_) != len(x.value_): return 0
for e1, e2 in zip(self.value_, x.value_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_id not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.index_id_)
n += 1 * len(self.value_)
for i in xrange(len(self.value_)): n += self.lengthString(len(self.value_[i]))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_index_id_):
n += 1
n += self.lengthVarInt64(self.index_id_)
n += 1 * len(self.value_)
for i in xrange(len(self.value_)): n += self.lengthString(len(self.value_[i]))
return n
def Clear(self):
self.clear_index_id()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.index_id_)
for i in xrange(len(self.value_)):
out.putVarInt32(18)
out.putPrefixedString(self.value_[i])
def OutputPartial(self, out):
if (self.has_index_id_):
out.putVarInt32(8)
out.putVarInt64(self.index_id_)
for i in xrange(len(self.value_)):
out.putVarInt32(18)
out.putPrefixedString(self.value_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_index_id(d.getVarInt64())
continue
if tt == 18:
self.add_value(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_id_: res+=prefix+("index_id: %s\n" % self.DebugFormatInt64(self.index_id_))
cnt=0
for e in self.value_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("value%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_id = 1
kvalue = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_id",
2: "value",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.CompositeProperty'
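# Index_Property is the nested per-property descriptor used by Index below:
# a required name plus optional direction (ASCENDING/DESCENDING) and mode
# (GEOSPATIAL). It is encoded as a protobuf group, which is why its TryMerge
# loop exits on the ENDGROUP tag value 20 instead of on end of input:
#
#   ip = Index_Property()
#   ip.set_name('price')
#   ip.set_direction(Index_Property.DESCENDING)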
class Index_Property(ProtocolBuffer.ProtocolMessage):
DIRECTION_UNSPECIFIED = 0
ASCENDING = 1
DESCENDING = 2
_Direction_NAMES = {
0: "DIRECTION_UNSPECIFIED",
1: "ASCENDING",
2: "DESCENDING",
}
def Direction_Name(cls, x): return cls._Direction_NAMES.get(x, "")
Direction_Name = classmethod(Direction_Name)
MODE_UNSPECIFIED = 0
GEOSPATIAL = 3
_Mode_NAMES = {
0: "MODE_UNSPECIFIED",
3: "GEOSPATIAL",
}
def Mode_Name(cls, x): return cls._Mode_NAMES.get(x, "")
Mode_Name = classmethod(Mode_Name)
has_name_ = 0
name_ = ""
has_direction_ = 0
direction_ = 0
has_mode_ = 0
mode_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def name(self): return self.name_
def set_name(self, x):
self.has_name_ = 1
self.name_ = x
def clear_name(self):
if self.has_name_:
self.has_name_ = 0
self.name_ = ""
def has_name(self): return self.has_name_
def direction(self): return self.direction_
def set_direction(self, x):
self.has_direction_ = 1
self.direction_ = x
def clear_direction(self):
if self.has_direction_:
self.has_direction_ = 0
self.direction_ = 0
def has_direction(self): return self.has_direction_
def mode(self): return self.mode_
def set_mode(self, x):
self.has_mode_ = 1
self.mode_ = x
def clear_mode(self):
if self.has_mode_:
self.has_mode_ = 0
self.mode_ = 0
def has_mode(self): return self.has_mode_
def MergeFrom(self, x):
assert x is not self
if (x.has_name()): self.set_name(x.name())
if (x.has_direction()): self.set_direction(x.direction())
if (x.has_mode()): self.set_mode(x.mode())
def Equals(self, x):
if x is self: return 1
if self.has_name_ != x.has_name_: return 0
if self.has_name_ and self.name_ != x.name_: return 0
if self.has_direction_ != x.has_direction_: return 0
if self.has_direction_ and self.direction_ != x.direction_: return 0
if self.has_mode_ != x.has_mode_: return 0
if self.has_mode_ and self.mode_ != x.mode_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: name not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.name_))
if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_name_):
n += 1
n += self.lengthString(len(self.name_))
if (self.has_direction_): n += 1 + self.lengthVarInt64(self.direction_)
if (self.has_mode_): n += 1 + self.lengthVarInt64(self.mode_)
return n
def Clear(self):
self.clear_name()
self.clear_direction()
self.clear_mode()
def OutputUnchecked(self, out):
out.putVarInt32(26)
out.putPrefixedString(self.name_)
if (self.has_direction_):
out.putVarInt32(32)
out.putVarInt32(self.direction_)
if (self.has_mode_):
out.putVarInt32(48)
out.putVarInt32(self.mode_)
def OutputPartial(self, out):
if (self.has_name_):
out.putVarInt32(26)
out.putPrefixedString(self.name_)
if (self.has_direction_):
out.putVarInt32(32)
out.putVarInt32(self.direction_)
if (self.has_mode_):
out.putVarInt32(48)
out.putVarInt32(self.mode_)
def TryMerge(self, d):
while 1:
tt = d.getVarInt32()
if tt == 20: break
if tt == 26:
self.set_name(d.getPrefixedString())
continue
if tt == 32:
self.set_direction(d.getVarInt32())
continue
if tt == 48:
self.set_mode(d.getVarInt32())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_name_: res+=prefix+("name: %s\n" % self.DebugFormatString(self.name_))
if self.has_direction_: res+=prefix+("direction: %s\n" % self.DebugFormatInt32(self.direction_))
if self.has_mode_: res+=prefix+("mode: %s\n" % self.DebugFormatInt32(self.mode_))
return res
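# Index is one index definition: a required entity_type, a required ancestor
# flag, and a repeated group of Index_Property entries. The group encoding is
# visible in OutputUnchecked, which brackets each property between the
# STARTGROUP tag 19 and the ENDGROUP tag 20:
#
#   idx = Index()
#   idx.set_entity_type('Order')
#   idx.set_ancestor(0)
#   idx.add_property().set_name('price')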
class Index(ProtocolBuffer.ProtocolMessage):
has_entity_type_ = 0
entity_type_ = ""
has_ancestor_ = 0
ancestor_ = 0
def __init__(self, contents=None):
self.property_ = []
if contents is not None: self.MergeFromString(contents)
def entity_type(self): return self.entity_type_
def set_entity_type(self, x):
self.has_entity_type_ = 1
self.entity_type_ = x
def clear_entity_type(self):
if self.has_entity_type_:
self.has_entity_type_ = 0
self.entity_type_ = ""
def has_entity_type(self): return self.has_entity_type_
def ancestor(self): return self.ancestor_
def set_ancestor(self, x):
self.has_ancestor_ = 1
self.ancestor_ = x
def clear_ancestor(self):
if self.has_ancestor_:
self.has_ancestor_ = 0
self.ancestor_ = 0
def has_ancestor(self): return self.has_ancestor_
def property_size(self): return len(self.property_)
def property_list(self): return self.property_
def property(self, i):
return self.property_[i]
def mutable_property(self, i):
return self.property_[i]
def add_property(self):
x = Index_Property()
self.property_.append(x)
return x
def clear_property(self):
self.property_ = []
def MergeFrom(self, x):
assert x is not self
if (x.has_entity_type()): self.set_entity_type(x.entity_type())
if (x.has_ancestor()): self.set_ancestor(x.ancestor())
for i in xrange(x.property_size()): self.add_property().CopyFrom(x.property(i))
def Equals(self, x):
if x is self: return 1
if self.has_entity_type_ != x.has_entity_type_: return 0
if self.has_entity_type_ and self.entity_type_ != x.entity_type_: return 0
if self.has_ancestor_ != x.has_ancestor_: return 0
if self.has_ancestor_ and self.ancestor_ != x.ancestor_: return 0
if len(self.property_) != len(x.property_): return 0
for e1, e2 in zip(self.property_, x.property_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_entity_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: entity_type not set.')
if (not self.has_ancestor_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: ancestor not set.')
for p in self.property_:
if not p.IsInitialized(debug_strs): initialized=0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.entity_type_))
n += 2 * len(self.property_)
for i in xrange(len(self.property_)): n += self.property_[i].ByteSize()
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_entity_type_):
n += 1
n += self.lengthString(len(self.entity_type_))
if (self.has_ancestor_):
n += 2
n += 2 * len(self.property_)
for i in xrange(len(self.property_)): n += self.property_[i].ByteSizePartial()
return n
def Clear(self):
self.clear_entity_type()
self.clear_ancestor()
self.clear_property()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.entity_type_)
for i in xrange(len(self.property_)):
out.putVarInt32(19)
self.property_[i].OutputUnchecked(out)
out.putVarInt32(20)
out.putVarInt32(40)
out.putBoolean(self.ancestor_)
def OutputPartial(self, out):
if (self.has_entity_type_):
out.putVarInt32(10)
out.putPrefixedString(self.entity_type_)
for i in xrange(len(self.property_)):
out.putVarInt32(19)
self.property_[i].OutputPartial(out)
out.putVarInt32(20)
if (self.has_ancestor_):
out.putVarInt32(40)
out.putBoolean(self.ancestor_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_entity_type(d.getPrefixedString())
continue
if tt == 19:
self.add_property().TryMerge(d)
continue
if tt == 40:
self.set_ancestor(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_entity_type_: res+=prefix+("entity_type: %s\n" % self.DebugFormatString(self.entity_type_))
if self.has_ancestor_: res+=prefix+("ancestor: %s\n" % self.DebugFormatBool(self.ancestor_))
cnt=0
for e in self.property_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("Property%s {\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+"}\n"
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kentity_type = 1
kancestor = 5
kPropertyGroup = 2
kPropertyname = 3
kPropertydirection = 4
kPropertymode = 6
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "entity_type",
2: "Property",
3: "name",
4: "direction",
5: "ancestor",
6: "mode",
}, 6)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STARTGROUP,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
5: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
}, 6, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.Index'
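# CompositeIndex wraps an Index definition with bookkeeping: the owning
# app_id, a numeric id, a lifecycle state (WRITE_ONLY/READ_WRITE/DELETED/
# ERROR), an optional workflow_state and error_message, and division-family
# routing strings. app_id, id, definition and state are all required:
#
#   ci = CompositeIndex()
#   ci.set_app_id('app')
#   ci.set_id(1)
#   ci.mutable_definition().set_entity_type('Order')
#   ci.set_state(CompositeIndex.READ_WRITE)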
class CompositeIndex(ProtocolBuffer.ProtocolMessage):
WRITE_ONLY = 1
READ_WRITE = 2
DELETED = 3
ERROR = 4
_State_NAMES = {
1: "WRITE_ONLY",
2: "READ_WRITE",
3: "DELETED",
4: "ERROR",
}
def State_Name(cls, x): return cls._State_NAMES.get(x, "")
State_Name = classmethod(State_Name)
PENDING = 1
ACTIVE = 2
COMPLETED = 3
_WorkflowState_NAMES = {
1: "PENDING",
2: "ACTIVE",
3: "COMPLETED",
}
def WorkflowState_Name(cls, x): return cls._WorkflowState_NAMES.get(x, "")
WorkflowState_Name = classmethod(WorkflowState_Name)
has_app_id_ = 0
app_id_ = ""
has_id_ = 0
id_ = 0
has_definition_ = 0
has_state_ = 0
state_ = 0
has_workflow_state_ = 0
workflow_state_ = 0
has_error_message_ = 0
error_message_ = ""
has_only_use_if_required_ = 0
only_use_if_required_ = 0
has_disabled_index_ = 0
disabled_index_ = 0
has_write_division_family_ = 0
write_division_family_ = ""
def __init__(self, contents=None):
self.definition_ = Index()
self.read_division_family_ = []
if contents is not None: self.MergeFromString(contents)
def app_id(self): return self.app_id_
def set_app_id(self, x):
self.has_app_id_ = 1
self.app_id_ = x
def clear_app_id(self):
if self.has_app_id_:
self.has_app_id_ = 0
self.app_id_ = ""
def has_app_id(self): return self.has_app_id_
def id(self): return self.id_
def set_id(self, x):
self.has_id_ = 1
self.id_ = x
def clear_id(self):
if self.has_id_:
self.has_id_ = 0
self.id_ = 0
def has_id(self): return self.has_id_
def definition(self): return self.definition_
def mutable_definition(self): self.has_definition_ = 1; return self.definition_
def clear_definition(self):self.has_definition_ = 0; self.definition_.Clear()
def has_definition(self): return self.has_definition_
def state(self): return self.state_
def set_state(self, x):
self.has_state_ = 1
self.state_ = x
def clear_state(self):
if self.has_state_:
self.has_state_ = 0
self.state_ = 0
def has_state(self): return self.has_state_
def workflow_state(self): return self.workflow_state_
def set_workflow_state(self, x):
self.has_workflow_state_ = 1
self.workflow_state_ = x
def clear_workflow_state(self):
if self.has_workflow_state_:
self.has_workflow_state_ = 0
self.workflow_state_ = 0
def has_workflow_state(self): return self.has_workflow_state_
def error_message(self): return self.error_message_
def set_error_message(self, x):
self.has_error_message_ = 1
self.error_message_ = x
def clear_error_message(self):
if self.has_error_message_:
self.has_error_message_ = 0
self.error_message_ = ""
def has_error_message(self): return self.has_error_message_
def only_use_if_required(self): return self.only_use_if_required_
def set_only_use_if_required(self, x):
self.has_only_use_if_required_ = 1
self.only_use_if_required_ = x
def clear_only_use_if_required(self):
if self.has_only_use_if_required_:
self.has_only_use_if_required_ = 0
self.only_use_if_required_ = 0
def has_only_use_if_required(self): return self.has_only_use_if_required_
def disabled_index(self): return self.disabled_index_
def set_disabled_index(self, x):
self.has_disabled_index_ = 1
self.disabled_index_ = x
def clear_disabled_index(self):
if self.has_disabled_index_:
self.has_disabled_index_ = 0
self.disabled_index_ = 0
def has_disabled_index(self): return self.has_disabled_index_
def read_division_family_size(self): return len(self.read_division_family_)
def read_division_family_list(self): return self.read_division_family_
def read_division_family(self, i):
return self.read_division_family_[i]
def set_read_division_family(self, i, x):
self.read_division_family_[i] = x
def add_read_division_family(self, x):
self.read_division_family_.append(x)
def clear_read_division_family(self):
self.read_division_family_ = []
def write_division_family(self): return self.write_division_family_
def set_write_division_family(self, x):
self.has_write_division_family_ = 1
self.write_division_family_ = x
def clear_write_division_family(self):
if self.has_write_division_family_:
self.has_write_division_family_ = 0
self.write_division_family_ = ""
def has_write_division_family(self): return self.has_write_division_family_
def MergeFrom(self, x):
assert x is not self
if (x.has_app_id()): self.set_app_id(x.app_id())
if (x.has_id()): self.set_id(x.id())
if (x.has_definition()): self.mutable_definition().MergeFrom(x.definition())
if (x.has_state()): self.set_state(x.state())
if (x.has_workflow_state()): self.set_workflow_state(x.workflow_state())
if (x.has_error_message()): self.set_error_message(x.error_message())
if (x.has_only_use_if_required()): self.set_only_use_if_required(x.only_use_if_required())
if (x.has_disabled_index()): self.set_disabled_index(x.disabled_index())
for i in xrange(x.read_division_family_size()): self.add_read_division_family(x.read_division_family(i))
if (x.has_write_division_family()): self.set_write_division_family(x.write_division_family())
def Equals(self, x):
if x is self: return 1
if self.has_app_id_ != x.has_app_id_: return 0
if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
if self.has_id_ != x.has_id_: return 0
if self.has_id_ and self.id_ != x.id_: return 0
if self.has_definition_ != x.has_definition_: return 0
if self.has_definition_ and self.definition_ != x.definition_: return 0
if self.has_state_ != x.has_state_: return 0
if self.has_state_ and self.state_ != x.state_: return 0
if self.has_workflow_state_ != x.has_workflow_state_: return 0
if self.has_workflow_state_ and self.workflow_state_ != x.workflow_state_: return 0
if self.has_error_message_ != x.has_error_message_: return 0
if self.has_error_message_ and self.error_message_ != x.error_message_: return 0
if self.has_only_use_if_required_ != x.has_only_use_if_required_: return 0
if self.has_only_use_if_required_ and self.only_use_if_required_ != x.only_use_if_required_: return 0
if self.has_disabled_index_ != x.has_disabled_index_: return 0
if self.has_disabled_index_ and self.disabled_index_ != x.disabled_index_: return 0
if len(self.read_division_family_) != len(x.read_division_family_): return 0
for e1, e2 in zip(self.read_division_family_, x.read_division_family_):
if e1 != e2: return 0
if self.has_write_division_family_ != x.has_write_division_family_: return 0
if self.has_write_division_family_ and self.write_division_family_ != x.write_division_family_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: app_id not set.')
if (not self.has_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: id not set.')
if (not self.has_definition_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: definition not set.')
elif not self.definition_.IsInitialized(debug_strs): initialized = 0
if (not self.has_state_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: state not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.app_id_))
n += self.lengthVarInt64(self.id_)
n += self.lengthString(self.definition_.ByteSize())
n += self.lengthVarInt64(self.state_)
if (self.has_workflow_state_): n += 1 + self.lengthVarInt64(self.workflow_state_)
if (self.has_error_message_): n += 1 + self.lengthString(len(self.error_message_))
if (self.has_only_use_if_required_): n += 2
if (self.has_disabled_index_): n += 2
n += 1 * len(self.read_division_family_)
for i in xrange(len(self.read_division_family_)): n += self.lengthString(len(self.read_division_family_[i]))
if (self.has_write_division_family_): n += 1 + self.lengthString(len(self.write_division_family_))
return n + 4
def ByteSizePartial(self):
n = 0
if (self.has_app_id_):
n += 1
n += self.lengthString(len(self.app_id_))
if (self.has_id_):
n += 1
n += self.lengthVarInt64(self.id_)
if (self.has_definition_):
n += 1
n += self.lengthString(self.definition_.ByteSizePartial())
if (self.has_state_):
n += 1
n += self.lengthVarInt64(self.state_)
if (self.has_workflow_state_): n += 1 + self.lengthVarInt64(self.workflow_state_)
if (self.has_error_message_): n += 1 + self.lengthString(len(self.error_message_))
if (self.has_only_use_if_required_): n += 2
if (self.has_disabled_index_): n += 2
n += 1 * len(self.read_division_family_)
for i in xrange(len(self.read_division_family_)): n += self.lengthString(len(self.read_division_family_[i]))
if (self.has_write_division_family_): n += 1 + self.lengthString(len(self.write_division_family_))
return n
def Clear(self):
self.clear_app_id()
self.clear_id()
self.clear_definition()
self.clear_state()
self.clear_workflow_state()
self.clear_error_message()
self.clear_only_use_if_required()
self.clear_disabled_index()
self.clear_read_division_family()
self.clear_write_division_family()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
out.putVarInt32(16)
out.putVarInt64(self.id_)
out.putVarInt32(26)
out.putVarInt32(self.definition_.ByteSize())
self.definition_.OutputUnchecked(out)
out.putVarInt32(32)
out.putVarInt32(self.state_)
if (self.has_only_use_if_required_):
out.putVarInt32(48)
out.putBoolean(self.only_use_if_required_)
for i in xrange(len(self.read_division_family_)):
out.putVarInt32(58)
out.putPrefixedString(self.read_division_family_[i])
if (self.has_write_division_family_):
out.putVarInt32(66)
out.putPrefixedString(self.write_division_family_)
if (self.has_disabled_index_):
out.putVarInt32(72)
out.putBoolean(self.disabled_index_)
if (self.has_workflow_state_):
out.putVarInt32(80)
out.putVarInt32(self.workflow_state_)
if (self.has_error_message_):
out.putVarInt32(90)
out.putPrefixedString(self.error_message_)
def OutputPartial(self, out):
if (self.has_app_id_):
out.putVarInt32(10)
out.putPrefixedString(self.app_id_)
if (self.has_id_):
out.putVarInt32(16)
out.putVarInt64(self.id_)
if (self.has_definition_):
out.putVarInt32(26)
out.putVarInt32(self.definition_.ByteSizePartial())
self.definition_.OutputPartial(out)
if (self.has_state_):
out.putVarInt32(32)
out.putVarInt32(self.state_)
if (self.has_only_use_if_required_):
out.putVarInt32(48)
out.putBoolean(self.only_use_if_required_)
for i in xrange(len(self.read_division_family_)):
out.putVarInt32(58)
out.putPrefixedString(self.read_division_family_[i])
if (self.has_write_division_family_):
out.putVarInt32(66)
out.putPrefixedString(self.write_division_family_)
if (self.has_disabled_index_):
out.putVarInt32(72)
out.putBoolean(self.disabled_index_)
if (self.has_workflow_state_):
out.putVarInt32(80)
out.putVarInt32(self.workflow_state_)
if (self.has_error_message_):
out.putVarInt32(90)
out.putPrefixedString(self.error_message_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_app_id(d.getPrefixedString())
continue
if tt == 16:
self.set_id(d.getVarInt64())
continue
if tt == 26:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_definition().TryMerge(tmp)
continue
if tt == 32:
self.set_state(d.getVarInt32())
continue
if tt == 48:
self.set_only_use_if_required(d.getBoolean())
continue
if tt == 58:
self.add_read_division_family(d.getPrefixedString())
continue
if tt == 66:
self.set_write_division_family(d.getPrefixedString())
continue
if tt == 72:
self.set_disabled_index(d.getBoolean())
continue
if tt == 80:
self.set_workflow_state(d.getVarInt32())
continue
if tt == 90:
self.set_error_message(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
if self.has_id_: res+=prefix+("id: %s\n" % self.DebugFormatInt64(self.id_))
if self.has_definition_:
res+=prefix+"definition <\n"
res+=self.definition_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_state_: res+=prefix+("state: %s\n" % self.DebugFormatInt32(self.state_))
if self.has_workflow_state_: res+=prefix+("workflow_state: %s\n" % self.DebugFormatInt32(self.workflow_state_))
if self.has_error_message_: res+=prefix+("error_message: %s\n" % self.DebugFormatString(self.error_message_))
if self.has_only_use_if_required_: res+=prefix+("only_use_if_required: %s\n" % self.DebugFormatBool(self.only_use_if_required_))
if self.has_disabled_index_: res+=prefix+("disabled_index: %s\n" % self.DebugFormatBool(self.disabled_index_))
cnt=0
for e in self.read_division_family_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("read_division_family%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_write_division_family_: res+=prefix+("write_division_family: %s\n" % self.DebugFormatString(self.write_division_family_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kapp_id = 1
kid = 2
kdefinition = 3
kstate = 4
kworkflow_state = 10
kerror_message = 11
konly_use_if_required = 6
kdisabled_index = 9
kread_division_family = 7
kwrite_division_family = 8
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "app_id",
2: "id",
3: "definition",
4: "state",
6: "only_use_if_required",
7: "read_division_family",
8: "write_division_family",
9: "disabled_index",
10: "workflow_state",
11: "error_message",
}, 11)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.STRING,
4: ProtocolBuffer.Encoder.NUMERIC,
6: ProtocolBuffer.Encoder.NUMERIC,
7: ProtocolBuffer.Encoder.STRING,
8: ProtocolBuffer.Encoder.STRING,
9: ProtocolBuffer.Encoder.NUMERIC,
10: ProtocolBuffer.Encoder.NUMERIC,
11: ProtocolBuffer.Encoder.STRING,
}, 11, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.CompositeIndex'
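# SearchIndexEntry records a required index_id and write_division_family plus
# two optional 64-bit fingerprints. Tags 25 and 33 use the fixed 8-byte wire
# format, hence the "n += 9" (1 tag byte + 8 data bytes) in ByteSize:
#
#   se = SearchIndexEntry()
#   se.set_index_id(3)
#   se.set_write_division_family('default')
#   se.set_fingerprint_1999(0x1234)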
class SearchIndexEntry(ProtocolBuffer.ProtocolMessage):
has_index_id_ = 0
index_id_ = 0
has_write_division_family_ = 0
write_division_family_ = ""
has_fingerprint_1999_ = 0
fingerprint_1999_ = 0
has_fingerprint_2011_ = 0
fingerprint_2011_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def index_id(self): return self.index_id_
def set_index_id(self, x):
self.has_index_id_ = 1
self.index_id_ = x
def clear_index_id(self):
if self.has_index_id_:
self.has_index_id_ = 0
self.index_id_ = 0
def has_index_id(self): return self.has_index_id_
def write_division_family(self): return self.write_division_family_
def set_write_division_family(self, x):
self.has_write_division_family_ = 1
self.write_division_family_ = x
def clear_write_division_family(self):
if self.has_write_division_family_:
self.has_write_division_family_ = 0
self.write_division_family_ = ""
def has_write_division_family(self): return self.has_write_division_family_
def fingerprint_1999(self): return self.fingerprint_1999_
def set_fingerprint_1999(self, x):
self.has_fingerprint_1999_ = 1
self.fingerprint_1999_ = x
def clear_fingerprint_1999(self):
if self.has_fingerprint_1999_:
self.has_fingerprint_1999_ = 0
self.fingerprint_1999_ = 0
def has_fingerprint_1999(self): return self.has_fingerprint_1999_
def fingerprint_2011(self): return self.fingerprint_2011_
def set_fingerprint_2011(self, x):
self.has_fingerprint_2011_ = 1
self.fingerprint_2011_ = x
def clear_fingerprint_2011(self):
if self.has_fingerprint_2011_:
self.has_fingerprint_2011_ = 0
self.fingerprint_2011_ = 0
def has_fingerprint_2011(self): return self.has_fingerprint_2011_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_id()): self.set_index_id(x.index_id())
if (x.has_write_division_family()): self.set_write_division_family(x.write_division_family())
if (x.has_fingerprint_1999()): self.set_fingerprint_1999(x.fingerprint_1999())
if (x.has_fingerprint_2011()): self.set_fingerprint_2011(x.fingerprint_2011())
def Equals(self, x):
if x is self: return 1
if self.has_index_id_ != x.has_index_id_: return 0
if self.has_index_id_ and self.index_id_ != x.index_id_: return 0
if self.has_write_division_family_ != x.has_write_division_family_: return 0
if self.has_write_division_family_ and self.write_division_family_ != x.write_division_family_: return 0
if self.has_fingerprint_1999_ != x.has_fingerprint_1999_: return 0
if self.has_fingerprint_1999_ and self.fingerprint_1999_ != x.fingerprint_1999_: return 0
if self.has_fingerprint_2011_ != x.has_fingerprint_2011_: return 0
if self.has_fingerprint_2011_ and self.fingerprint_2011_ != x.fingerprint_2011_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_id not set.')
if (not self.has_write_division_family_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: write_division_family not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.index_id_)
n += self.lengthString(len(self.write_division_family_))
if (self.has_fingerprint_1999_): n += 9
if (self.has_fingerprint_2011_): n += 9
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_index_id_):
n += 1
n += self.lengthVarInt64(self.index_id_)
if (self.has_write_division_family_):
n += 1
n += self.lengthString(len(self.write_division_family_))
if (self.has_fingerprint_1999_): n += 9
if (self.has_fingerprint_2011_): n += 9
return n
def Clear(self):
self.clear_index_id()
self.clear_write_division_family()
self.clear_fingerprint_1999()
self.clear_fingerprint_2011()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.index_id_)
out.putVarInt32(18)
out.putPrefixedString(self.write_division_family_)
if (self.has_fingerprint_1999_):
out.putVarInt32(25)
out.put64(self.fingerprint_1999_)
if (self.has_fingerprint_2011_):
out.putVarInt32(33)
out.put64(self.fingerprint_2011_)
def OutputPartial(self, out):
if (self.has_index_id_):
out.putVarInt32(8)
out.putVarInt64(self.index_id_)
if (self.has_write_division_family_):
out.putVarInt32(18)
out.putPrefixedString(self.write_division_family_)
if (self.has_fingerprint_1999_):
out.putVarInt32(25)
out.put64(self.fingerprint_1999_)
if (self.has_fingerprint_2011_):
out.putVarInt32(33)
out.put64(self.fingerprint_2011_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_index_id(d.getVarInt64())
continue
if tt == 18:
self.set_write_division_family(d.getPrefixedString())
continue
if tt == 25:
self.set_fingerprint_1999(d.get64())
continue
if tt == 33:
self.set_fingerprint_2011(d.get64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_id_: res+=prefix+("index_id: %s\n" % self.DebugFormatInt64(self.index_id_))
if self.has_write_division_family_: res+=prefix+("write_division_family: %s\n" % self.DebugFormatString(self.write_division_family_))
if self.has_fingerprint_1999_: res+=prefix+("fingerprint_1999: %s\n" % self.DebugFormatFixed64(self.fingerprint_1999_))
if self.has_fingerprint_2011_: res+=prefix+("fingerprint_2011: %s\n" % self.DebugFormatFixed64(self.fingerprint_2011_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_id = 1
kwrite_division_family = 2
kfingerprint_1999 = 3
kfingerprint_2011 = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_id",
2: "write_division_family",
3: "fingerprint_1999",
4: "fingerprint_2011",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.DOUBLE,
4: ProtocolBuffer.Encoder.DOUBLE,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.SearchIndexEntry'
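# SearchIndexExternalId joins a required index_id to the Reference primary
# key of the row it points at:
#
#   x = SearchIndexExternalId()
#   x.set_index_id(7)
#   x.mutable_primary_key()     # required; its own fields still need filling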
class SearchIndexExternalId(ProtocolBuffer.ProtocolMessage):
has_index_id_ = 0
index_id_ = 0
has_primary_key_ = 0
def __init__(self, contents=None):
self.primary_key_ = Reference()
if contents is not None: self.MergeFromString(contents)
def index_id(self): return self.index_id_
def set_index_id(self, x):
self.has_index_id_ = 1
self.index_id_ = x
def clear_index_id(self):
if self.has_index_id_:
self.has_index_id_ = 0
self.index_id_ = 0
def has_index_id(self): return self.has_index_id_
def primary_key(self): return self.primary_key_
def mutable_primary_key(self): self.has_primary_key_ = 1; return self.primary_key_
def clear_primary_key(self):self.has_primary_key_ = 0; self.primary_key_.Clear()
def has_primary_key(self): return self.has_primary_key_
def MergeFrom(self, x):
assert x is not self
if (x.has_index_id()): self.set_index_id(x.index_id())
if (x.has_primary_key()): self.mutable_primary_key().MergeFrom(x.primary_key())
def Equals(self, x):
if x is self: return 1
if self.has_index_id_ != x.has_index_id_: return 0
if self.has_index_id_ and self.index_id_ != x.index_id_: return 0
if self.has_primary_key_ != x.has_primary_key_: return 0
if self.has_primary_key_ and self.primary_key_ != x.primary_key_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_index_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: index_id not set.')
if (not self.has_primary_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: primary_key not set.')
elif not self.primary_key_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthVarInt64(self.index_id_)
n += self.lengthString(self.primary_key_.ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_index_id_):
n += 1
n += self.lengthVarInt64(self.index_id_)
if (self.has_primary_key_):
n += 1
n += self.lengthString(self.primary_key_.ByteSizePartial())
return n
def Clear(self):
self.clear_index_id()
self.clear_primary_key()
def OutputUnchecked(self, out):
out.putVarInt32(8)
out.putVarInt64(self.index_id_)
out.putVarInt32(18)
out.putVarInt32(self.primary_key_.ByteSize())
self.primary_key_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_index_id_):
out.putVarInt32(8)
out.putVarInt64(self.index_id_)
if (self.has_primary_key_):
out.putVarInt32(18)
out.putVarInt32(self.primary_key_.ByteSizePartial())
self.primary_key_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8:
self.set_index_id(d.getVarInt64())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_primary_key().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_index_id_: res+=prefix+("index_id: %s\n" % self.DebugFormatInt64(self.index_id_))
if self.has_primary_key_:
res+=prefix+"primary_key <\n"
res+=self.primary_key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_id = 1
kprimary_key = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_id",
2: "primary_key",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.NUMERIC,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.SearchIndexExternalId'
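# IndexPostfix_IndexValue is one (property_name, PropertyValue) pair used by
# IndexPostfix below; both fields are required:
#
#   iv = IndexPostfix_IndexValue()
#   iv.set_property_name('price')
#   iv.mutable_value()          # returns the nested PropertyValue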
class IndexPostfix_IndexValue(ProtocolBuffer.ProtocolMessage):
has_property_name_ = 0
property_name_ = ""
has_value_ = 0
def __init__(self, contents=None):
self.value_ = PropertyValue()
if contents is not None: self.MergeFromString(contents)
def property_name(self): return self.property_name_
def set_property_name(self, x):
self.has_property_name_ = 1
self.property_name_ = x
def clear_property_name(self):
if self.has_property_name_:
self.has_property_name_ = 0
self.property_name_ = ""
def has_property_name(self): return self.has_property_name_
def value(self): return self.value_
def mutable_value(self): self.has_value_ = 1; return self.value_
def clear_value(self):self.has_value_ = 0; self.value_.Clear()
def has_value(self): return self.has_value_
def MergeFrom(self, x):
assert x is not self
if (x.has_property_name()): self.set_property_name(x.property_name())
if (x.has_value()): self.mutable_value().MergeFrom(x.value())
def Equals(self, x):
if x is self: return 1
if self.has_property_name_ != x.has_property_name_: return 0
if self.has_property_name_ and self.property_name_ != x.property_name_: return 0
if self.has_value_ != x.has_value_: return 0
if self.has_value_ and self.value_ != x.value_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_property_name_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: property_name not set.')
if (not self.has_value_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: value not set.')
elif not self.value_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.property_name_))
n += self.lengthString(self.value_.ByteSize())
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_property_name_):
n += 1
n += self.lengthString(len(self.property_name_))
if (self.has_value_):
n += 1
n += self.lengthString(self.value_.ByteSizePartial())
return n
def Clear(self):
self.clear_property_name()
self.clear_value()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.property_name_)
out.putVarInt32(18)
out.putVarInt32(self.value_.ByteSize())
self.value_.OutputUnchecked(out)
def OutputPartial(self, out):
if (self.has_property_name_):
out.putVarInt32(10)
out.putPrefixedString(self.property_name_)
if (self.has_value_):
out.putVarInt32(18)
out.putVarInt32(self.value_.ByteSizePartial())
self.value_.OutputPartial(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_property_name(d.getPrefixedString())
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_value().TryMerge(tmp)
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_property_name_: res+=prefix+("property_name: %s\n" % self.DebugFormatString(self.property_name_))
if self.has_value_:
res+=prefix+"value <\n"
res+=self.value_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kproperty_name = 1
kvalue = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "property_name",
2: "value",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.IndexPostfix_IndexValue'
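# IndexPostfix expresses a position in an index (a query-cursor style value,
# as far as the wire shape shows): repeated (name, value) pairs, an optional
# entity key, and a "before" flag whose default is 1, not 0; note that
# clear_before() resets it back to 1:
#
#   pos = IndexPostfix()
#   pos.add_index_value().set_property_name('price')
#   pos.set_before(0)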
class IndexPostfix(ProtocolBuffer.ProtocolMessage):
has_key_ = 0
key_ = None
has_before_ = 0
before_ = 1
has_before_ascending_ = 0
before_ascending_ = 0
def __init__(self, contents=None):
self.index_value_ = []
self.lazy_init_lock_ = thread.allocate_lock()
if contents is not None: self.MergeFromString(contents)
def index_value_size(self): return len(self.index_value_)
def index_value_list(self): return self.index_value_
def index_value(self, i):
return self.index_value_[i]
def mutable_index_value(self, i):
return self.index_value_[i]
def add_index_value(self):
x = IndexPostfix_IndexValue()
self.index_value_.append(x)
return x
def clear_index_value(self):
self.index_value_ = []
def key(self):
if self.key_ is None:
self.lazy_init_lock_.acquire()
try:
if self.key_ is None: self.key_ = Reference()
finally:
self.lazy_init_lock_.release()
return self.key_
def mutable_key(self): self.has_key_ = 1; return self.key()
def clear_key(self):
if self.has_key_:
self.has_key_ = 0;
if self.key_ is not None: self.key_.Clear()
def has_key(self): return self.has_key_
def before(self): return self.before_
def set_before(self, x):
self.has_before_ = 1
self.before_ = x
def clear_before(self):
if self.has_before_:
self.has_before_ = 0
self.before_ = 1
def has_before(self): return self.has_before_
def before_ascending(self): return self.before_ascending_
def set_before_ascending(self, x):
self.has_before_ascending_ = 1
self.before_ascending_ = x
def clear_before_ascending(self):
if self.has_before_ascending_:
self.has_before_ascending_ = 0
self.before_ascending_ = 0
def has_before_ascending(self): return self.has_before_ascending_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.index_value_size()): self.add_index_value().CopyFrom(x.index_value(i))
if (x.has_key()): self.mutable_key().MergeFrom(x.key())
if (x.has_before()): self.set_before(x.before())
if (x.has_before_ascending()): self.set_before_ascending(x.before_ascending())
def Equals(self, x):
if x is self: return 1
if len(self.index_value_) != len(x.index_value_): return 0
for e1, e2 in zip(self.index_value_, x.index_value_):
if e1 != e2: return 0
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_before_ != x.has_before_: return 0
if self.has_before_ and self.before_ != x.before_: return 0
if self.has_before_ascending_ != x.has_before_ascending_: return 0
if self.has_before_ascending_ and self.before_ascending_ != x.before_ascending_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
for p in self.index_value_:
if not p.IsInitialized(debug_strs): initialized=0
if (self.has_key_ and not self.key_.IsInitialized(debug_strs)): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.index_value_)
for i in xrange(len(self.index_value_)): n += self.lengthString(self.index_value_[i].ByteSize())
if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSize())
if (self.has_before_): n += 2
if (self.has_before_ascending_): n += 2
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.index_value_)
for i in xrange(len(self.index_value_)): n += self.lengthString(self.index_value_[i].ByteSizePartial())
if (self.has_key_): n += 1 + self.lengthString(self.key_.ByteSizePartial())
if (self.has_before_): n += 2
if (self.has_before_ascending_): n += 2
return n
def Clear(self):
self.clear_index_value()
self.clear_key()
self.clear_before()
self.clear_before_ascending()
def OutputUnchecked(self, out):
for i in xrange(len(self.index_value_)):
out.putVarInt32(10)
out.putVarInt32(self.index_value_[i].ByteSize())
self.index_value_[i].OutputUnchecked(out)
if (self.has_key_):
out.putVarInt32(18)
out.putVarInt32(self.key_.ByteSize())
self.key_.OutputUnchecked(out)
if (self.has_before_):
out.putVarInt32(24)
out.putBoolean(self.before_)
if (self.has_before_ascending_):
out.putVarInt32(32)
out.putBoolean(self.before_ascending_)
def OutputPartial(self, out):
for i in xrange(len(self.index_value_)):
out.putVarInt32(10)
out.putVarInt32(self.index_value_[i].ByteSizePartial())
self.index_value_[i].OutputPartial(out)
if (self.has_key_):
out.putVarInt32(18)
out.putVarInt32(self.key_.ByteSizePartial())
self.key_.OutputPartial(out)
if (self.has_before_):
out.putVarInt32(24)
out.putBoolean(self.before_)
if (self.has_before_ascending_):
out.putVarInt32(32)
out.putBoolean(self.before_ascending_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.add_index_value().TryMerge(tmp)
continue
if tt == 18:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_key().TryMerge(tmp)
continue
if tt == 24:
self.set_before(d.getBoolean())
continue
if tt == 32:
self.set_before_ascending(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.index_value_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("index_value%s <\n" % elm)
res+=e.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
cnt+=1
if self.has_key_:
res+=prefix+"key <\n"
res+=self.key_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
if self.has_before_: res+=prefix+("before: %s\n" % self.DebugFormatBool(self.before_))
if self.has_before_ascending_: res+=prefix+("before_ascending: %s\n" % self.DebugFormatBool(self.before_ascending_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kindex_value = 1
kkey = 2
kbefore = 3
kbefore_ascending = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "index_value",
2: "key",
3: "before",
4: "before_ascending",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.NUMERIC,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.IndexPostfix'
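# IndexPosition is the compact variant of the same idea: the position is a
# single opaque key string rather than decoded (name, value) pairs, again
# with a "before" flag that defaults to 1:
#
#   ipos = IndexPosition()
#   ipos.set_key('opaque-cursor-bytes')
#   ipos.set_before(1)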
class IndexPosition(ProtocolBuffer.ProtocolMessage):
has_key_ = 0
key_ = ""
has_before_ = 0
before_ = 1
has_before_ascending_ = 0
before_ascending_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def key(self): return self.key_
def set_key(self, x):
self.has_key_ = 1
self.key_ = x
def clear_key(self):
if self.has_key_:
self.has_key_ = 0
self.key_ = ""
def has_key(self): return self.has_key_
def before(self): return self.before_
def set_before(self, x):
self.has_before_ = 1
self.before_ = x
def clear_before(self):
if self.has_before_:
self.has_before_ = 0
self.before_ = 1
def has_before(self): return self.has_before_
def before_ascending(self): return self.before_ascending_
def set_before_ascending(self, x):
self.has_before_ascending_ = 1
self.before_ascending_ = x
def clear_before_ascending(self):
if self.has_before_ascending_:
self.has_before_ascending_ = 0
self.before_ascending_ = 0
def has_before_ascending(self): return self.has_before_ascending_
def MergeFrom(self, x):
assert x is not self
if (x.has_key()): self.set_key(x.key())
if (x.has_before()): self.set_before(x.before())
if (x.has_before_ascending()): self.set_before_ascending(x.before_ascending())
def Equals(self, x):
if x is self: return 1
if self.has_key_ != x.has_key_: return 0
if self.has_key_ and self.key_ != x.key_: return 0
if self.has_before_ != x.has_before_: return 0
if self.has_before_ and self.before_ != x.before_: return 0
if self.has_before_ascending_ != x.has_before_ascending_: return 0
if self.has_before_ascending_ and self.before_ascending_ != x.before_ascending_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
if (self.has_key_): n += 1 + self.lengthString(len(self.key_))
if (self.has_before_): n += 2
if (self.has_before_ascending_): n += 2
return n
def ByteSizePartial(self):
n = 0
if (self.has_key_): n += 1 + self.lengthString(len(self.key_))
if (self.has_before_): n += 2
if (self.has_before_ascending_): n += 2
return n
def Clear(self):
self.clear_key()
self.clear_before()
self.clear_before_ascending()
def OutputUnchecked(self, out):
if (self.has_key_):
out.putVarInt32(10)
out.putPrefixedString(self.key_)
if (self.has_before_):
out.putVarInt32(16)
out.putBoolean(self.before_)
if (self.has_before_ascending_):
out.putVarInt32(24)
out.putBoolean(self.before_ascending_)
def OutputPartial(self, out):
if (self.has_key_):
out.putVarInt32(10)
out.putPrefixedString(self.key_)
if (self.has_before_):
out.putVarInt32(16)
out.putBoolean(self.before_)
if (self.has_before_ascending_):
out.putVarInt32(24)
out.putBoolean(self.before_ascending_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_key(d.getPrefixedString())
continue
if tt == 16:
self.set_before(d.getBoolean())
continue
if tt == 24:
self.set_before_ascending(d.getBoolean())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_key_: res+=prefix+("key: %s\n" % self.DebugFormatString(self.key_))
if self.has_before_: res+=prefix+("before: %s\n" % self.DebugFormatBool(self.before_))
if self.has_before_ascending_: res+=prefix+("before_ascending: %s\n" % self.DebugFormatBool(self.before_ascending_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kkey = 1
kbefore = 2
kbefore_ascending = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "key",
2: "before",
3: "before_ascending",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'storage_onestore_v3.IndexPosition'
if _extension_runtime:
pass
__all__ = ['PropertyValue','PropertyValue_ReferenceValuePathElement','PropertyValue_PointValue','PropertyValue_UserValue','PropertyValue_ReferenceValue','Property','Path','Path_Element','Reference','User','EntityProto','EntityMetadata','CompositeProperty','Index','Index_Property','CompositeIndex','SearchIndexEntry','SearchIndexExternalId','IndexPostfix_IndexValue','IndexPostfix','IndexPosition']
| mit | 7,470,339,447,410,824,000 | 29.12347 | 398 | 0.639254 | false | 3.184076 | false | false | false | 0.021024 |
hyperized/ansible | lib/ansible/module_utils/common/parameters.py | 7 | 5492 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils._text import to_native
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.common.collections import is_iterable
from ansible.module_utils.six import (
binary_type,
integer_types,
text_type,
)
# Python2 & 3 way to get NoneType
NoneType = type(None)
# if adding a boolean attribute, also add it to PASS_BOOLS
# some of this duplicates defaults from the controller config
PASS_VARS = {
'check_mode': ('check_mode', False),
'debug': ('_debug', False),
'diff': ('_diff', False),
'keep_remote_files': ('_keep_remote_files', False),
'module_name': ('_name', None),
'no_log': ('no_log', False),
'remote_tmp': ('_remote_tmp', None),
'selinux_special_fs': ('_selinux_special_fs', ['fuse', 'nfs', 'vboxsf', 'ramfs', '9p', 'vfat']),
'shell_executable': ('_shell', '/bin/sh'),
'socket': ('_socket_path', None),
'string_conversion_action': ('_string_conversion_action', 'warn'),
'syslog_facility': ('_syslog_facility', 'INFO'),
'tmpdir': ('_tmpdir', None),
'verbosity': ('_verbosity', 0),
'version': ('ansible_version', '0.0'),
}
PASS_BOOLS = ('check_mode', 'debug', 'diff', 'keep_remote_files', 'no_log')
def _return_datastructure_name(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in _return_datastructure_name(element[1]):
yield subelement
elif is_iterable(obj):
for element in obj:
for subelement in _return_datastructure_name(element):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, tuple(list(integer_types) + [float])):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
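# An illustration of what _return_datastructure_name() yields (example values
# only, not part of the module): for a nested parameter value such as
#   {'token': 'abc123', 'ports': [22, 80], 'verbose': True}
# it yields the native strings 'abc123', '22' and '80'; the bool is skipped,
# since flag values never count as sensitive strings.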
def list_no_log_values(argument_spec, params):
"""Return set of no log values
:arg argument_spec: An argument spec dictionary from a module
:arg params: Dictionary of all module parameters
:returns: Set of strings that should be hidden from output::
{'secret_dict_value', 'secret_list_item_one', 'secret_list_item_two', 'secret_string'}
"""
no_log_values = set()
for arg_name, arg_opts in argument_spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = params.get(arg_name, None)
if no_log_object:
no_log_values.update(_return_datastructure_name(no_log_object))
return no_log_values
def list_deprecations(argument_spec, params):
"""Return a list of deprecations
:arg argument_spec: An argument spec dictionary from a module
:arg params: Dictionary of all module parameters
:returns: List of dictionaries containing a message and version in which
the deprecated parameter will be removed, or an empty list::
[{'msg': "Param 'deptest' is deprecated. See the module docs for more information", 'version': '2.9'}]
"""
deprecations = []
for arg_name, arg_opts in argument_spec.items():
if arg_opts.get('removed_in_version') is not None and arg_name in params:
deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
return deprecations
def handle_aliases(argument_spec, params, alias_warnings=None):
"""Return a two item tuple. The first is a dictionary of aliases, the second is
a list of legal inputs.
If a list is provided to the alias_warnings parameter, it will be filled with tuples
(option, alias) in every case where both an option and its alias are specified.
"""
legal_inputs = ['_ansible_%s' % k for k in PASS_VARS]
aliases_results = {} # alias:canon
for (k, v) in argument_spec.items():
legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise ValueError("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not is_iterable(aliases) or isinstance(aliases, (binary_type, text_type)):
raise TypeError('internal error: aliases must be a list or tuple')
for alias in aliases:
legal_inputs.append(alias)
aliases_results[alias] = k
if alias in params:
if k in params and alias_warnings is not None:
alias_warnings.append((k, alias))
params[k] = params[alias]
return aliases_results, legal_inputs
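# Illustrative use of handle_aliases() (the spec and params here are invented
# for the example):
#   spec = {'path': {'aliases': ['dest', 'name']}}
#   params = {'dest': '/tmp/x'}
#   aliases, legal = handle_aliases(spec, params)
#   # aliases == {'dest': 'path', 'name': 'path'}; params now also contains
#   # {'path': '/tmp/x'}, and legal includes the '_ansible_*' pass-vars.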
| gpl-3.0 | -1,927,385,111,595,900,000 | 36.616438 | 114 | 0.636744 | false | 3.917261 | false | false | false | 0.002003 |
queer1/bitcurator | python/bc_disk_access.py | 1 | 31974 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# BitCurator
#
# This code is distributed under the terms of the GNU General Public
# License, Version 3. See the text file "COPYING" for further details
# about the terms of this license.
#
# bc_disk_access --dfxmlfile <file> --filename <outfile>
#
# 1. Ex: Cat:
# python3 bc_disk_access.py --image ~/aaa/charlie-work-usb-2009-12-11.aff \
# [--dfxmlfile ~/aaa/charlie_xml] --filename \
# Email/Charlie_2009-12-04_0941_Sent.txt --cat
#
# 2. Ex: filelist:
# $ python3 bc_disk_access.py
# --image ~/aaa/charlie-work-usb-2009-12-11.aff
# [--dfxmlfile ~/aaa/charlie_xml] \
# --listfiles
# 3. Invoked through BitCurator GUI
####################################################################
# The basic GUI is designed using PyQT4 Designer. Code manually added
# to QTreeView and for the functionality of all widgets.
# From the DFXML file, the "filename" attribute is read using the
# fiwalk.fiwalk_using_sax() API. The list of file paths is stored in
# the dictionary list fiDictList.
# To store the tree structure of the directory hierarchy, the QStandardItemModel
# class of PyQt4's Model/View framework is used:
# http://pyqt.sourceforge.net/Docs/PyQt4/qstandarditemmodel.html#details
####################################################################
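# A sample fiDictList entry, built from the DFXML attributes listed in
# BcFileStructure.acc_dict_array below (the values shown are illustrative):
# {'filename': 'Email/Charlie_2009-12-04_0941_Sent.txt', 'partition': 1,
#  'inode': '130', 'name_type': 'r', 'filesize': 1024}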
import os, fiwalk, sys
from PyQt4 import QtCore, QtGui
import subprocess
from subprocess import Popen,PIPE
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
try:
from argparse import ArgumentParser
except ImportError:
raise ImportError("This script requires ArgumentParser which is in Python 2.7 or Python 3.0")
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## app = QtGui.QApplication(sys.argv)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# init widgets
global g_model
global g_image
global g_dfxmlfile
global isGenDfxmlFile
class Ui_MainWindow(object):
def __init__(self, outdir=None):
self.outdir = outdir
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(835, 565)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.pushButton_close = QtGui.QPushButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_close.sizePolicy().hasHeightForWidth())
self.pushButton_close.setSizePolicy(sizePolicy)
self.pushButton_close.setObjectName(_fromUtf8("pushButton_close"))
self.gridLayout.addWidget(self.pushButton_close, 5, 0, 1, 1)
self.label = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 6, 1, 1)
self.textEdit = QtGui.QTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(20)
sizePolicy.setVerticalStretch(100)
sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
self.textEdit.setSizePolicy(sizePolicy)
self.textEdit.setAutoFillBackground(True)
self.textEdit.setStyleSheet(_fromUtf8("background-color: rgb(200, 206, 200);\n"
"border-color: rgb(170, 0, 0);"))
self.textEdit.setTextInteractionFlags(QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.gridLayout.addWidget(self.textEdit, 1, 6, 1, 1)
global g_textEdit
g_textEdit = self.textEdit
self.DirectoryTree = QtGui.QTreeView(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.DirectoryTree.sizePolicy().hasHeightForWidth())
# Note:
        # The following line was added in an attempt to show the horizontal
        # scroll bar automatically when the text is longer than the window.
        # With or without this line, the user still has to drag the top bar
        # to the right before the scroll bar starts working. This could be a
        # bug in the PyQt4 implementation.
self.DirectoryTree.header().setResizeMode(0, QtGui.QHeaderView.ResizeToContents)
self.DirectoryTree.setSizePolicy(sizePolicy)
self.DirectoryTree.setSizeIncrement(QtCore.QSize(0, 0))
self.DirectoryTree.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.DirectoryTree.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.DirectoryTree.setObjectName(_fromUtf8("DirectoryTree"))
self.DirectoryTree.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.gridLayout.addWidget(self.DirectoryTree, 1, 0, 1, 6)
self.model = QtGui.QStandardItemModel()
self.DirectoryTree.setModel(self.model)
self.DirectoryTree.setUniformRowHeights(True)
global g_model
g_model = self.model
g_model.setHorizontalHeaderLabels(['File Structure'])
self.pushButton_export = QtGui.QPushButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_export.sizePolicy().hasHeightForWidth())
self.pushButton_export.setSizePolicy(sizePolicy)
self.pushButton_export.setObjectName(_fromUtf8("pushButton_export"))
self.gridLayout.addWidget(self.pushButton_export, 5, 1, 1, 2)
self.pushButton_dsall = QtGui.QPushButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_dsall.sizePolicy().hasHeightForWidth())
self.pushButton_dsall.setSizePolicy(sizePolicy)
self.pushButton_dsall.setObjectName(_fromUtf8("pushButton_dsall"))
self.gridLayout.addWidget(self.pushButton_dsall, 5, 3, 1, 1)
self.pushButton_sall = QtGui.QPushButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_sall.sizePolicy().hasHeightForWidth())
self.pushButton_sall.setSizePolicy(sizePolicy)
self.pushButton_sall.setObjectName(_fromUtf8("pushButton_sall"))
self.gridLayout.addWidget(self.pushButton_sall, 5, 4, 1, 2)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 835, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionSelect_All = QtGui.QAction(MainWindow)
self.actionSelect_All.setObjectName(_fromUtf8("actionSelect_All"))
self.actionDeSelect_All = QtGui.QAction(MainWindow)
self.actionDeSelect_All.setObjectName(_fromUtf8("actionDeSelect_All"))
self.menuFile.addAction(self.actionExit)
self.menuHelp.addAction(self.actionSelect_All)
self.menuHelp.addAction(self.actionDeSelect_All)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.actionExit.triggered.connect(self.exitMenu)
self.actionSelect_All.triggered.connect(self.selectAllMenu)
self.actionDeSelect_All.triggered.connect(self.deSelectAllMenu)
# File navigation for Export
QtCore.QObject.connect(self.pushButton_export, QtCore.SIGNAL(_fromUtf8("clicked()")), self.buttonClickedExport)
# Handle the Close button
QtCore.QObject.connect(self.pushButton_close, QtCore.SIGNAL(_fromUtf8("clicked()")), self.buttonClickedClose)
# Handle the Select button
QtCore.QObject.connect(self.pushButton_sall, QtCore.SIGNAL(_fromUtf8("clicked()")), self.buttonClickedSelectAll)
# Handle the DeSelect button
QtCore.QObject.connect(self.pushButton_dsall, QtCore.SIGNAL(_fromUtf8("clicked()")), self.buttonClickedDeSelectAll)
'''
# Handle the Dump button
QtCore.QObject.connect(self.pushButton_dump, QtCore.SIGNAL(_fromUtf8("clicked()")), self.buttonClickedDump)
'''
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def exitMenu(self):
QtCore.QCoreApplication.instance().quit()
def selectAllMenu(self):
BcFileStructure.bcOperateOnFiles(BcFileStructure, 1, None)
def deSelectAllMenu(self):
BcFileStructure.bcOperateOnFiles(BcFileStructure, 0, None)
def buttonClickedClose(self):
# if dfxml file was internally generated, remove it.
global isGenDfxmlFile
if isGenDfxmlFile == True:
os.system('rm '+g_dfxmlfile)
QtCore.QCoreApplication.instance().quit()
def buttonClickedExport(self):
        # If invoked through the reports_tab GUI, the outdir provided is the
        # exportDir, so there is no need to choose again. If invoked
        # through the command line, the output directory to export the
        # checked files into is chosen now, through file navigation.
if self.outdir == None:
os.chdir(os.environ["HOME"])
exportDir = QtGui.QFileDialog.getExistingDirectory(caption="Select an Output Directory to export files")
else:
exportDir = self.outdir
## print(">> D: Output Directory Selected: ", exportDir)
self.oldstdout = sys.stdout
sys.stdout = StringIO()
# Now loop through the checked files and dump them in this directory
BcFileStructure.bcOperateOnFiles(BcFileStructure, 2, exportDir)
print(">> Copied Checked files to the directory: ", exportDir)
global g_textEdit
g_textEdit.setText( sys.stdout.getvalue() )
sys.stdout = self.oldstdout
'''
def buttonClickedDump(self):
BcFileStructure.bcOperateOnFiles(BcFileStructure, 3, None)
'''
def buttonClickedSelectAll(self):
BcFileStructure.bcOperateOnFiles(BcFileStructure, 1, None)
def buttonClickedDeSelectAll(self):
BcFileStructure.bcOperateOnFiles(BcFileStructure, 0, None)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Disk Image Access Interface", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_export.setText(QtGui.QApplication.translate("MainWindow", "Export", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_close.setText(QtGui.QApplication.translate("MainWindow", "Close", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_sall.setText(QtGui.QApplication.translate("MainWindow", "Select All", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_dsall.setText(QtGui.QApplication.translate("MainWindow", "DeSelect All", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Command Line Output", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("MainWindow", "File", None, QtGui.QApplication.UnicodeUTF8))
self.menuHelp.setTitle(QtGui.QApplication.translate("MainWindow", "Edit", None, QtGui.QApplication.UnicodeUTF8))
self.actionExit.setText(QtGui.QApplication.translate("MainWindow", "Exit", None, QtGui.QApplication.UnicodeUTF8))
self.actionSelect_All.setText(QtGui.QApplication.translate("MainWindow", "Select All", None, QtGui.QApplication.UnicodeUTF8))
self.actionDeSelect_All.setText(QtGui.QApplication.translate("MainWindow", "DeSelect All", None, QtGui.QApplication.UnicodeUTF8))
class BcFileStructure:
acc_dict_array = ["filename", "partition", "inode", "name_type", "filesize"]
fiDictList = []
parentlist = []
file_item_of = dict()
path_of = dict()
    # bcOperateOnFiles()
    # Iterate through the leaves of the file structure and check/uncheck
    # all the files: "check" set to 1 selects them all, 0 deselects them.
    # The same routine is reused with the parameter "check" set to 2
    # to dump the contents of the "checked" files to the specified output
    # directory, and again with check=3 to dump the contents of a
    # file to the textEdit window.
def bcOperateOnFiles(self, check, exportDir):
## print(">>D: LENGTH of fiDictList: ", len(self.fiDictList))
for i in range(0, len(self.fiDictList) - 1):
path = self.fiDictList[i]['filename']
if self.fiDictList[i]['name_type'] == 'd':
isdir = True
else:
isdir = False
pathlist = path.split('/')
pathlen = len(pathlist)
## print("D: Path LiSt: ", pathlist, len(pathlist))
## print("D: =================")
last_elem = pathlist[pathlen-1]
if last_elem == "." or last_elem == "..":
# Ignore . and ..
continue
if isdir == False:
# First get the name of the current file
current_fileordir = pathlist[pathlen-1]
# Now using the dict of files, file_item_of, get the item
# for this file
current_item = self.file_item_of[current_fileordir]
if check == 1:
## print("D: Setting File to Checked_state ", current_fileordir)
current_item.setCheckState(2)
elif check == 0:
current_item.setCheckState(0)
elif check == 2:
# If "check" is 2, we use this routine to dump the
# contents of the specified file to the specified output
# file.
# If this file is "checked", download its contents.
# item.checkState has 0 if not checked, 1 if partially
# checked and 2 if checked.
# http://qt.developpez.com/doc/4.6/qt/#checkstate-enum
if current_item.checkState() == 2:
## print(">> D: File %s is Checked" %current_fileordir)
if not os.path.exists(exportDir):
os.mkdir(exportDir)
pathlist = path.split('/')
oldDir = newDir = exportDir
# Iterate through the path list and make the directories
# in the path, if they don't already exist.
for k in range(0, len(pathlist)-1):
newDir = oldDir + '/' + pathlist[k]
if not os.path.exists(newDir):
os.mkdir(newDir)
oldDir = newDir
outfile = newDir + '/'+current_fileordir
## print(">> D: Writing to Outfile: ", outfile, path)
filestr.bcCatFile(path, g_image, g_dfxmlfile, True, outfile)
                    elif current_item.checkState() == 1:
                        # Partially checked items are not exported; just
                        # report them (stdout is already redirected to the
                        # StringIO set up by buttonClickedExport).
                        print("Partially checked state: ", current_item.checkState())
                        print("File %s is NOT exported" % current_fileordir)
elif check == 3:
# Dump the first checked File in textEdit window
if current_item.checkState() == 2:
print(">> D: File %s is Checked" %current_fileordir)
self.oldstdout = sys.stdout
sys.stdout = StringIO()
## print("D: >> Dumping the contents of the file ", path)
filestr.bcCatFile(path, g_image, g_dfxmlfile, False, None)
g_textEdit.setText( sys.stdout.getvalue() )
sys.stdout = self.oldstdout
# We list only the first checked file.
return
                    elif current_item.checkState() == 1:
                        # Partially checked items are not dumped; stdout is
                        # not redirected on this path, so just report them.
                        print("Partially checked state: ", current_item.checkState())
                        print("File %s is NOT Checked" % current_fileordir)
def bcHandleSpecialChars(self, filename):
#filename = filename.replace("$", "\$")
#filename = filename.replace(" ", "\ ")
#filename = filename.replace("(", "\(")
#filename = filename.replace(")", "\)")
return re.escape(filename)
def bcGetFilenameFromPath(self, path):
pathlist = path.split('/')
pathlen = len(pathlist)
filename = pathlist[pathlen-1]
# Prepend special characters with backslash
filename = self.bcHandleSpecialChars(filename)
return filename
# bcExtractFileStr()
# This routine extracts the file structure given a disk image and the
# corresponding dfxml file.
def bcExtractFileStr(self, image, dfxmlfile, outdir):
# Extract the information from dfxml file to create the
# dictionary only if it is not done before.
if len(self.fiDictList) == 0:
self.bcProcessDfxmlFileUsingSax(dfxmlfile)
## print("D: Length of dictionary fiDictList: ", len(self.fiDictList))
parent0 = QtGui.QStandardItem('Disk Image: {}'.format(image))
current_fileordir = image
parent_dir_item = parent0
global g_image
global g_dfxmlfile
g_image = re.escape(image)
g_dfxmlfile = dfxmlfile
# A dictionary item_of{} is maintained which contains each file/
# directory and its corresponding " tree item" as its value.
item_of = dict()
item_of[image] = parent0
for i in range(0, len(self.fiDictList) - 1):
path = self.fiDictList[i]['filename']
if self.fiDictList[i]['name_type'] == 'd':
isdir = True
else:
isdir = False
pathlist = path.split('/')
pathlen = len(pathlist)
## print("D: Path LiSt: ", pathlist, len(pathlist))
## print("D: =================")
last_elem = pathlist[pathlen-1]
if last_elem == "." or last_elem == "..":
# Ignore . and ..
continue
if isdir == True:
if (pathlen < 2):
# If pathlen is < 2 it is a file/dir directly off the root.
parent = parent0
else:
parent = item_of[pathlist[pathlen-2]]
current_dir = pathlist[pathlen-1]
## print("D: Set Current_dir to: ", current_dir)
current_dir_item = QtGui.QStandardItem(current_dir)
parent_dir_item.appendRow(current_dir_item)
# Save the item of this directory
item_of[current_dir] = current_dir_item
else:
# File: The file could be in any level - top level is the
# child of parent0 (disk img). The level is sensed by the
# pathlen
current_fileordir = pathlist[pathlen-1]
current_item = QtGui.QStandardItem(current_fileordir)
current_item.setCheckable(True)
current_item.setCheckState(0)
# save the "item" of each file
self.file_item_of[current_fileordir] = current_item
## print("D: Adding child to parent: ", pathlist[pathlen-2], parent0)
if pathlen > 1:
parent_dir_item = item_of[pathlist[pathlen-2]]
else:
parent_dir_item = parent0
parent_dir_item.appendRow(current_item)
parent = parent_dir_item
global g_model
g_model.appendRow(parent)
def bcCatFile(self, filename, image, dfxmlfile, redirect_file, outfile):
# Traverse the XML file, get the file_name, extract the inode number
# of the file and run icat to extract the data.
## print(">>D: bcCatFile: Filename: ", filename)
## print(">>D: bcCatFile: image: ", image)
## print(">>D: bcCatFile: dfxmlfile: ", dfxmlfile)
## print(">>D: bcCatFile: outfile: ", outfile)
# First traverse through dfxmlfile to get the block containing
# "filename" to extract the inode. Do this just once.
if len(self.fiDictList) == 0:
self.bcProcessDfxmlFileUsingSax(dfxmlfile)
## print("D: Length of fiDictList ", len(self.fiDictList))
# Dictionary is formed. Now traverse through the array and
# in each dictionary, get the inode and call iCat command.
for i in range(0, len(self.fiDictList)-1):
if (self.fiDictList[i]['filename'] == filename):
## print("D: Extracting the contents of the file:inode ", \
## filename, self.fiDictList[i]['inode'])
# First get the offset of the 2nd partition using mmls cmd
# ex: mmls -i aff ~/aaa/jo-favorites-usb-2009-12-11.aff
if image.endswith(".E01") or image.endswith(".e01"):
imgtype = 'ewf'
elif image.endswith(".aff") or image.endswith(".AFF"):
imgtype = 'aff'
mmls_cmd = "mmls -i " + imgtype +" "+image +" | grep \"02:\""
## print("D: Executing mmls command: ", mmls_cmd)
part2 = subprocess.check_output(mmls_cmd, shell=True)
## print("D: Extracting partition-2: ", part2)
part2_list = part2.split()
part2_start = int(part2_list[2])
## print("D: Start offset of Partition-2: ", part2_start)
## icat_cmd ex: icat -o 1 ~/aaa/charlie-work-usb-2009-12-11.aff 130
# redirect_file is set to True if the contents need to be
# written to a file.
if (redirect_file == True):
outfile = self.bcHandleSpecialChars(outfile)
icat_cmd = "icat -o "+str(part2_start)+ " "+ \
image + " " + \
self.fiDictList[i]['inode'] + ' > ' + outfile
## print(">> D: Executing iCAT command: ", icat_cmd)
f2 = os.popen(icat_cmd)
# FIXME: Using subprocess.check_output is making icat_cmd
# fail for some instances. Revisit this. Till then the
# older call os.popen is used, which seems to work fine.
# subprocess.check_output(icat_cmd, shell=True)
## print(">> Writing to file ", outfile)
else:
                    # Only printable files are dumped to the textEdit window.
                    # The rest are redirected to a file in /tmp.
if (filename.endswith('txt') or filename.endswith('xml')):
icat_cmd = "icat -o "+str(part2_start)+ " "+ image + " " + self.fiDictList[i]['inode']
## print(">> D: Executing iCAT command: ", icat_cmd)
f2 = os.popen(icat_cmd)
icat_out = f2.read()
print(">> Dumping Contents of the file :", filename)
print("\n")
print(icat_out)
else:
# Strip the path to extract just the name of the file.
justFilename = self.bcGetFilenameFromPath(filename)
icat_cmd = "icat -o "+str(part2_start)+ " "+ \
image + " " + \
self.fiDictList[i]['inode'] + ' > /tmp/'+justFilename
f2 = os.popen(icat_cmd)
# Open the file in the pdf reader if it is a PDF file
# else copy it to a file in /tmp
if justFilename.endswith('pdf'):
print(">>> Opening the PDF file /tmp/",justFilename)
os.system('evince /tmp/'+justFilename)
else:
print(">>> File copied to: ", '/tmp/'+justFilename)
return
# Callback function for SAX processing of the dfxml file.
def cb(self, fi):
self.fiDictList.append({self.acc_dict_array[0]:fi.filename(), \
self.acc_dict_array[1]:fi.partition(), \
self.acc_dict_array[2]:fi.inode(), \
self.acc_dict_array[3]:fi.name_type(), \
self.acc_dict_array[4]:fi.filesize() })
# The fiwalk utility fiwalk_using_sax is invoked with a callback
# to process the dfxml file contents.
def bcProcessDfxmlFileUsingSax(self, dfxmlfile):
fiwalk.fiwalk_using_sax(xmlfile=open(dfxmlfile, 'rb'),callback=self.cb)
# Generate the XML file using the Fiwalk cmd
# It generates a temporary file <image_path>/dfxmlfile.xml
# If such a file exists, the script terminates indicating the reason.
# User can remove it or rename it to continue.
# The Close routine removes this temporary file.
def bcGenerateDfxmlFile(image, dfxmlfile):
# First check if the file image exists
if not os.path.exists(image):
print(">> Error. Image %s does not exist" %image)
return None
cmd = ['fiwalk', '-f', '-X', dfxmlfile, image]
print(">> Generating XML File ", dfxmlfile)
print(">> Invoking command for Fiwalk = ", cmd)
## subprocess.check_output(cmd, shell=True)
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
(data, err) = p.communicate()
if p.returncode:
print(">>> Fiwalk command failed for image %s " %image)
return None
else:
print(">>> Generated the file %s " %dfxmlfile)
return dfxmlfile
if __name__=="__main__":
import sys, time, re
parser = ArgumentParser(prog='bc_disk_access.py', description='File Access')
parser.add_argument('--image', action='store', help="Image file ")
parser.add_argument('--dfxmlfile', action='store', help="DFXML file ")
parser.add_argument('--cat',action='store_true',help='list contents ')
parser.add_argument('--listfiles',action='store_true',help='list file structure ')
parser.add_argument('--filename',action='store',help='File name to list contents of ')
parser.add_argument('--outdir',action='store',help='Output Directory ')
args = parser.parse_args()
## print("D: Image: ", args.image)
## print("D: dfxmlfile: ", args.dfxmlfile)
## print("D: cat: ", args.cat)
## print("D: listfiles: ", args.listfiles)
## print("D: filename: ", args.filename)
## print("D: output file", args.outfile)
# If dfxmlfile not given, run the fiwalk cmd to extract the dfxml file
# First check if the file image exists
if not os.path.exists(args.image):
print("\n>> Error!! Image %s does not exist \n" %args.image)
exit(0)
global isGenDfxmlFile
isGenDfxmlFile = False
# If dfxml file not provided, generate it now.
if (args.dfxmlfile == None):
# Get the directory where "image" exists
directory = os.path.dirname(args.image)
dfxmlfile = directory+'/dfxmlfile.xml'
if os.path.exists(dfxmlfile):
print("\n>> File %s exists. Remove it and run the command again.\n" %dfxmlfile)
exit(0)
        # capture the return value so the failure check below actually works
        dfxmlfile = bcGenerateDfxmlFile(args.image, dfxmlfile)
if dfxmlfile == None:
print(">> Error: Fiwalk generation failed")
exit(0)
global isGenDfxmlFile
isGenDfxmlFile = True
else:
dfxmlfile = args.dfxmlfile
if not os.path.exists(dfxmlfile):
# dfxmlfile provided in the args, but it doesn't exist
            # capture the return value so the failure check below actually works
            dfxmlfile = bcGenerateDfxmlFile(args.image, dfxmlfile)
if dfxmlfile == None:
print(">> Error: Fiwalk generation failed")
exit(0)
global isGenDfxmlFile
            isGenDfxmlFile = True
filestr = BcFileStructure()
# The following call is just to test bcCatFile, giving a filename
# from the dfxml file. In reality, it will be invoked from a click on
# a file in the web browser.
if (args.cat == True):
if args.filename == None or dfxmlfile == None:
print(">> Filename or dfxml file not provided. Exiting")
exit(0)
if not os.path.exists(dfxmlfile):
print(">> File %s doesnot exist " %dfxmlfile)
exit(0)
filestr.bcCatFile(args.filename, args.image, dfxmlfile, False, None)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# expand third container
## parent0 = BcFileStructure.bcExtractFileStr.parent0
## index = model.indexFromItem(parent0)
## view.expand(index)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# select last row
## selmod = view.selectionModel()
#index2 = model.indexFromItem(child3)
## index2 = model.indexFromItem(parent0)
## selmod.select(index2, QtGui.QItemSelectionModel.Select|QtGui.QItemSelectionModel.Rows)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if (args.listfiles == True):
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow(args.outdir)
ui.setupUi(MainWindow)
filestr.bcExtractFileStr(args.image, dfxmlfile, args.outdir)
MainWindow.show()
sys.exit(app.exec_())
| gpl-3.0 | -3,710,970,981,776,132,000 | 44.547009 | 146 | 0.597986 | false | 4.140101 | false | false | false | 0.007694 |
XiaosongWei/crosswalk | tools/utils.py | 35 | 1660 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' This script provides utils for python scripts in crosswalk.
'''
import os
import sys
import subprocess
def TryAddDepotToolsToPythonPath():
depot_tools = FindDepotToolsInPath()
if depot_tools:
sys.path.append(depot_tools)
python_path = os.environ.get('PYTHONPATH')
if python_path:
os.environ['PYTHONPATH'] = os.path.pathsep.join(
python_path.split(os.path.pathsep)+[depot_tools])
else:
os.environ['PYTHONPATH'] = depot_tools
def FindDepotToolsInPath():
paths = os.getenv('PATH').split(os.path.pathsep)
for path in paths:
if os.path.basename(path) == '':
      # path ends with os.path.pathsep
path = os.path.dirname(path)
if os.path.basename(path) == 'depot_tools':
return path
return None
def IsWindows():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
def IsLinux():
return sys.platform.startswith('linux')
def IsMac():
return sys.platform.startswith('darwin')
def GitExe():
if IsWindows():
return 'git.bat'
else:
return 'git'
def GetCommandOutput(command, cwd=None):
proc = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, bufsize=1,
cwd=cwd)
output = proc.communicate()[0]
result = proc.returncode
if result:
raise Exception('%s: %s' % (subprocess.list2cmdline(command), output))
return output
| bsd-3-clause | -2,605,723,234,060,883,000 | 27.135593 | 74 | 0.677108 | false | 3.632385 | false | false | false | 0.016265 |
pjimmybrcd/campus_ztp_nps | actions/secure_copy.py | 4 | 1487 | """
Copyright 2016 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from lib import actions, Secure_Copy, ztp_utils
class SecureCopyAction(actions.SessionAction):
def __init__(self, config):
super(SecureCopyAction, self).__init__(config)
def run(self, hostname, source, destination, direction, username='', password=''):
ztp_utils.replace_default_userpass(self, username, password,
enable_username='', enable_password='')
scp = Secure_Copy.Secure_Copy(hostname, self._username, self._password)
# TODO: This should be done when keys are re-generated
scp.erase_existing_ssh_key_for_host()
if direction == 'to':
success = scp.send_file(source, destination)
if direction == 'from':
success = scp.get_file(source, destination)
if success:
return (True, "File Copied!")
else:
return (False, "Failed")
| apache-2.0 | 3,590,830,712,106,743,300 | 39.189189 | 86 | 0.67384 | false | 4.310145 | false | false | false | 0.001345 |
lunzhy/PyShanbay | tests/ui_test.py | 1 | 2044 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Lunzhy'
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from pyshanbay.shanbay import VisitShanbay
from pyshanbay import page_parser as parser
from gui.ui_main import UIMainWidget
class MainForm(QWidget):
def __init__(self):
super().__init__()
self.table = QTableWidget()
layout = QHBoxLayout()
layout.addWidget(self.table)
self.setLayout(layout)
self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.table.setSelectionBehavior(QAbstractItemView.SelectRows)
self.table.setSelectionMode(QAbstractItemView.SingleSelection)
self.table.verticalHeader().setResizeMode(QHeaderView.Fixed)
self.table.itemClicked.connect(self.show_selected)
return
def set_data(self, members_data):
self.table.setColumnCount(2)
self.table.setRowCount(len(members_data))
for row_index, member in enumerate(members_data):
new_item = QTableWidgetItem(member['nickname'])
self.table.setItem(row_index, 0, new_item)
new_item = QTableWidgetItem(str(member['checked_today']))
self.table.setItem(row_index, 1, new_item)
return
def show_selected(self):
select = self.table.selectionModel().selectedRows()
print(self.table.item(select[0].row(), 0).text())
print(self.table.item(select[0].row(), 1).text())
return
def get_data():
shanbay = VisitShanbay()
shanbay.login()
page_members = shanbay.members()
total_page = parser.total_page_members(page_members)
pages = []
for page in range(1, int(total_page) + 1):
page_html = shanbay.members_page(page)
pages.append(page_html)
members_info = parser.parse_members_info(pages)
return members_info
if __name__ == '__main__':
app = QApplication(sys.argv)
main_form = UIMainWidget()
main_form.set_data_members(get_data())
main_form.show()
app.exec_() | mit | 5,132,707,718,424,770,000 | 28.214286 | 70 | 0.652642 | false | 3.512027 | false | false | false | 0.000978 |
pleaseproject/python-for-android | python3-alpha/extra_modules/gdata/tlslite/utils/OpenSSL_RSAKey.py | 48 | 5017 | """OpenSSL/M2Crypto RSA implementation."""
from .cryptomath import *
from .RSAKey import *
from .Python_RSAKey import Python_RSAKey
#copied from M2Crypto.util.py, so when we load the local copy of m2
#we can still use it
def password_callback(v, prompt1='Enter private key passphrase:',
prompt2='Verify passphrase:'):
from getpass import getpass
while 1:
try:
p1=getpass(prompt1)
if v:
p2=getpass(prompt2)
if p1==p2:
break
else:
break
except KeyboardInterrupt:
return None
return p1
if m2cryptoLoaded:
class OpenSSL_RSAKey(RSAKey):
def __init__(self, n=0, e=0):
self.rsa = None
self._hasPrivateKey = False
if (n and not e) or (e and not n):
raise AssertionError()
if n and e:
self.rsa = m2.rsa_new()
m2.rsa_set_n(self.rsa, numberToMPI(n))
m2.rsa_set_e(self.rsa, numberToMPI(e))
def __del__(self):
if self.rsa:
m2.rsa_free(self.rsa)
def __getattr__(self, name):
if name == 'e':
if not self.rsa:
return 0
return mpiToNumber(m2.rsa_get_e(self.rsa))
elif name == 'n':
if not self.rsa:
return 0
return mpiToNumber(m2.rsa_get_n(self.rsa))
else:
raise AttributeError
def hasPrivateKey(self):
return self._hasPrivateKey
def hash(self):
return Python_RSAKey(self.n, self.e).hash()
def _rawPrivateKeyOp(self, m):
s = numberToString(m)
byteLength = numBytes(self.n)
if len(s)== byteLength:
pass
elif len(s) == byteLength-1:
s = '\0' + s
else:
raise AssertionError()
c = stringToNumber(m2.rsa_private_encrypt(self.rsa, s,
m2.no_padding))
return c
def _rawPublicKeyOp(self, c):
s = numberToString(c)
byteLength = numBytes(self.n)
if len(s)== byteLength:
pass
elif len(s) == byteLength-1:
s = '\0' + s
else:
raise AssertionError()
m = stringToNumber(m2.rsa_public_decrypt(self.rsa, s,
m2.no_padding))
return m
def acceptsPassword(self): return True
def write(self, password=None):
bio = m2.bio_new(m2.bio_s_mem())
if self._hasPrivateKey:
if password:
def f(v): return password
m2.rsa_write_key(self.rsa, bio, m2.des_ede_cbc(), f)
else:
def f(): pass
m2.rsa_write_key_no_cipher(self.rsa, bio, f)
else:
if password:
raise AssertionError()
m2.rsa_write_pub_key(self.rsa, bio)
s = m2.bio_read(bio, m2.bio_ctrl_pending(bio))
m2.bio_free(bio)
return s
def writeXMLPublicKey(self, indent=''):
return Python_RSAKey(self.n, self.e).write(indent)
def generate(bits):
key = OpenSSL_RSAKey()
def f():pass
key.rsa = m2.rsa_generate_key(bits, 3, f)
key._hasPrivateKey = True
return key
generate = staticmethod(generate)
def parse(s, passwordCallback=None):
if s.startswith("-----BEGIN "):
if passwordCallback==None:
callback = password_callback
else:
def f(v, prompt1=None, prompt2=None):
return passwordCallback()
callback = f
bio = m2.bio_new(m2.bio_s_mem())
try:
m2.bio_write(bio, s)
key = OpenSSL_RSAKey()
if s.startswith("-----BEGIN RSA PRIVATE KEY-----"):
def f():pass
key.rsa = m2.rsa_read_key(bio, callback)
if key.rsa == None:
raise SyntaxError()
key._hasPrivateKey = True
elif s.startswith("-----BEGIN PUBLIC KEY-----"):
key.rsa = m2.rsa_read_pub_key(bio)
if key.rsa == None:
raise SyntaxError()
key._hasPrivateKey = False
else:
raise SyntaxError()
return key
finally:
m2.bio_free(bio)
else:
raise SyntaxError()
parse = staticmethod(parse)
| apache-2.0 | -4,449,020,846,723,608,600 | 32.898649 | 72 | 0.449272 | false | 4.424162 | false | false | false | 0.00299 |
utkbansal/kuma | kuma/contentflagging/models.py | 16 | 7036 | """Models for content moderation flagging"""
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core import urlresolvers
from django.core.mail import send_mail
from django.db import models
from django.template import Context, loader
from django.utils.translation import ugettext_lazy as _
from kuma.core.utils import get_unique
FLAG_REASONS = getattr(settings, "FLAG_REASONS", (
('notworking', _('This is not working for me')),
('inappropriate', _('This contains inappropriate content')),
('plagarised', _('This was not created by the author')),
('fakeauthor', _('The author is fake')),
))
FLAG_STATUS_FLAGGED = "flagged"
FLAG_STATUS_REJECTED = "rejected"
FLAG_STATUS_NOTIFIED = "notified"
FLAG_STATUS_HIDDEN = "hidden"
FLAG_STATUS_DELETED = "deleted"
FLAG_STATUSES = getattr(settings, "FLAG_STATUSES", (
(FLAG_STATUS_FLAGGED, _("Flagged")),
(FLAG_STATUS_REJECTED, _("Flag rejected by moderator")),
(FLAG_STATUS_NOTIFIED, _("Creator notified")),
(FLAG_STATUS_HIDDEN, _("Content hidden by moderator")),
(FLAG_STATUS_DELETED, _("Content deleted by moderator")),
))
FLAG_NOTIFICATIONS = {}
for reason in FLAG_REASONS:
FLAG_NOTIFICATIONS[reason[0]] = True
    # to refine flag notifications, change the preceding line to False and add
    # individual reasons to the dict like so:
# FLAG_NOTIFICATIONS['inappropriate'] = True
class ContentFlagManager(models.Manager):
"""Manager for ContentFlags"""
def flag(self, request, object, flag_type, explanation, recipients=None):
"""Create a flag for a content item, if the unique request hasn't
already done so before."""
if flag_type not in dict(FLAG_REASONS):
return (None, False)
content_type = ContentType.objects.get_for_model(object)
user, ip, user_agent, unique_hash = get_unique(content_type, object.pk,
request=request)
defaults = dict(content_type=content_type,
object_pk=object.pk, ip=ip,
user_agent=user_agent, user=user,
flag_type=flag_type,
explanation=explanation)
cf = ContentFlag.objects.get_or_create(unique_hash=unique_hash,
defaults=defaults)
if recipients:
subject = _("{object} Flagged")
subject = subject.format(object=object)
t = loader.get_template('contentflagging/email/flagged.ltxt')
url = '/admin/contentflagging/contentflag/' + str(object.pk)
content = t.render(Context({'url': url,
'object': object,
'flag_type': flag_type}))
send_mail(subject, content,
settings.DEFAULT_FROM_EMAIL, recipients)
return cf
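    # A typical call from a view might look like this (request, doc and the
    # recipient address are illustrative):
    #   flag, created = ContentFlag.objects.flag(
    #       request, doc, 'inappropriate',
    #       explanation='spam link', recipients=['mods@example.com'])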
def flags_by_type(self, status=FLAG_STATUS_FLAGGED):
"""Return a dict of flags by content type."""
flags = (self.filter(flag_status=status)
.prefetch_related('content_object'))
flag_dict = {}
for flag in flags:
model_class = flag.content_type.model_class()
model_name = model_class._meta.verbose_name_plural
if model_name not in flag_dict:
flag_dict[model_name] = []
flag_dict[model_name].append(flag)
return flag_dict
class ContentFlag(models.Model):
"""Moderation flag submitted against a content item"""
objects = ContentFlagManager()
class Meta:
ordering = ('-created',)
get_latest_by = 'created'
flag_status = models.CharField(_('current status of flag review'),
max_length=16, blank=False,
choices=FLAG_STATUSES, default='flagged')
flag_type = models.CharField(_('reason for flagging the content'),
max_length=64, db_index=True,
blank=False, choices=FLAG_REASONS)
explanation = models.TextField(_('please explain what content you '
'feel is inappropriate'),
max_length=255, blank=True)
content_type = models.ForeignKey(ContentType, editable=False,
verbose_name="content type",
related_name="content_type_set_for_%(class)s",)
object_pk = models.CharField(_('object ID'), max_length=32, editable=False)
content_object = GenericForeignKey('content_type', 'object_pk')
ip = models.CharField(max_length=40, editable=False, blank=True, null=True)
user_agent = models.CharField(max_length=128, editable=False,
blank=True, null=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, editable=False,
blank=True, null=True)
# HACK: As it turns out, MySQL doesn't consider two rows with NULL values
# in a column as duplicates. So, resorting to calculating a unique hash in
# code.
unique_hash = models.CharField(max_length=32, editable=False,
unique=True, db_index=True, null=True)
created = models.DateTimeField(_('date submitted'), auto_now_add=True,
blank=False, editable=False)
modified = models.DateTimeField(_('date last modified'),
auto_now=True, blank=False)
def __unicode__(self):
return ('ContentFlag %(flag_type)s -> "%(title)s"' % dict(
flag_type=self.flag_type, title=str(self.content_object)))
def save(self, *args, **kwargs):
# Ensure unique_hash is updated whenever the object is saved
user, ip, user_agent, unique_hash = get_unique(
self.content_type, self.object_pk,
ip=self.ip, user_agent=self.user_agent, user=self.user)
self.unique_hash = unique_hash
super(ContentFlag, self).save(*args, **kwargs)
def content_view_link(self):
"""HTML link to the absolute URL for the linked content object"""
object = self.content_object
return ('<a target="_new" href="%(link)s">View %(title)s</a>' %
dict(link=object.get_absolute_url(), title=object))
content_view_link.allow_tags = True
def content_admin_link(self):
"""HTML link to the admin page for the linked content object"""
object = self.content_object
ct = ContentType.objects.get_for_model(object)
url_name = 'admin:%(app)s_%(model)s_change' % dict(
app=ct.app_label, model=ct.model)
link = urlresolvers.reverse(url_name, args=(object.id,))
return ('<a target="_new" href="%(link)s">Edit %(title)s</a>' %
dict(link=link, title=object))
content_admin_link.allow_tags = True
| mpl-2.0 | 6,790,204,614,733,682,000 | 43.251572 | 84 | 0.598209 | false | 4.160852 | false | false | false | 0.000142 |
CasataliaLabs/biscuit_drishtiman | Pmw-2.0.0/build/lib.linux-x86_64-2.7/Pmw/Pmw_1_3_3/tests/RadioSelect_test.py | 2 | 3519 | import Tkinter
import Test
import Pmw
Test.initialise()
if Tkinter.TkVersion >= 8.4:
expected1 = 'TclError: bad relief "bogus": must be '
else:
expected1 = 'TclError: bad relief type "bogus": must be '
c = Pmw.RadioSelect
kw_1 = {'labelpos' : 'nw', 'label_text' : 'Radio Select:'}
tests_1 = (
(c.pack, (), {'padx' : 10, 'pady' : 10, 'fill' : 'both', 'expand' : 1}),
(Test.num_options, (), 8),
(c.index, Pmw.END, 'ValueError: RadioSelect has no buttons'),
(c.add, ('Fruit',), Tkinter.Button),
(c.add, ('Vegetables',), Tkinter.Button),
(c.add, ('CornFlakes',), {'text': 'Cereals'}, Tkinter.Button),
(c.add, ('Legumes',), Tkinter.Button),
(c.add, ('Legumes',), 'ValueError: button "Legumes" already exists'),
(c.index, 0, 0),
(c.index, Pmw.END, 3),
(c.index, 'Vegetables', 1),
(c.index, 'Fruit', 0),
(c.index, 12, 'ValueError: index "12" is out of range'),
(c.index, 'bogus', 'ValueError: bad index "bogus": ' + \
'must be a name, a number or Pmw.END'),
('hull_background', 'yellow'),
('hull_show', 'X', 'TclError: unknown option "-show"'),
('frame_relief', 'raised'),
('frame_borderwidth', 4),
('frame_borderwidth', 2),
('command', Test.callback1),
(c.invoke, 'Vegetables', 'Vegetables'),
('hull_cursor', 'gumby'),
('Button_state', 'disabled'),
('Button_background', 'Green'),
('Button_cursor', 'watch'),
('Button_background', 'grey85'),
('label_foreground', 'Green'),
('label_foreground', 'Black'),
('label_highlightcolor', 'Red'),
('Fruit_background', 'red'),
('Vegetables_background', 'green'),
('CornFlakes_background', 'yellow'),
('Legumes_background', 'brown'),
('Legumes_foreground', 'white'),
(c.add, ('Foo',), Tkinter.Button),
('label_text', 'Label'),
('frame_relief', 'sunken'),
('frame_relief', 'bogus', expected1 + Test.reliefs),
(c.deleteall, ()),
)
kw_2 = {
'labelpos' : 'nw',
'label_text' : 'Multiple:',
'selectmode' : 'multiple',
}
tests_2 = (
(c.pack, (), {'padx' : 10, 'pady' : 10, 'fill' : 'both', 'expand' : 1}),
(c.add, ('Fruit',), Tkinter.Button),
(c.add, ('Vegetables',), Tkinter.Button),
(c.add, ('CornFlakes',), {'text': 'Cereals'}, Tkinter.Button),
(c.add, ('Legumes',), Tkinter.Button),
('command', Test.callback2),
(c.getcurselection, (), ()),
(c.invoke, 'Vegetables', ('Vegetables', 1)),
(c.getcurselection, (), ('Vegetables',)),
(c.invoke, 'Legumes', ('Legumes', 1)),
(c.getcurselection, (), ('Vegetables', 'Legumes')),
(c.invoke, 'Fruit', ('Fruit', 1)),
(c.getcurselection, (), ('Vegetables', 'Legumes', 'Fruit')),
(c.invoke, 'Legumes', ('Legumes', 0)),
(c.getcurselection, (), ('Vegetables', 'Fruit')),
(c.deleteall, ()),
(c.add, ('Fruit',), Tkinter.Button),
(c.add, ('Vegetables',), Tkinter.Button),
(c.invoke, 'Vegetables', ('Vegetables', 1)),
(c.getcurselection, (), ('Vegetables',)),
)
alltests = [
(tests_1, kw_1),
(tests_2, kw_2),
]
tests_3 = (
(c.pack, (), {'padx' : 10, 'pady' : 10}),
(c.add, ('Foo',), Tkinter.Button),
(c.add, ('Bar',), Tkinter.Button),
)
poslist = ('nw', 'n', 'ne', 'en', 'e', 'es', 'se', 's', 'sw', 'ws', 'w', 'wn',)
for pos in poslist:
kw_3 = {
'labelpos' : pos,
'orient' : 'vertical',
'padx' : 20,
'pady' : 20,
'label_text' : 'Radio Select',
}
alltests.append((tests_3, kw_3))
testData = ((c, alltests),)
if __name__ == '__main__':
Test.runTests(testData)
| gpl-3.0 | 3,893,455,897,087,093,000 | 29.419643 | 79 | 0.551861 | false | 2.795075 | true | false | false | 0.007104 |
JordiGoPython/Python-Hack-Example | TransversalDirectory.py | 1 | 1799 | paths_wordpress = {'wp-login.php':
['index.php','license.txt','readme.html',
'wp-activate.php', 'wp-app.php',
'wp-blog-header.php', 'wp-comments-post.php',
'wp-config-sample.php', 'wp-cron.php',
'wp-links-opml.php', 'wp-load.php',
'wp-mail.php',
'wp-pass.php', 'wp-register.php',
'wp-settings.php', 'wp-signup.php',
'wp-trackback.php', 'xmlrpc.php']
}
paths_recursos = [
'includes','js',
'css', 'src',
'img', 'includ',
'../../../../../',
'docs', 'pdfs',
'pdf', 'documents',
'fotos', 'photos',
]
body_files = {}
import os
import urllib
import sys
from bs4 import BeautifulSoup
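# Usage sketch (the target base URL is taken from the first CLI argument):
#   python TransversalDirectory.py http://target.example.com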
continuando = False
for urlsWP, files in paths_wordpress.items():
    response = urllib.urlopen(sys.argv[1]+'/'+urlsWP)
    if response.getcode() == int(200) and continuando == False:
        print "WORDPRESS CMS FOUND"
        for f in files:
            # probe each known WordPress file, not the login page again
            response = urllib.urlopen(sys.argv[1]+'/'+f)
            if response.getcode() == int(200):
                print "PATH FOUND"
                print response.geturl()
    else:
        print "WORDPRESS NOT FOUND"
for path in paths_recursos:
    response = urllib.urlopen(sys.argv[1]+'/'+path+'/')
    if response.getcode() == 200:
        print "PATH FOUND"
        html_tags = BeautifulSoup(response, "lxml")
        file_html = html_tags.find_all('a')
        arrays = []
        for f_h in file_html:
            arrays.append(f_h.get('href'))
        body_files[path] = arrays
        print response.geturl()
if len(body_files):
    while True:
        print body_files.keys()
        key_paths = raw_input('Enter the path you want to display: ')
        try:
            print body_files[key_paths]
        except Exception, e:
            print "invalid value entered"
        finally:
            yes_no = raw_input('Do you want to exit? Y/N: ')
            if str.upper(yes_no) != 'N':
                break
# read() returns a plain string, so check the response code first
response = urllib.urlopen(sys.argv[1]+'/robots.txt')
if response.getcode() == 200:
    print response.read()
| gpl-2.0 | -50,693,655,184,626,750 | 24.7 | 62 | 0.659255 | false | 2.59596 | false | false | false | 0.040578 |
ThiefMaster/sqlalchemy | examples/materialized_paths/materialized_paths.py | 29 | 3989 | """Illustrates the "materialized paths" pattern.
Materialized paths is a way to represent a tree structure in SQL with fast
descendant and ancestor queries at the expense of moving nodes (which require
O(n) UPDATEs in the worst case, where n is the number of nodes in the tree). It
is a good balance in terms of performance and simplicity between the nested
sets model and the adjacency list model.
It works by storing all nodes in a table with a path column, containing a
string of delimited IDs. Think file system paths:
1
1.2
1.3
1.3.4
1.3.5
1.3.6
1.7
1.7.8
1.7.9
1.7.9.10
1.7.11
Descendant queries are simple left-anchored LIKE queries, and ancestors are
already stored in the path itself. Updates require going through all
descendants and changing the prefix.
"""
from sqlalchemy import Column, Integer, String, func, select, create_engine
from sqlalchemy.orm import remote, foreign, relationship, Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.expression import cast
from sqlalchemy.dialects.postgresql import ARRAY
Base = declarative_base()
class Node(Base):
__tablename__ = "node"
id = Column(Integer, primary_key=True, autoincrement=False)
path = Column(String(500), nullable=False, index=True)
# To find the descendants of this node, we look for nodes whose path
# starts with this node's path.
descendants = relationship(
"Node", viewonly=True, order_by=path,
primaryjoin=remote(foreign(path)).like(path.concat(".%")))
# Finding the ancestors is a little bit trickier. We need to create a fake
# secondary table since this behaves like a many-to-many join.
secondary = select([
id.label("id"),
func.unnest(cast(func.string_to_array(
func.regexp_replace(path, r"\.?\d+$", ""), "."),
ARRAY(Integer))).label("ancestor_id")
]).alias()
ancestors = relationship("Node", viewonly=True, secondary=secondary,
primaryjoin=id == secondary.c.id,
secondaryjoin=secondary.c.ancestor_id == id,
order_by=path)
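    # Worked example of the pipeline above: for a node with path "1.7.9.10",
    # regexp_replace strips the trailing ".10", string_to_array splits
    # "1.7.9" on ".", and unnest yields the ancestor ids 1, 7 and 9.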
@property
def depth(self):
return len(self.path.split(".")) - 1
def __repr__(self):
return "Node(id={})".format(self.id)
def __str__(self):
root_depth = self.depth
s = [str(self.id)]
s.extend(((n.depth - root_depth) * " " + str(n.id))
for n in self.descendants)
return "\n".join(s)
def move_to(self, new_parent):
new_path = new_parent.path + "." + str(self.id)
for n in self.descendants:
n.path = new_path + n.path[len(self.path):]
self.path = new_path
if __name__ == "__main__":
engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
Base.metadata.create_all(engine)
session = Session(engine)
print("-" * 80)
print("create a tree")
session.add_all([
Node(id=1, path="1"),
Node(id=2, path="1.2"),
Node(id=3, path="1.3"),
Node(id=4, path="1.3.4"),
Node(id=5, path="1.3.5"),
Node(id=6, path="1.3.6"),
Node(id=7, path="1.7"),
Node(id=8, path="1.7.8"),
Node(id=9, path="1.7.9"),
Node(id=10, path="1.7.9.10"),
Node(id=11, path="1.7.11"),
])
session.flush()
print(str(session.query(Node).get(1)))
print("-" * 80)
print("move 7 under 3")
session.query(Node).get(7).move_to(session.query(Node).get(3))
session.flush()
print(str(session.query(Node).get(1)))
print("-" * 80)
print("move 3 under 2")
session.query(Node).get(3).move_to(session.query(Node).get(2))
session.flush()
print(str(session.query(Node).get(1)))
print("-" * 80)
print("find the ancestors of 10")
print([n.id for n in session.query(Node).get(10).ancestors])
session.close()
Base.metadata.drop_all(engine)
| mit | -2,058,899,194,344,084,200 | 30.912 | 80 | 0.615944 | false | 3.450692 | false | false | false | 0.000251 |
genestack/task-library | genestack/bio/variation/variation_indexer.py | 1 | 16269 | # -*- coding: utf-8 -*-
import re
import sys
import vcf
from genestack.bio import bio_meta_keys
from genestack.genestack_indexer import Indexer
from genestack.genestack_exceptions import GenestackException
from genestack.bio.reference_genome.reference_genome_file import ReferenceGenome
from genestack.metainfo import StringValue, Metainfo
from genestack.utils import normalize_contig_name
# FIXME find usages and remove this constants from here
DATA_LINK = Metainfo.DATA_URL
DATA_LOCATION = 'genestack.location:data'
EFF_FIELDS = ['Effect', 'Effect_Impact', 'Functional_Class', 'Codon_Change',
'Amino_Acid_Change', 'Amino_Acid_length', 'Gene_Name',
'Transcript_BioType', 'Gene_Coding', 'Transcript_ID',
'Exon_Rank', 'Genotype_Number', 'ERRORS', 'WARNINGS']
EFF_SCHEMA_FIELDS = [('eff_' + e.lower()) for e in EFF_FIELDS]
class RecordConverter(object):
BASE_SCHEMA = {
'CHROM': 'contig_s',
'LOCATION': 'location_iv',
'START': 'start_l',
'REF': 'ref_s_ci',
'QUAL': 'qual_f',
'ID': 'id_ss_ci',
'FILTER': 'filter_ss_ci',
'ALT': 'alt_ss_ci',
'ALT_COUNT': 'alt_len_i_ns',
'TYPE': 'type_ss_ci'
}
def __init__(self, vcf_reader):
"""
        Record converter converts a vcf.Record into a feature.
        ``self.schema`` is filled with info from vcf.Reader.infos;
        we cannot build the schema by analysing record items, since records
        may also be created manually.
        There are some differences in the schema depending on how a record was created:
          - value types
            - manual: can contain numbers, string and unicode values
            - parsed: contain only strings
          - single values
            - manual: records always contain a list of values
            - parsed: hold a single value if it is single in the schema
"""
self.range_limit = self.__get_range_limit(vcf_reader.infos)
self.schema = self.BASE_SCHEMA.copy()
for info in vcf_reader.infos.values():
if info.type == 'Float':
suffix = 'f'
elif info.type == 'Integer':
suffix = 'l'
elif info.type in ('Character', 'String'):
suffix = 's'
elif info.type == 'Flag':
suffix = 'b'
else:
raise GenestackException('Unexpected vcf info type for {}'.format(info))
# for single bool value num is 0
if str(info.num) not in ('0', '1'):
suffix += 's'
self.schema[info.id] = 'info_%s_%s' % (info.id, suffix)
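        # Illustrative mapping (assuming a hypothetical VCF header declaring
        # INFO=<ID=DP,Number=1,Type=Integer> and INFO=<ID=AF,Number=A,Type=Float>):
        #   'DP' -> 'info_DP_l'   (single integer value)
        #   'AF' -> 'info_AF_fs'  (multi-valued float)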
@staticmethod
def __get_range_limit(infos):
"""
Return range limit from vcf.Reader.infos.
Get low and high range for field types.
        ``None`` means that there is no limit.
        The text is searched with a regular expression; malformed values are
        silently ignored.
examples:
- (Range:1-10) 1.0, 10.0
- (Range:-10.33) None, 10.33
- (Range:10-) 10.0, None
"""
range_limit = {}
        reg = re.compile(r'\(Range:([0-9]*\.?[0-9]*)-([0-9]*\.?[0-9]*)\)')
for key, val in infos.items():
match = reg.search(val.desc)
if match:
range_limit[key] = tuple(float(x) if x else None for x in match.group(1, 2))
return range_limit
def convert_record_to_feature(self, line_id, record):
"""
Convert vcf.Record to feature.
:param line_id: line id in file, first line of file has id=1
:type line_id: long
:param record: record
:type record: vcf.Record
        :return: feature dict ready to be indexed
"""
contig = normalize_contig_name(record.CHROM)
start = record.start
end = record.end
record_id = record.ID
ref = record.REF
substitutions = record.ALT
quality = record.QUAL
filter_field = record.FILTER
info = record.INFO
samples_format = record.FORMAT
samples = record.samples
data = {
'__id__': str(line_id),
'line_l': line_id,
'contig_s': contig,
'location_iv': str(start) + " " + str(end),
'start_l': start,
'ref_s_ci': ref,
'qual_f': quality
}
if record_id is not None and record_id != '.':
data['id_ss_ci'] = record_id.split(',')
if filter_field != '.':
data['filter_ss_ci'] = filter_field
data.update(self.__get_samples_info(samples_format, samples))
alt = list()
types = list()
for subst in substitutions:
sub = str(subst) if subst is not None else '.'
alt.append(sub)
types.append(self.__get_type(ref, sub))
data['alt_ss_ci'] = alt
data['alt_len_i_ns'] = len(alt)
data['type_ss_ci'] = types
'''For future use; I would prefer to use PyVCF methods instead of implementing my own.
But there is a slight difference in the results. Please review if these differences are critical.
if record.is_snp:
data['is_snp_b'] = True
if record.is_indel:
data['is_indel_b'] = True
if record.is_transition:
data['is_transition_b'] = True
if record.is_deletion:
data['is_deletion_b'] = True
if record.is_monomorphic:
data['is_monomorphic_b'] = True
data['var_type_s'] = record.var_type
data['var_subtype_s'] = record.var_subtype
'''
for key, value in info.items():
if value is None:
continue
if isinstance(value, list) and value[0] is None:
continue
if key not in self.schema:
typed_key = self.__get_typed_string(key, value)
self.schema[key] = typed_key
typed_key = self.schema[key]
if typed_key == 'info_EFF_ss':
for eff_line in value:
                    # TODO: here we blindly parse the snpEff line and trust
                    # that the items are in the proper order, although we have
                    # not even checked the snpEff version. We should check the
                    # snpEff version before doing such blind parsing.
for i, val in enumerate(re.split('\(|\)|\|', eff_line)):
eff_key = EFF_SCHEMA_FIELDS[i]
eff_typed_key = 'info_splitted_' + eff_key + '_ss'
data.setdefault(eff_typed_key, []).append(val)
self.schema[eff_key] = eff_typed_key
# TODO info_EFF_ss is stored both as raw and as parsed,
            # need to check that nobody relies on the raw value
data[typed_key] = value
if isinstance(value, list):
key_base = self.__get_typed_string(key, value[0]) + '_ns'
low_limit, high_limit = self.range_limit.get(key, (None, None))
if low_limit:
value = [x for x in value if x >= low_limit]
if high_limit:
value = [x for x in value if x <= high_limit]
if value:
data['sorting_max_' + key_base] = max(value)
data['sorting_min_' + key_base] = min(value)
return data
def __get_samples_info(self, samples_format, samples):
info = {}
format_list = samples_format.split(':') if samples_format is not None else []
for s in samples:
info.setdefault('samples_info_names_ss_ci', []).append(s.sample)
for f in format_list:
val = self.__get_attribute_as_string(s.data, f)
info.setdefault('samples_info_' + f + '_ss', []).append(val)
return info
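    # Illustrative result shape (hypothetical sample data): with FORMAT
    # 'GT:DP' and samples s1/s2, __get_samples_info returns e.g.
    #   {'samples_info_names_ss_ci': ['s1', 's2'],
    #    'samples_info_GT_ss': ['0/1', '1/1'],
    #    'samples_info_DP_ss': ['12', '7']}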
@staticmethod
def __get_attribute_as_string(data, attr):
val = getattr(data, attr, None)
if val is None:
return ''
if isinstance(val, list):
return ",".join(map(str, val))
return str(val)
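    # e.g. (illustrative) a list attribute AD=[10, 5] serializes to "10,5",
    # a missing attribute to '' and a scalar DP=12 to "12".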
@staticmethod
def __get_typed_string(key, value):
"""
Add solr suffix depending on value type
:param key: key
:type key: str
:param value: corresponding value
:type value: any
:return: solr key string
:rtype: str
"""
key = 'info_' + key
list_suffix = 's' if isinstance(value, list) else ''
v = value[0] if list_suffix else value
if v is None:
return None
if isinstance(v, basestring):
suffix = '_s'
elif isinstance(v, bool):
suffix = '_b'
elif isinstance(v, (int, long)):
suffix = '_l'
elif isinstance(v, float):
suffix = '_f'
else:
raise GenestackException("Unknown type for key %s: %s (%s)" % (key, v, type(v)))
return key + suffix + list_suffix
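    # e.g. (illustrative) __get_typed_string('DP', 12) -> 'info_DP_l',
    # ('AF', [0.5]) -> 'info_AF_fs' and ('DB', True) -> 'info_DB_b'.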
@staticmethod
def __get_type(ref, alt):
if alt == '.':
return 'MR'
if len(ref) == 1 and len(alt) == 1:
return 'SNP'
elif len(ref) == len(alt):
return 'MNP'
elif len(ref) < len(alt):
return 'INS'
else:
return 'DEL'
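    # e.g. (illustrative) REF 'A' / ALT 'G' -> 'SNP', 'A' / 'AT' -> 'INS',
    # 'AT' / 'A' -> 'DEL', 'AT' / 'GC' -> 'MNP', and a missing ALT '.' -> 'MR'.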
class VariationIndexer(object):
INDEXING_CHUNK_SIZE = 4000
QUERY_CHUNK_SIZE = 100
MAX_LINE_KEY = 'genestack.initialization:maxLine'
def __init__(self, target_file, reference_genome=None):
self.target_file = target_file
if reference_genome is None:
reference_genome = target_file.resolve_reference(
bio_meta_keys.REFERENCE_GENOME, ReferenceGenome
)
assert reference_genome is not None, "No reference genome found"
self.reference_genome = reference_genome
self.__schema = None
@property
def schema(self):
        sys.stderr.write('"schema" attribute is deprecated, use RecordConverter schema instead\n')
return self.__schema
def get_indexing_line_from(self):
line_from_value = self.target_file.get_metainfo().get_first_string(VariationIndexer.MAX_LINE_KEY)
try:
return int(line_from_value) if line_from_value is not None else 0
except ValueError:
return 0
def set_max_line(self, line_id):
self.target_file.replace_metainfo_value(VariationIndexer.MAX_LINE_KEY, StringValue(str(line_id)))
def iterate_features(self, vcf_reader, record_converter=None, line_from=0):
"""
        Return a generator over the features corresponding to the vcf records in the file.
        If ``record_converter`` is not specified, a converter based on this vcf file is used.
:param vcf_reader: vcf reader
:type vcf_reader: vcf.Reader
:param record_converter: converter from record to feature
:type record_converter: RecordConverter
:param line_from: first line that should be returned, use 0 for the whole file
:return: generator
"""
if record_converter is None:
record_converter = RecordConverter(vcf_reader)
self.__schema = record_converter.schema
for line_id, record in enumerate(vcf_reader, start=1):
if line_from > line_id:
continue
yield line_id, record_converter.convert_record_to_feature(line_id, record)
def get_indexer(self, file_to_index, record_converter=None):
"""
Return context manager to index records.
This indexer has two methods:
- ``index_record`` which accepts line_number and record
- ``index_feature`` which accepts feature
``index_record`` can be called only if record_converter is specified.
:param file_to_index: Genestack file instance
:param record_converter: record converter
:return: indexer
"""
process_features = self.process_features
set_max_line = self.set_max_line
set_initialization_version = self.__set_initialization_version
class RecordIndexer(object):
def __init__(self, file_to_index, record_converter):
self.__file = file_to_index
self.__inside_context = False
self.features = []
self.raw_features = []
self.record_converter = record_converter
self.__last_feature_line_id = None
def __enter__(self):
set_initialization_version()
self.__inside_context = True
self.indexer = Indexer(file_to_index)
self.indexer.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.__flush(force=True)
self.indexer.__exit__(exc_type, exc_val, exc_tb)
self.__inside_context = False
def index_record(self, line, record):
if not self.record_converter:
raise GenestackException('Indexing record only possible if record converter is specified')
feature = self.record_converter.convert_record_to_feature(line, record)
self.index_feature(feature)
def index_feature(self, feature):
if not self.__inside_context:
raise GenestackException('RecordIndexer object must be used only inside a "with" statement')
self.raw_features.append(feature)
self.__last_feature_line_id = feature['line_l']
self.__flush()
def __flush(self, force=False):
limit = 0 if force else (VariationIndexer.QUERY_CHUNK_SIZE - 1)
if len(self.raw_features) > limit:
self.features.extend(process_features(self.raw_features))
self.raw_features = []
limit = 0 if force else (VariationIndexer.INDEXING_CHUNK_SIZE - 1)
if len(self.features) > limit:
self.indexer.index_records(self.features)
self.features = []
set_max_line(self.__last_feature_line_id)
return RecordIndexer(file_to_index, record_converter)
def create_index(self, file_name):
"""
Create index for vcf file.
        Indexing progress is stored in the metainfo: when the file is indexed
        for the first time the metainfo is empty and the whole file is indexed.
        As records are sent to the server the metainfo is updated, so rerunning
        the file after a failure resumes indexing from the last recorded point.
:param file_name: existing name of vcf file
:type file_name: str
:return: None
"""
with open(file_name) as f, self.get_indexer(self.target_file, record_converter=None) as indexer:
vcf_reader = vcf.Reader(f)
record_converter = RecordConverter(vcf_reader)
for line_id, feature in self.iterate_features(vcf_reader, record_converter=record_converter,
line_from=self.get_indexing_line_from()):
indexer.index_feature(feature)
def __set_initialization_version(self):
"""
        Set version of initialization. This key is required to support different versions.
"""
self.target_file.replace_metainfo_value('genestack.indexing:version', StringValue('splitEffAnnotations'))
# TODO: Remove this method if we decide not to index ReferenceGenome data
def __append_genome_features(self, mutation_list):
# code removed at commit f64cdf12ddd9a64ec5cbfdebaa1d01be24224239
pass
def process_features(self, features_list):
"""
This method can be overridden in children to process features before adding them to index.
:param features_list: list of features to be processed
:return: processed feature list
"""
# hack to support old api
if hasattr(self, 'process_record'):
import sys
sys.stderr.write('Warning! "process_record" method is deprecated use "process_features" instead\n')
return self.process_record(features_list)
else:
return features_list
| mit | -5,214,837,518,535,765,000 | 37.460993 | 113 | 0.564386 | false | 4.071321 | false | false | false | 0.002213 |
evernym/plenum | plenum/test/node_catchup/test_revert_during_catchup.py | 2 | 6099 | from itertools import combinations
import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID, COMMIT
from plenum.server.replica_validator_enums import STASH_CATCH_UP
from plenum.test import waits
from plenum.test.delayers import cDelay, cr_delay, lsDelay
from plenum.test.helper import check_last_ordered_3pc, \
assertEquality, sdk_send_random_and_check
from plenum.test.node_catchup.helper import waitNodeDataInequality, \
ensure_all_nodes_have_same_data, make_a_node_catchup_less, \
repair_node_catchup_less
from plenum.test.spy_helpers import getAllReturnVals
from plenum.test.test_node import getNonPrimaryReplicas, \
checkProtocolInstanceSetup
from plenum.test.view_change.helper import ensure_view_change
from stp_core.loop.eventually import eventually
Max3PCBatchSize = 3
TestRunningTimeLimitSec = 125
@pytest.fixture(scope="module")
def tconf(tconf):
oldMax3PCBatchSize = tconf.Max3PCBatchSize
oldMax3PCBatchWait = tconf.Max3PCBatchWait
tconf.Max3PCBatchSize = Max3PCBatchSize
tconf.Max3PCBatchWait = 1000
yield tconf
tconf.Max3PCBatchSize = oldMax3PCBatchSize
tconf.Max3PCBatchWait = oldMax3PCBatchWait
@pytest.mark.skip(reason="We don't make a catchup during new view_change")
def test_slow_node_reverts_unordered_state_during_catchup(looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
"""
    Delay COMMITs to a node such that when it needs to catch up, it has to
    revert some unordered state. By that time the node should also have
    received all COMMITs, so that it will apply some of them (those for
    which it has not received txns from catchup).
    To achieve this, delay COMMITs for a long time and catch up to a state a
    little older than the one received in LedgerStatus. Once catchup
    completes, reset the delays and try to process the delayed COMMITs: some
    will be rejected, but some will be processed since catchup was done for
    an older ledger.
"""
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 3 * Max3PCBatchSize)
nprs = getNonPrimaryReplicas(txnPoolNodeSet, 0)
slow_node = nprs[-1].node
other_nodes = [n for n in txnPoolNodeSet if n != slow_node]
slow_master_replica = slow_node.master_replica
commit_delay = 150
catchup_rep_delay = 25
# Delay COMMITs to one node
slow_node.nodeIbStasher.delay(cDelay(commit_delay, 0))
# Delay LEDGER_STAUS on slow node, so that only MESSAGE_REQUEST(LEDGER_STATUS) is sent, and the
# node catch-ups 2 times.
# Otherwise other nodes may receive multiple LEDGER_STATUSes from slow node, and return Consistency proof for all
# missing txns, so no stashed ones are applied
slow_node.nodeIbStasher.delay(lsDelay(1000))
# Make the slow node receive txns for a smaller ledger so it still finds
# the need to catchup
delay_batches = 2
make_a_node_catchup_less(slow_node, other_nodes, DOMAIN_LEDGER_ID,
delay_batches * Max3PCBatchSize)
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 6 * Max3PCBatchSize)
ensure_all_nodes_have_same_data(looper, other_nodes)
waitNodeDataInequality(looper, slow_node, *other_nodes)
old_lcu_count = slow_node.spylog.count(slow_node.allLedgersCaughtUp)
# `slow_node` is slow to receive CatchupRep, so that it
# gets a chance to order COMMITs
slow_node.nodeIbStasher.delay(cr_delay(catchup_rep_delay))
old_last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
# start view change (and hence catchup)
ensure_view_change(looper, txnPoolNodeSet)
# Check last ordered of `other_nodes` is same
for n1, n2 in combinations(other_nodes, 2):
check_last_ordered_3pc(n1, n2)
assert slow_master_replica.last_prepared_before_view_change == old_last_ordered
old_pc_count = slow_master_replica._ordering_service.spylog.count(
slow_master_replica._ordering_service._validate)
assert slow_node.master_replica.stasher.stash_size(STASH_CATCH_UP) == 0
# Repair the network so COMMITs are received, processed and stashed
slow_node.reset_delays_and_process_delayeds(COMMIT)
def chk2():
# COMMITs are processed for prepared messages
assert slow_master_replica._ordering_service.spylog.count(
slow_master_replica._ordering_service._validate) > old_pc_count
looper.run(eventually(chk2, retryWait=1, timeout=5))
def chk3():
# (delay_batches * Max3PCBatchSize * commits_count_in_phase) COMMITs are stashed
assert slow_node.master_replica.stasher.stash_size(STASH_CATCH_UP) == \
delay_batches * Max3PCBatchSize * (len(txnPoolNodeSet) - 1)
looper.run(eventually(chk3, retryWait=1, timeout=15))
# fix catchup, so the node gets a chance to be caught-up
repair_node_catchup_less(other_nodes)
def chk4():
# Some COMMITs were received but stashed and
# they will processed after catchup
assert slow_node.master_replica.stasher.stash_size(STASH_CATCH_UP) == 0
looper.run(eventually(chk4, retryWait=1, timeout=catchup_rep_delay + 50))
def chk5():
# Catchup was done once
assert slow_node.spylog.count(
slow_node.allLedgersCaughtUp) > old_lcu_count
looper.run(
eventually(
chk5,
retryWait=1,
timeout=waits.expectedPoolCatchupTime(
len(txnPoolNodeSet))))
# make sure that the pool is functional
checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 2 * Max3PCBatchSize)
ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
| apache-2.0 | -1,431,413,913,786,530,000 | 40.489796 | 117 | 0.693884 | false | 3.469283 | true | false | false | 0.000656 |
NORDUnet/opennsa | test/test_linkvector.py | 1 | 6142 | from twisted.trial import unittest
from opennsa.topology import linkvector
ARUBA_PORT = 'aru'
BONAIRE_PORT = 'bon'
CURACAO_PORT = 'cur'
DOMINICA_PORT = 'dom'
LOCAL_TOPO = 'local:topo'
ARUBA_TOPO = 'aruba:topo'
BONAIRE_TOPO = 'bonaire:topo'
CURACAO_TOPO = 'curacao:topo'
DOMINCA_TOPO = 'dominica:topo'
class LinkVectorTest(unittest.TestCase):
def setUp(self):
self.rv = linkvector.LinkVector( [ LOCAL_TOPO ] )
def testNoReachability(self):
self.rv.updateVector(ARUBA_TOPO, ARUBA_PORT, { ARUBA_TOPO : 1, BONAIRE_TOPO : 2 , CURACAO_TOPO : 3 } )
self.failUnlessEqual(self.rv.vector(ARUBA_TOPO, source=LOCAL_TOPO), (None, None))
def testPathfindingVectorManualVectors(self):
self.rv.updateVector(LOCAL_TOPO, ARUBA_PORT, { ARUBA_TOPO : 1 } )
self.rv.updateVector(ARUBA_TOPO, ARUBA_PORT, { ARUBA_TOPO : 1, BONAIRE_TOPO : 2 , CURACAO_TOPO : 3 } )
self.failUnlessEqual(self.rv.vector(ARUBA_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
self.failUnlessEqual(self.rv.vector(BONAIRE_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
self.failUnlessEqual(self.rv.vector(CURACAO_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
# self.failUnlessEquals( self.rv.listVectors(), { ARUBA_TOPO : 1, BONAIRE_TOPO : 2, CURACAO_TOPO : 3 } )
self.rv.updateVector(BONAIRE_TOPO, BONAIRE_PORT, { BONAIRE_TOPO: 1, CURACAO_TOPO : 2 } )
self.failUnlessEqual( self.rv.vector(ARUBA_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
self.failUnlessEqual( self.rv.vector(BONAIRE_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
self.failUnlessEqual( self.rv.vector(CURACAO_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
# self.failUnlessEquals( self.rv.listVectors(), { ARUBA_TOPO : 1, BONAIRE_TOPO : 1, CURACAO_TOPO : 2 } )
# def testLocalNetworkExclusion(self):
#
# # i think this test is bogus now
# self.rv = linkvector.LinkVector(local_networks=[ BONAIRE_TOPO ])
#
# self.rv.updateVector(ARUBA_TOPO, ARUBA_PORT, { ARUBA_TOPO : 1, BONAIRE_TOPO : 1, CURACAO_TOPO : 2 } )
#
# self.failUnlessEqual( self.rv.vector(ARUBA_TOPO, source=BONAIRE_TOPO), (None, None))
# self.failUnlessEqual( self.rv.vector(BONAIRE_TOPO, source=BONAIRE_TOPO), (None, None))
# self.failUnlessEqual( self.rv.vector(CURACAO_TOPO, source=BONAIRE_TOPO), (BONAIRE_TOPO, BONAIRE_PORT))
def testBlackList(self):
self.rv = linkvector.LinkVector( [ BONAIRE_TOPO ], blacklist_networks = [ CURACAO_TOPO ] )
self.rv.updateVector(BONAIRE_TOPO, ARUBA_PORT, { ARUBA_TOPO : 1, CURACAO_TOPO : 2 } )
self.rv.updateVector(BONAIRE_TOPO, CURACAO_PORT, { CURACAO_TOPO : 1 } )
self.failUnlessEqual( self.rv.vector(ARUBA_TOPO, source=BONAIRE_TOPO), (BONAIRE_TOPO, ARUBA_PORT))
self.failUnlessEqual( self.rv.vector(BONAIRE_TOPO, source=BONAIRE_TOPO), (None, None))
self.failUnlessEqual( self.rv.vector(CURACAO_TOPO, source=BONAIRE_TOPO), (None, None))
def testMaxCost(self):
self.rv = linkvector.LinkVector( [ LOCAL_TOPO ], max_cost=3 )
self.rv.updateVector(LOCAL_TOPO, ARUBA_PORT, { ARUBA_TOPO : 1, BONAIRE_TOPO : 2 , CURACAO_TOPO : 4 } )
self.failUnlessEqual( self.rv.vector(ARUBA_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
self.failUnlessEqual( self.rv.vector(BONAIRE_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
self.failUnlessEqual( self.rv.vector(CURACAO_TOPO, source=LOCAL_TOPO), (None, None))
def testUnreachabilityThenReachability(self):
self.rv = linkvector.LinkVector( [ LOCAL_TOPO ] )
self.rv.updateVector(LOCAL_TOPO, ARUBA_PORT, { ARUBA_TOPO : 1 } )
self.rv.updateVector(BONAIRE_TOPO, CURACAO_PORT, { CURACAO_TOPO : 1 } )
self.failUnlessEqual(self.rv.vector(ARUBA_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
self.failUnlessEqual(self.rv.vector(BONAIRE_TOPO, source=LOCAL_TOPO), (None, None))
self.failUnlessEqual(self.rv.vector(CURACAO_TOPO, source=LOCAL_TOPO), (None, None))
self.rv.updateVector(ARUBA_TOPO, BONAIRE_PORT, { BONAIRE_TOPO : 1 } )
self.failUnlessEqual(self.rv.vector(CURACAO_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
def testMultiNetworkReachability(self):
self.rv = linkvector.LinkVector( [ LOCAL_TOPO ] )
self.rv.updateVector(LOCAL_TOPO, ARUBA_PORT, { ARUBA_TOPO : 1 } )
self.rv.updateVector(ARUBA_TOPO, BONAIRE_PORT, { BONAIRE_TOPO : 1 } )
self.failUnlessEqual(self.rv.vector(ARUBA_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
self.failUnlessEqual(self.rv.vector(BONAIRE_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
self.failUnlessEqual(self.rv.vector(CURACAO_TOPO, source=LOCAL_TOPO), (None, None))
self.rv.updateVector(BONAIRE_TOPO, CURACAO_PORT, { CURACAO_TOPO : 1 } )
self.failUnlessEqual(self.rv.vector(CURACAO_TOPO, source=LOCAL_TOPO), (LOCAL_TOPO, ARUBA_PORT))
def testLocalThenRemoteVector(self):
ARUBA_OJS_NET = 'aruba:ojs'
ARUBA_SAN_NET = 'aruba:san'
self.rv = linkvector.LinkVector( [ ARUBA_OJS_NET, ARUBA_SAN_NET ] )
self.failUnlessEqual(self.rv.vector(ARUBA_OJS_NET, source=ARUBA_SAN_NET), (None, None))
self.failUnlessEqual(self.rv.vector(ARUBA_SAN_NET, source=ARUBA_OJS_NET), (None, None))
self.rv.updateVector(ARUBA_OJS_NET, 'san', { ARUBA_SAN_NET: 1 } )
self.rv.updateVector(ARUBA_SAN_NET, 'ojs', { ARUBA_OJS_NET: 1 } )
self.rv.updateVector(ARUBA_SAN_NET, 'bon', { BONAIRE_TOPO: 1 } )
self.failUnlessEqual(self.rv.vector(ARUBA_OJS_NET, source=ARUBA_SAN_NET), (ARUBA_SAN_NET, 'ojs'))
self.failUnlessEqual(self.rv.vector(ARUBA_SAN_NET, source=ARUBA_OJS_NET), (ARUBA_OJS_NET, 'san'))
self.failUnlessEqual(self.rv.vector(BONAIRE_TOPO, source=ARUBA_OJS_NET), (ARUBA_OJS_NET, 'san'))
self.failUnlessEqual(self.rv.vector(BONAIRE_TOPO, source=ARUBA_SAN_NET), (ARUBA_SAN_NET, 'bon'))
| bsd-3-clause | 3,396,600,941,055,753,700 | 43.832117 | 113 | 0.667047 | false | 2.637183 | true | false | false | 0.027027 |
kidburglar/youtube-dl | youtube_dl/extractor/phoenix.py | 61 | 1506 | from __future__ import unicode_literals
from .dreisat import DreiSatIE
class PhoenixIE(DreiSatIE):
IE_NAME = 'phoenix.de'
_VALID_URL = r'''(?x)https?://(?:www\.)?phoenix\.de/content/
(?:
phoenix/die_sendungen/(?:[^/]+/)?
)?
(?P<id>[0-9]+)'''
_TESTS = [
{
'url': 'http://www.phoenix.de/content/884301',
'md5': 'ed249f045256150c92e72dbb70eadec6',
'info_dict': {
'id': '884301',
'ext': 'mp4',
'title': 'Michael Krons mit Hans-Werner Sinn',
'description': 'Im Dialog - Sa. 25.10.14, 00.00 - 00.35 Uhr',
'upload_date': '20141025',
'uploader': 'Im Dialog',
}
},
{
'url': 'http://www.phoenix.de/content/phoenix/die_sendungen/869815',
'only_matching': True,
},
{
'url': 'http://www.phoenix.de/content/phoenix/die_sendungen/diskussionen/928234',
'only_matching': True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
internal_id = self._search_regex(
r'<div class="phx_vod" id="phx_vod_([0-9]+)"',
webpage, 'internal video ID')
api_url = 'http://www.phoenix.de/php/mediaplayer/data/beitrags_details.php?ak=web&id=%s' % internal_id
return self.extract_from_xml_url(video_id, api_url)
| unlicense | -2,620,157,791,909,121,000 | 32.466667 | 110 | 0.507968 | false | 3.079755 | false | false | false | 0.001992 |
NL66278/OCB | addons/document/document.py | 93 | 83590 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import errno
import logging
import os
import random
import shutil
import string
import time
from StringIO import StringIO
import psycopg2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.osv.orm import except_orm
import openerp.report.interface
from openerp.tools.misc import ustr
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
from content_index import cntIndex
_logger = logging.getLogger(__name__)
class document_file(osv.osv):
_inherit = 'ir.attachment'
_columns = {
# Columns from ir.attachment:
'write_date': fields.datetime('Date Modified', readonly=True),
'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
# Fields of document:
'user_id': fields.many2one('res.users', 'Owner', select=1),
'parent_id': fields.many2one('document.directory', 'Directory', select=1, change_default=True),
'index_content': fields.text('Indexed Content'),
'partner_id':fields.many2one('res.partner', 'Partner', select=1),
'file_type': fields.char('Content Type'),
}
_order = "id desc"
_defaults = {
'user_id': lambda self, cr, uid, ctx:uid,
}
_sql_constraints = [
('filename_unique', 'unique (name,parent_id)', 'The filename must be unique in a directory !'),
]
    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Override check to verify access on the directory, validating the specifications of doc/access_permissions.rst"""
if not isinstance(ids, list):
ids = [ids]
super(document_file, self).check(cr, uid, ids, mode, context=context, values=values)
if ids:
self.pool.get('ir.model.access').check(cr, uid, 'document.directory', mode)
# use SQL to avoid recursive loop on read
cr.execute('SELECT DISTINCT parent_id from ir_attachment WHERE id in %s AND parent_id is not NULL', (tuple(ids),))
self.pool.get('document.directory').check_access_rule(cr, uid, [parent_id for (parent_id,) in cr.fetchall()], mode, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
# Grab ids, bypassing 'count'
ids = super(document_file, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=False)
if not ids:
return 0 if count else []
# Filter out documents that are in directories that the user is not allowed to read.
# Must use pure SQL to avoid access rules exceptions (we want to remove the records,
# not fail), and the records have been filtered in parent's search() anyway.
cr.execute('SELECT id, parent_id from ir_attachment WHERE id in %s', (tuple(ids),))
        # construct a dict of parent -> attachment ids
parents = {}
for attach_id, attach_parent in cr.fetchall():
parents.setdefault(attach_parent, []).append(attach_id)
parent_ids = parents.keys()
# filter parents
visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
# null parents means allowed
ids = parents.get(None,[])
for parent_id in visible_parent_ids:
ids.extend(parents[parent_id])
return len(ids) if count else ids
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
if 'name' not in default:
name = self.read(cr, uid, [id], ['name'])[0]['name']
default.update(name=_("%s (copy)") % (name))
return super(document_file, self).copy(cr, uid, id, default, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
# take partner from uid
if vals.get('res_id', False) and vals.get('res_model', False) and not vals.get('partner_id', False):
vals['partner_id'] = self.__get_partner_id(cr, uid, vals['res_model'], vals['res_id'], context)
if vals.get('datas', False):
vals['file_type'], vals['index_content'] = self._index(cr, uid, vals['datas'].decode('base64'), vals.get('datas_fname', False), None)
return super(document_file, self).create(cr, uid, vals, context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if vals.get('datas', False):
vals['file_type'], vals['index_content'] = self._index(cr, uid, vals['datas'].decode('base64'), vals.get('datas_fname', False), None)
return super(document_file, self).write(cr, uid, ids, vals, context)
def _index(self, cr, uid, data, datas_fname, file_type):
mime, icont = cntIndex.doIndex(data, datas_fname, file_type or None, None)
icont_u = ustr(icont)
return mime, icont_u
def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
""" A helper to retrieve the associated partner from any res_model+id
It is a hack that will try to discover if the mentioned record is
clearly associated with a partner record.
"""
obj_model = self.pool[res_model]
if obj_model._name == 'res.partner':
return res_id
elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
bro = obj_model.browse(cr, uid, res_id, context=context)
return bro.partner_id.id
return False
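    # Illustrative sketch: for a record of a hypothetical model that defines a
    # partner_id many2one to res.partner, this returns record.partner_id.id;
    # for a res.partner record the res_id itself is returned, otherwise False.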
class document_directory(osv.osv):
_name = 'document.directory'
_description = 'Directory'
_order = 'name'
_columns = {
'name': fields.char('Name', required=True, select=1),
'write_date': fields.datetime('Date Modified', readonly=True),
'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
'create_date': fields.datetime('Date Created', readonly=True),
'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
'user_id': fields.many2one('res.users', 'Owner'),
'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
'parent_id': fields.many2one('document.directory', 'Parent Directory', select=1, change_default=True),
'child_ids': fields.one2many('document.directory', 'parent_id', 'Children'),
'file_ids': fields.one2many('ir.attachment', 'parent_id', 'Files'),
'content_ids': fields.one2many('document.directory.content', 'directory_id', 'Virtual Files'),
'type': fields.selection([ ('directory','Static Directory'), ('ressource','Folders per resource'), ],
'Type', required=True, select=1, change_default=True,
            help="Each directory can either have the type Static or be linked to another resource. A static directory, as with Operating Systems, is the classic directory that can contain a set of files. The directories linked to system resources automatically possess sub-directories for each of the resource types defined in the parent directory."),
'domain': fields.char('Domain', help="Use a domain if you want to apply an automatic filter on visible resources."),
'ressource_type_id': fields.many2one('ir.model', 'Resource model', change_default=True,
help="Select an object here and there will be one folder per record of that resource."),
'resource_field': fields.many2one('ir.model.fields', 'Name field', help='Field to be used as name on resource directories. If empty, the "name" will be used.'),
'resource_find_all': fields.boolean('Find all resources',
help="If true, all attachments that match this resource will " \
" be located. If false, only ones that have this as parent." ),
'ressource_parent_type_id': fields.many2one('ir.model', 'Parent Model', change_default=True,
            help="If you put an object here, this directory template will appear below all of these objects. " \
"Such directories are \"attached\" to the specific model or record, just like attachments. " \
"Don't put a parent directory if you select a parent model."),
'ressource_id': fields.integer('Resource ID',
help="Along with Parent Model, this ID attaches this folder to a specific record of Parent Model."),
'ressource_tree': fields.boolean('Tree Structure',
help="Check this if you want to use the same tree structure as the object selected in the system."),
'dctx_ids': fields.one2many('document.directory.dctx', 'dir_id', 'Context fields'),
'company_id': fields.many2one('res.company', 'Company', change_default=True),
}
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'document.directory', context=c),
'user_id': lambda self,cr,uid,ctx: uid,
'domain': '[]',
'type': 'directory',
'ressource_id': 0,
'resource_find_all': True,
}
_sql_constraints = [
('dirname_uniq', 'unique (name,parent_id,ressource_id,ressource_parent_type_id)', 'The directory name must be unique !'),
('no_selfparent', 'check(parent_id <> id)', 'Directory cannot be parent of itself!'),
]
def name_get(self, cr, uid, ids, context=None):
res = []
if not self.search(cr,uid,[('id','in',ids)]):
ids = []
for d in self.browse(cr, uid, ids, context=context):
s = ''
d2 = d
while d2 and d2.parent_id:
s = d2.name + (s and ('/' + s) or '')
d2 = d2.parent_id
res.append((d.id, s or d.name))
return res
def get_full_path(self, cr, uid, dir_id, context=None):
""" Return the full path to this directory, in a list, root first
"""
if isinstance(dir_id, (tuple, list)):
assert len(dir_id) == 1
dir_id = dir_id[0]
def _parent(dir_id, path):
parent=self.browse(cr, uid, dir_id)
if parent.parent_id and not parent.ressource_parent_type_id:
_parent(parent.parent_id.id,path)
path.append(parent.name)
else:
path.append(parent.name)
return path
path = []
_parent(dir_id, path)
return path
_constraints = [
(osv.osv._check_recursion, 'Error! You cannot create recursive directories.', ['parent_id'])
]
def onchange_content_id(self, cr, uid, ids, ressource_type_id):
return {}
def get_object(self, cr, uid, uri, context=None):
""" Return a node object for the given uri.
This fn merely passes the call to node_context
"""
return get_node_context(cr, uid, context).get_uri(cr, uri)
def get_node_class(self, cr, uid, ids, dbro=None, dynamic=False, context=None):
"""Retrieve the class of nodes for this directory
This function can be overriden by inherited classes ;)
@param dbro The browse object, if caller already has it
"""
if dbro is None:
dbro = self.browse(cr, uid, ids, context=context)
if dynamic:
return node_res_obj
elif dbro.type == 'directory':
return node_dir
elif dbro.type == 'ressource':
return node_res_dir
else:
raise ValueError("dir node for %s type.", dbro.type)
def _prepare_context(self, cr, uid, nctx, context=None):
""" Fill nctx with properties for this database
@param nctx instance of nodes.node_context, to be filled
@param context ORM context (dict) for us
Note that this function is called *without* a list of ids,
it should behave the same for the whole database (based on the
ORM instance of document.directory).
Some databases may override this and attach properties to the
node_context. See WebDAV, CalDAV.
"""
return
def get_dir_permissions(self, cr, uid, ids, context=None):
"""Check what permission user 'uid' has on directory 'id'
"""
assert len(ids) == 1
res = 0
for pperms in [('read', 5), ('write', 2), ('unlink', 8)]:
try:
self.check_access_rule(cr, uid, ids, pperms[0], context=context)
res |= pperms[1]
except except_orm:
pass
return res
def _locate_child(self, cr, uid, root_id, uri, nparent, ncontext):
""" try to locate the node in uri,
Return a tuple (node_dir, remaining_path)
"""
return (node_database(context=ncontext), uri)
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default ={}
name = self.read(cr, uid, [id])[0]['name']
default.update(name=_("%s (copy)") % (name))
return super(document_directory,self).copy(cr, uid, id, default, context=context)
def _check_duplication(self, cr, uid, vals, ids=None, op='create'):
name=vals.get('name',False)
parent_id=vals.get('parent_id',False)
ressource_parent_type_id=vals.get('ressource_parent_type_id',False)
ressource_id=vals.get('ressource_id',0)
if op=='write':
for directory in self.browse(cr, SUPERUSER_ID, ids):
if not name:
name=directory.name
if not parent_id:
parent_id=directory.parent_id and directory.parent_id.id or False
# TODO fix algo
if not ressource_parent_type_id:
ressource_parent_type_id=directory.ressource_parent_type_id and directory.ressource_parent_type_id.id or False
if not ressource_id:
ressource_id=directory.ressource_id and directory.ressource_id or 0
res=self.search(cr,uid,[('id','<>',directory.id),('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
if len(res):
return False
if op=='create':
res = self.search(cr, SUPERUSER_ID, [('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
if len(res):
return False
return True
def write(self, cr, uid, ids, vals, context=None):
if not self._check_duplication(cr, uid, vals, ids, op='write'):
raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
return super(document_directory,self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
if not self._check_duplication(cr, uid, vals):
raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
newname = vals.get('name',False)
if newname:
for illeg in ('/', '@', '$', '#'):
if illeg in newname:
raise osv.except_osv(_('ValidateError'), _('Directory name contains special characters!'))
return super(document_directory,self).create(cr, uid, vals, context)
class document_directory_dctx(osv.osv):
""" In order to evaluate dynamic folders, child items could have a limiting
domain expression. For that, their parents will export a context where useful
information will be passed on.
        If you define something like "s_id" = "this.id" at a folder iterating over sales, its
        children could have a domain like [('sale_id', '=', s_id)]
This system should be used recursively, that is, parent dynamic context will be
appended to all children down the tree.
"""
_name = 'document.directory.dctx'
_description = 'Directory Dynamic Context'
_columns = {
'dir_id': fields.many2one('document.directory', 'Directory', required=True, ondelete="cascade"),
'field': fields.char('Field', required=True, select=1, help="The name of the field."),
'expr': fields.char('Expression', required=True, help="A python expression used to evaluate the field.\n" + \
"You can use 'dir_id' for current dir, 'res_id', 'res_model' as a reference to the current record, in dynamic folders"),
}
class document_directory_content_type(osv.osv):
_name = 'document.directory.content.type'
_description = 'Directory Content Type'
_columns = {
'name': fields.char('Content Type', required=True),
'code': fields.char('Extension', size=4),
'active': fields.boolean('Active'),
'mimetype': fields.char('Mime Type')
}
_defaults = {
'active': lambda *args: 1
}
class document_directory_content(osv.osv):
_name = 'document.directory.content'
_description = 'Directory Content'
_order = "sequence"
def _extension_get(self, cr, uid, context=None):
cr.execute('select code,name from document_directory_content_type where active')
res = cr.fetchall()
return res
_columns = {
'name': fields.char('Content Name', required=True),
'sequence': fields.integer('Sequence', size=16),
'prefix': fields.char('Prefix', size=16),
'suffix': fields.char('Suffix', size=16),
'report_id': fields.many2one('ir.actions.report.xml', 'Report'),
'extension': fields.selection(_extension_get, 'Document Type', required=True, size=4),
'include_name': fields.boolean('Include Record Name',
help="Check this field if you want that the name of the file to contain the record name." \
"\nIf set, the directory will have to be a resource one."),
'directory_id': fields.many2one('document.directory', 'Directory'),
}
_defaults = {
'extension': lambda *args: '.pdf',
'sequence': lambda *args: 1,
'include_name': lambda *args: 1,
}
def _file_get(self, cr, node, nodename, content, context=None):
""" return the nodes of a <node> parent having a <content> content
The return value MUST be false or a list of node_class objects.
"""
# TODO: respect the context!
model = node.res_model
if content.include_name and not model:
return False
res2 = []
tname = ''
if content.include_name:
record_name = node.displayname or ''
if record_name:
tname = (content.prefix or '') + record_name + (content.suffix or '') + (content.extension or '')
else:
tname = (content.prefix or '') + (content.name or '') + (content.suffix or '') + (content.extension or '')
if tname.find('/'):
tname=tname.replace('/', '_')
act_id = False
if 'dctx_res_id' in node.dctx:
act_id = node.dctx['res_id']
elif hasattr(node, 'res_id'):
act_id = node.res_id
else:
act_id = node.context.context.get('res_id',False)
if not nodename:
n = node_content(tname, node, node.context,content, act_id=act_id)
res2.append( n)
else:
if nodename == tname:
n = node_content(tname, node, node.context,content, act_id=act_id)
n.fill_fields(cr)
res2.append(n)
return res2
def process_write(self, cr, uid, node, data, context=None):
if node.extension != '.pdf':
raise Exception("Invalid content: %s" % node.extension)
return True
def process_read(self, cr, uid, node, context=None):
if node.extension != '.pdf':
raise Exception("Invalid content: %s" % node.extension)
report = self.pool.get('ir.actions.report.xml').browse(cr, uid, node.report_id, context=context)
srv = openerp.report.interface.report_int._reports['report.'+report.report_name]
ctx = node.context.context.copy()
ctx.update(node.dctx)
pdf,pdftype = srv.create(cr, uid, [node.act_id,], {}, context=ctx)
return pdf
class ir_action_report_xml(osv.osv):
_name="ir.actions.report.xml"
_inherit ="ir.actions.report.xml"
def _model_get(self, cr, uid, ids, name, arg, context=None):
res = {}
model_pool = self.pool.get('ir.model')
for data in self.read(cr, uid, ids, ['model']):
model = data.get('model',False)
if model:
model_id =model_pool.search(cr, uid, [('model','=',model)])
if model_id:
res[data.get('id')] = model_id[0]
else:
res[data.get('id')] = False
return res
def _model_search(self, cr, uid, obj, name, args, context=None):
if not len(args):
return []
assert len(args) == 1 and args[0][1] == '=', 'expression is not what we expect: %r' % args
model_id= args[0][2]
if not model_id:
# a deviation from standard behavior: when searching model_id = False
# we return *all* reports, not just ones with empty model.
# One reason is that 'model' is a required field so far
return []
model = self.pool.get('ir.model').read(cr, uid, [model_id])[0]['model']
report_id = self.search(cr, uid, [('model','=',model)])
if not report_id:
return [('id','=','0')]
return [('id','in',report_id)]
_columns={
'model_id' : fields.function(_model_get, fnct_search=_model_search, string='Model Id'),
}
class document_storage(osv.osv):
""" The primary object for data storage. Deprecated. """
_name = 'document.storage'
_description = 'Storage Media'
def get_data(self, cr, uid, id, file_node, context=None, fil_obj=None):
""" retrieve the contents of some file_node having storage_id = id
optionally, fil_obj could point to the browse object of the file
(ir.attachment)
"""
boo = self.browse(cr, uid, id, context=context)
if fil_obj:
ira = fil_obj
else:
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
data = ira.datas
if data:
out = data.decode('base64')
else:
out = ''
return out
def get_file(self, cr, uid, id, file_node, mode, context=None):
""" Return a file-like object for the contents of some node
"""
if context is None:
context = {}
boo = self.browse(cr, uid, id, context=context)
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
return nodefd_db(file_node, ira_browse=ira, mode=mode)
def set_data(self, cr, uid, id, file_node, data, context=None, fil_obj=None):
""" store the data.
This function MUST be used from an ir.attachment. It wouldn't make sense
to store things persistently for other types (dynamic).
"""
boo = self.browse(cr, uid, id, context=context)
if fil_obj:
ira = fil_obj
else:
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
_logger.debug( "Store data for ir.attachment #%d." % ira.id)
store_fname = None
fname = None
filesize = len(data)
self.pool.get('ir.attachment').write(cr, uid, [file_node.file_id], {'datas': data.encode('base64')}, context=context)
# 2nd phase: store the metadata
try:
icont = ''
mime = ira.file_type
if not mime:
mime = ""
try:
mime, icont = cntIndex.doIndex(data, ira.datas_fname, ira.file_type or None, fname)
except Exception:
_logger.debug('Cannot index file.', exc_info=True)
pass
try:
icont_u = ustr(icont)
except UnicodeError:
icont_u = ''
# a hack: /assume/ that the calling write operation will not try
# to write the fname and size, and update them in the db concurrently.
# We cannot use a write() here, because we are already in one.
cr.execute('UPDATE ir_attachment SET file_size = %s, index_content = %s, file_type = %s WHERE id = %s', (filesize, icont_u, mime, file_node.file_id))
self.pool.get('ir.attachment').invalidate_cache(cr, uid, ['file_size', 'index_content', 'file_type'], [file_node.file_id], context=context)
file_node.content_length = filesize
file_node.content_type = mime
return True
except Exception, e :
_logger.warning("Cannot save data.", exc_info=True)
# should we really rollback once we have written the actual data?
# at the db case (only), that rollback would be safe
raise except_orm(_('Error at doc write!'), str(e))
def _str2time(cre):
""" Convert a string with time representation (from db) into time (float)
Note: a place to fix if datetime is used in db.
"""
if not cre:
return time.time()
frac = 0.0
if isinstance(cre, basestring) and '.' in cre:
fdot = cre.find('.')
frac = float(cre[fdot:])
cre = cre[:fdot]
return time.mktime(time.strptime(cre,'%Y-%m-%d %H:%M:%S')) + frac
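# e.g. (illustrative) _str2time('2010-01-02 03:04:05.25') yields
# time.mktime() of that timestamp plus the 0.25 fraction; a falsy value
# falls back to the current time.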
def get_node_context(cr, uid, context):
return node_context(cr, uid, context)
#
# An object that represent an uri
# path: the uri of the object
# content: the Content it belongs to (_print.pdf)
# type: content or collection
# content: objct = res.partner
# collection: object = directory, object2 = res.partner
# file: objct = ir.attachement
# root: if we are at the first directory of a ressource
#
class node_context(object):
""" This is the root node, representing access to some particular context
A context is a set of persistent data, which may influence the structure
of the nodes. All other transient information during a data query should
be passed down with function arguments.
"""
cached_roots = {}
node_file_class = None
def __init__(self, cr, uid, context=None):
self.dbname = cr.dbname
self.uid = uid
self.context = context
if context is None:
context = {}
context['uid'] = uid
self._dirobj = openerp.registry(cr.dbname).get('document.directory')
self.node_file_class = node_file
self.extra_ctx = {} # Extra keys for context, that do _not_ trigger inequality
assert self._dirobj
self._dirobj._prepare_context(cr, uid, self, context=context)
self.rootdir = False #self._dirobj._get_root_directory(cr,uid,context)
def __eq__(self, other):
if not type(other) == node_context:
return False
if self.dbname != other.dbname:
return False
if self.uid != other.uid:
return False
if self.context != other.context:
return False
if self.rootdir != other.rootdir:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def get(self, name, default=None):
return self.context.get(name, default)
def get_uri(self, cr, uri):
""" Although this fn passes back to doc.dir, it is needed since
it is a potential caching point.
"""
(ndir, duri) = self._dirobj._locate_child(cr, self.uid, self.rootdir, uri, None, self)
while duri:
ndir = ndir.child(cr, duri[0])
if not ndir:
return False
duri = duri[1:]
return ndir
def get_dir_node(self, cr, dbro):
"""Create (or locate) a node for a directory
@param dbro a browse object of document.directory
"""
fullpath = dbro.get_full_path(context=self.context)
klass = dbro.get_node_class(dbro, context=self.context)
return klass(fullpath, None ,self, dbro)
def get_file_node(self, cr, fbro):
""" Create or locate a node for a static file
@param fbro a browse object of an ir.attachment
"""
parent = None
if fbro.parent_id:
parent = self.get_dir_node(cr, fbro.parent_id)
return self.node_file_class(fbro.name, parent, self, fbro)
class node_class(object):
""" this is a superclass for our inodes
It is an API for all code that wants to access the document files.
Nodes have attributes which contain usual file properties
"""
our_type = 'baseclass'
DAV_PROPS = None
DAV_M_NS = None
def __init__(self, path, parent, context):
assert isinstance(context,node_context)
assert (not parent ) or isinstance(parent,node_class)
self.path = path
self.context = context
self.type=self.our_type
self.parent = parent
self.uidperms = 5 # computed permissions for our uid, in unix bits
self.mimetype = 'application/octet-stream'
self.create_date = None
self.write_date = None
self.unixperms = 0660
self.uuser = 'user'
self.ugroup = 'group'
self.content_length = 0
# dynamic context:
self.dctx = {}
if parent:
self.dctx = parent.dctx.copy()
self.displayname = 'Object'
def __eq__(self, other):
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def full_path(self):
""" Return the components of the full path for some
node.
The returned list only contains the names of nodes.
"""
if self.parent:
s = self.parent.full_path()
else:
s = []
if isinstance(self.path,list):
s+=self.path
elif self.path is None:
s.append('')
else:
s.append(self.path)
return s #map(lambda x: '/' +x, s)
def __repr__(self):
return "%s@/%s" % (self.our_type, '/'.join(self.full_path()))
def children(self, cr, domain=None):
print "node_class.children()"
return [] #stub
def child(self, cr, name, domain=None):
print "node_class.child()"
return None
def get_uri(self, cr, uri):
duri = uri
ndir = self
while duri:
ndir = ndir.child(cr, duri[0])
if not ndir:
return False
duri = duri[1:]
return ndir
def path_get(self):
print "node_class.path_get()"
return False
def get_data(self, cr):
raise TypeError('No data for %s.'% self.type)
def open_data(self, cr, mode):
""" Open a node_descriptor object for this node.
@param the mode of open, eg 'r', 'w', 'a', like file.open()
            This operation may lock the data for this node (and across
other node hierarchies), until the descriptor is close()d. If
the node is locked, subsequent opens (depending on mode) may
immediately fail with an exception (which?).
For this class, there is no data, so no implementation. Each
child class that has data should override this.
"""
raise TypeError('No data for %s.' % self.type)
def get_etag(self, cr):
""" Get a tag, unique per object + modification.
see. http://tools.ietf.org/html/rfc2616#section-13.3.3 """
return '"%s-%s"' % (self._get_ttag(cr), self._get_wtag(cr))
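    # e.g. (illustrative) a node whose _get_ttag() is 'db-mydb' and whose
    # write_date maps to 1262401445.0 gets the etag '"db-mydb-12624014450"'.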
def _get_wtag(self, cr):
""" Return the modification time as a unique, compact string """
return str(_str2time(self.write_date)).replace('.','')
def _get_ttag(self, cr):
""" Get a unique tag for this type/id of object.
Must be overriden, so that each node is uniquely identified.
"""
print "node_class.get_ttag()",self
raise NotImplementedError("get_ttag stub()")
def get_dav_props(self, cr):
""" If this class has special behaviour for GroupDAV etc, export
its capabilities """
# This fn is placed here rather than WebDAV, because we want the
# baseclass methods to apply to all node subclasses
return self.DAV_PROPS or {}
def match_dav_eprop(self, cr, match, ns, prop):
res = self.get_dav_eprop(cr, ns, prop)
if res == match:
return True
return False
def get_dav_eprop(self, cr, ns, prop):
if not self.DAV_M_NS:
return None
if self.DAV_M_NS.has_key(ns):
prefix = self.DAV_M_NS[ns]
else:
_logger.debug('No namespace: %s ("%s").',ns, prop)
return None
mname = prefix + "_" + prop.replace('-','_')
if not hasattr(self, mname):
return None
try:
m = getattr(self, mname)
r = m(cr)
return r
except AttributeError:
_logger.debug('The property %s is not supported.' % prop, exc_info=True)
return None
def get_dav_resourcetype(self, cr):
""" Get the DAV resource type.
Is here because some nodes may exhibit special behaviour, like
CalDAV/GroupDAV collections
"""
raise NotImplementedError
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
""" Move this node to a new parent directory.
@param ndir_node the collection that this node should be moved under
@param new_name a name to rename this node to. If omitted, the old
name is preserved
@param fil_obj, can be None, is the browse object for the file,
if already available.
@param ndir_obj must be the browse object to the new doc.directory
location, where this node should be moved to.
in_write: When called by write(), we shouldn't attempt to write the
object, but instead return the dict of vals (avoid re-entrance).
If false, we should write all data to the object, here, as if the
caller won't do anything after calling move_to()
Return value:
True: the node is moved, the caller can update other values, too.
False: the node is either removed or fully updated, the caller
must discard the fil_obj, not attempt to write any more to it.
dict: values to write back to the object. *May* contain a new id!
Depending on src and target storage, implementations of this function
could do various things.
Should also consider node<->content, dir<->dir moves etc.
Move operations, as instructed from APIs (e.g. request from DAV) could
use this function.
"""
raise NotImplementedError(repr(self))
def create_child(self, cr, path, data=None):
""" Create a regular file under this node
"""
_logger.warning("Attempted to create a file under %r, not possible.", self)
raise IOError(errno.EPERM, "Not allowed to create file(s) here.")
def create_child_collection(self, cr, objname):
""" Create a child collection (directory) under self
"""
_logger.warning("Attempted to create a collection under %r, not possible.", self)
raise IOError(errno.EPERM, "Not allowed to create folder(s) here.")
def rm(self, cr):
raise NotImplementedError(repr(self))
def rmcol(self, cr):
raise NotImplementedError(repr(self))
def get_domain(self, cr, filters):
# TODO Document
return []
def check_perms(self, perms):
""" Check the permissions of the current node.
        @param perms either an integer of the bits to check, or
a string with the permission letters
Permissions of nodes are (in a unix way):
1, x : allow descend into dir
2, w : allow write into file, or modification to dir
4, r : allow read of file, or listing of dir contents
8, u : allow remove (unlink)
"""
if isinstance(perms, str):
pe2 = 0
chars = { 'x': 1, 'w': 2, 'r': 4, 'u': 8 }
for c in perms:
pe2 = pe2 | chars[c]
perms = pe2
elif isinstance(perms, int):
if perms < 0 or perms > 15:
raise ValueError("Invalid permission bits.")
else:
raise ValueError("Invalid permission attribute.")
return ((self.uidperms & perms) == perms)
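# A minimal illustration (not part of the original module) of the
# letter-to-bit mapping used by check_perms() above: a node whose
# uidperms is 5 (r + x) passes 'r' and 'x' checks but not 'w' or 'u'.
def _demo_check_perms_bits():
    chars = {'x': 1, 'w': 2, 'r': 4, 'u': 8}
    assert sum(chars[c] for c in 'rx') == 5
    assert sum(chars[c] for c in 'rwxu') == 15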
class node_database(node_class):
""" A node representing the database directory
"""
our_type = 'database'
def __init__(self, path=None, parent=False, context=None):
if path is None:
path = []
super(node_database,self).__init__(path, parent, context)
self.unixperms = 040750
self.uidperms = 5
def children(self, cr, domain=None):
res = self._child_get(cr, domain=domain) + self._file_get(cr)
return res
def child(self, cr, name, domain=None):
        res = self._child_get(cr, name, domain=domain)
if res:
return res[0]
res = self._file_get(cr,name)
if res:
return res[0]
return None
def _child_get(self, cr, name=False, domain=None):
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('parent_id','=', False), ('ressource_parent_type_id','=',False)]
if name:
where.append(('name','=',name))
is_allowed = self.check_perms(1)
else:
is_allowed = self.check_perms(5)
if not is_allowed:
            raise IOError(errno.EPERM, "Permission denied for directory.")
if domain:
where = where + domain
ids = dirobj.search(cr, uid, where, context=ctx)
res = []
for dirr in dirobj.browse(cr, uid, ids, context=ctx):
klass = dirr.get_node_class(dirr, context=ctx)
res.append(klass(dirr.name, self, self.context,dirr))
return res
def _file_get(self, cr, nodename=False):
res = []
return res
def _get_ttag(self, cr):
return 'db-%s' % cr.dbname
def mkdosname(company_name, default='noname'):
""" convert a string to a dos-like name"""
if not company_name:
return default
badchars = ' !@#$%^`~*()+={}[];:\'"/?.<>'
n = ''
for c in company_name[:8]:
n += (c in badchars and '_') or c
return n
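# Quick illustration (hypothetical inputs; the expected values follow from
# the badchars list and the 8-character truncation above):
def _demo_mkdosname():
    assert mkdosname('Foo Bar & Co.') == 'Foo_Bar_'  # spaces -> '_', 8 chars
    assert mkdosname(False) == 'noname'              # falls back to default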
def _uid2unixperms(perms, has_owner):
""" Convert the uidperms and the owner flag to full unix bits
"""
res = 0
if has_owner:
res |= (perms & 0x07) << 6
res |= (perms & 0x05) << 3
elif perms & 0x02:
res |= (perms & 0x07) << 6
res |= (perms & 0x07) << 3
else:
res |= (perms & 0x07) << 6
res |= (perms & 0x05) << 3
res |= 0x05
return res
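# Expected values, derived from the bit shifts above (Python 2 octal
# literals, matching the 040xxx constants used elsewhere in this module):
def _demo_uid2unixperms():
    assert _uid2unixperms(0x07, True) == 0750   # owner rwx, group r-x
    assert _uid2unixperms(0x07, False) == 0770  # writable, ownerless
    assert _uid2unixperms(0x05, False) == 0555  # read-only, world-readable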
class node_dir(node_database):
our_type = 'collection'
def __init__(self, path, parent, context, dirr, dctx=None):
super(node_dir,self).__init__(path, parent,context)
self.dir_id = dirr and dirr.id or False
#todo: more info from dirr
self.mimetype = 'application/x-directory'
# 'httpd/unix-directory'
self.create_date = dirr and dirr.create_date or False
self.domain = dirr and dirr.domain or []
self.res_model = dirr and dirr.ressource_type_id and dirr.ressource_type_id.model or False
# TODO: the write date should be MAX(file.write)..
self.write_date = dirr and (dirr.write_date or dirr.create_date) or False
self.content_length = 0
try:
self.uuser = (dirr.user_id and dirr.user_id.login) or 'nobody'
except Exception:
self.uuser = 'nobody'
self.ugroup = mkdosname(dirr.company_id and dirr.company_id.name, default='nogroup')
self.uidperms = dirr.get_dir_permissions()
self.unixperms = 040000 | _uid2unixperms(self.uidperms, dirr and dirr.user_id)
if dctx:
self.dctx.update(dctx)
dc2 = self.context.context
dc2.update(self.dctx)
dc2['dir_id'] = self.dir_id
self.displayname = dirr and dirr.name or False
if dirr and dirr.dctx_ids:
for dfld in dirr.dctx_ids:
try:
self.dctx[dfld.field] = safe_eval(dfld.expr,dc2)
                except Exception:
                    _logger.exception("Cannot eval %s.", dfld.expr)
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
# Two directory nodes, for the same document.directory, may have a
# different context! (dynamic folders)
if self.dctx != other.dctx:
return False
return self.dir_id == other.dir_id
def get_data(self, cr):
#res = ''
#for child in self.children(cr):
# res += child.get_data(cr)
return None
def _file_get(self, cr, nodename=False):
res = super(node_dir,self)._file_get(cr, nodename)
is_allowed = self.check_perms(nodename and 1 or 5)
if not is_allowed:
            raise IOError(errno.EPERM, "Permission denied for directory.")
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
ids = cntobj.search(cr, uid, where, context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
res3 = cntobj._file_get(cr, self, nodename, content)
if res3:
res.extend(res3)
return res
def _child_get(self, cr, name=None, domain=None):
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('parent_id','=',self.dir_id)]
if name:
where.append(('name','=',name))
is_allowed = self.check_perms(1)
else:
is_allowed = self.check_perms(5)
if not is_allowed:
            raise IOError(errno.EPERM, "Permission denied for directory.")
if not domain:
domain = []
where2 = where + domain + [('ressource_parent_type_id','=',False)]
ids = dirobj.search(cr, uid, where2, context=ctx)
res = []
for dirr in dirobj.browse(cr, uid, ids, context=ctx):
klass = dirr.get_node_class(dirr, context=ctx)
res.append(klass(dirr.name, self, self.context,dirr))
# Static directories should never return files with res_model/res_id
# because static dirs are /never/ related to a record.
# In fact, files related to some model and parented by the root dir
# (the default), will NOT be accessible in the node system unless
# a resource folder for that model exists (with resource_find_all=True).
# Having resource attachments in a common folder is bad practice,
# because they would be visible to all users, and their names may be
# the same, conflicting.
where += [('res_model', '=', False)]
fil_obj = dirobj.pool.get('ir.attachment')
ids = fil_obj.search(cr, uid, where, context=ctx)
if ids:
for fil in fil_obj.browse(cr, uid, ids, context=ctx):
klass = self.context.node_file_class
res.append(klass(fil.name, self, self.context, fil))
return res
def rmcol(self, cr):
uid = self.context.uid
directory = self.context._dirobj.browse(cr, uid, self.dir_id)
res = False
if not directory:
            raise OSError(2, 'No such file or directory.')
if not self.check_perms('u'):
raise IOError(errno.EPERM,"Permission denied.")
if directory._name == 'document.directory':
if self.children(cr):
raise OSError(39, 'Directory not empty.')
res = self.context._dirobj.unlink(cr, uid, [directory.id])
else:
raise OSError(1, 'Operation is not permitted.')
return res
def create_child_collection(self, cr, objname):
object2 = False
if not self.check_perms(2):
raise IOError(errno.EPERM,"Permission denied.")
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
obj = dirobj.browse(cr, uid, self.dir_id)
if obj and (obj.type == 'ressource') and not object2:
raise OSError(1, 'Operation is not permitted.')
#objname = uri2[-1]
val = {
'name': objname,
'ressource_parent_type_id': obj and obj.ressource_type_id.id or False,
'ressource_id': object2 and object2.id or False,
'parent_id' : obj and obj.id or False
}
return dirobj.create(cr, uid, val)
def create_child(self, cr, path, data=None):
""" API function to create a child file object and node
Return the node_* created
"""
if not self.check_perms(2):
raise IOError(errno.EPERM,"Permission denied.")
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
fil_obj=dirobj.pool.get('ir.attachment')
val = {
'name': path,
'datas_fname': path,
'parent_id': self.dir_id,
# Datas are not set here
}
fil_id = fil_obj.create(cr, uid, val, context=ctx)
fil = fil_obj.browse(cr, uid, fil_id, context=ctx)
fnode = node_file(path, self, self.context, fil)
if data is not None:
fnode.set_data(cr, data, fil)
return fnode
def _get_ttag(self, cr):
return 'dir-%d' % self.dir_id
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
""" Move directory. This operation is simple, since the present node is
only used for static, simple directories.
Note /may/ be called with ndir_node = None, to rename the document root.
"""
if ndir_node and (ndir_node.context != self.context):
raise NotImplementedError("Cannot move directories between contexts.")
if (not self.check_perms('u')) or (not ndir_node.check_perms('w')):
raise IOError(errno.EPERM,"Permission denied.")
dir_obj = self.context._dirobj
if not fil_obj:
dbro = dir_obj.browse(cr, self.context.uid, self.dir_id, context=self.context.context)
else:
            dbro = fil_obj
assert dbro.id == self.dir_id
if not dbro:
            raise IndexError("Cannot locate dir %d." % self.dir_id)
if (not self.parent) and ndir_node:
if not dbro.parent_id:
raise IOError(errno.EPERM, "Cannot move the root directory!")
self.parent = self.context.get_dir_node(cr, dbro.parent_id)
assert self.parent
if self.parent != ndir_node:
_logger.debug('Cannot move dir %r from %r to %r.', self, self.parent, ndir_node)
raise NotImplementedError('Cannot move dir to another dir.')
ret = {}
if new_name and (new_name != dbro.name):
if ndir_node.child(cr, new_name):
raise IOError(errno.EEXIST, "Destination path already exists.")
ret['name'] = new_name
del dbro
if not in_write:
# We have to update the data ourselves
if ret:
ctx = self.context.context.copy()
ctx['__from_node'] = True
dir_obj.write(cr, self.context.uid, [self.dir_id,], ret, ctx)
ret = True
return ret
class node_res_dir(node_class):
""" A folder containing dynamic folders
A special sibling to node_dir, which does only contain dynamically
created folders foreach resource in the foreign model.
All folders should be of type node_res_obj and merely behave like
node_dirs (with limited domain).
"""
our_type = 'collection'
res_obj_class = None
def __init__(self, path, parent, context, dirr, dctx=None ):
super(node_res_dir,self).__init__(path, parent, context)
self.dir_id = dirr.id
#todo: more info from dirr
self.mimetype = 'application/x-directory'
# 'httpd/unix-directory'
self.create_date = dirr.create_date
# TODO: the write date should be MAX(file.write)..
self.write_date = dirr.write_date or dirr.create_date
self.content_length = 0
try:
self.uuser = (dirr.user_id and dirr.user_id.login) or 'nobody'
except Exception:
self.uuser = 'nobody'
self.ugroup = mkdosname(dirr.company_id and dirr.company_id.name, default='nogroup')
self.uidperms = dirr.get_dir_permissions()
self.unixperms = 040000 | _uid2unixperms(self.uidperms, dirr and dirr.user_id)
self.res_model = dirr.ressource_type_id and dirr.ressource_type_id.model or False
self.resm_id = dirr.ressource_id
self.res_find_all = dirr.resource_find_all
self.namefield = dirr.resource_field.name or 'name'
self.displayname = dirr.name
# Important: the domain is evaluated using the *parent* dctx!
self.domain = dirr.domain
self.ressource_tree = dirr.ressource_tree
# and then, we add our own vars in the dctx:
if dctx:
self.dctx.update(dctx)
# and then, we prepare a dctx dict, for deferred evaluation:
self.dctx_dict = {}
for dfld in dirr.dctx_ids:
self.dctx_dict[dfld.field] = dfld.expr
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
# Two nodes, for the same document.directory, may have a
# different context! (dynamic folders)
if self.dctx != other.dctx:
return False
return self.dir_id == other.dir_id
def children(self, cr, domain=None):
return self._child_get(cr, domain=domain)
def child(self, cr, name, domain=None):
res = self._child_get(cr, name, domain=domain)
if res:
return res[0]
return None
def _child_get(self, cr, name=None, domain=None):
""" return virtual children of resource, based on the
foreign object.
Note that many objects use NULL for a name, so we should
better call the name_search(),name_get() set of methods
"""
if self.res_model not in self.context._dirobj.pool:
return []
obj = self.context._dirobj.pool[self.res_model]
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
ctx.update(self.context.extra_ctx)
where = []
if self.domain:
app = safe_eval(self.domain, ctx)
if not app:
pass
elif isinstance(app, list):
where.extend(app)
elif isinstance(app, tuple):
where.append(app)
else:
raise RuntimeError("Incorrect domain expr: %s." % self.domain)
if self.resm_id:
where.append(('id','=',self.resm_id))
if name:
# The =like character will match underscores against any characters
# including the special ones that couldn't exist in a FTP/DAV request
where.append((self.namefield,'=like',name.replace('\\','\\\\')))
is_allowed = self.check_perms(1)
else:
is_allowed = self.check_perms(5)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
# print "Where clause for %s" % self.res_model, where
if self.ressource_tree:
object2 = False
if self.resm_id:
object2 = dirobj.pool[self.res_model].browse(cr, uid, self.resm_id) or False
if obj._parent_name in obj.fields_get(cr, uid):
where.append((obj._parent_name,'=',object2 and object2.id or False))
resids = obj.search(cr, uid, where, context=ctx)
res = []
for bo in obj.browse(cr, uid, resids, context=ctx):
if not bo:
continue
res_name = getattr(bo, self.namefield)
if not res_name:
continue
            # We can do no better here than skip nameless records.
# Escape the name for characters not supported in filenames
res_name = res_name.replace('/','_') # any other weird char?
if name and (res_name != ustr(name)):
# we have matched _ to any character, but we only meant to match
# the special ones.
                # E.g. 'a_c' will find 'abc', 'a/c' and 'a_c', but should
                # only return 'a/c' and 'a_c'.
continue
res.append(self.res_obj_class(res_name, self.dir_id, self, self.context, self.res_model, bo))
return res
def _get_ttag(self, cr):
return 'rdir-%d' % self.dir_id
class node_res_obj(node_class):
""" A dynamically created folder.
A special sibling to node_dir, which does only contain dynamically
created folders foreach resource in the foreign model.
All folders should be of type node_res_obj and merely behave like
node_dirs (with limited domain).
"""
our_type = 'collection'
def __init__(self, path, dir_id, parent, context, res_model, res_bo, res_id=None):
super(node_res_obj,self).__init__(path, parent,context)
assert parent
#todo: more info from dirr
self.dir_id = dir_id
self.mimetype = 'application/x-directory'
# 'httpd/unix-directory'
self.create_date = parent.create_date
# TODO: the write date should be MAX(file.write)..
self.write_date = parent.write_date
self.content_length = 0
self.uidperms = parent.uidperms & 15
self.unixperms = 040000 | _uid2unixperms(self.uidperms, True)
self.uuser = parent.uuser
self.ugroup = parent.ugroup
self.res_model = res_model
self.domain = parent.domain
self.displayname = path
self.dctx_dict = parent.dctx_dict
if isinstance(parent, node_res_dir):
self.res_find_all = parent.res_find_all
else:
self.res_find_all = False
if res_bo:
self.res_id = res_bo.id
dc2 = self.context.context.copy()
dc2.update(self.dctx)
dc2['res_model'] = res_model
dc2['res_id'] = res_bo.id
dc2['this'] = res_bo
for fld,expr in self.dctx_dict.items():
try:
self.dctx[fld] = safe_eval(expr, dc2)
                except Exception:
                    _logger.exception("Cannot eval %s for %s.", expr, fld)
else:
self.res_id = res_id
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
if not self.res_model == other.res_model:
return False
if not self.res_id == other.res_id:
return False
if self.domain != other.domain:
return False
if self.res_find_all != other.res_find_all:
return False
if self.dctx != other.dctx:
return False
return self.dir_id == other.dir_id
def children(self, cr, domain=None):
return self._child_get(cr, domain=domain) + self._file_get(cr)
def child(self, cr, name, domain=None):
res = self._child_get(cr, name, domain=domain)
if res:
return res[0]
res = self._file_get(cr, name)
if res:
return res[0]
return None
def _file_get(self, cr, nodename=False):
res = []
is_allowed = self.check_perms((nodename and 1) or 5)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
#if self.domain:
# where.extend(self.domain)
# print "res_obj file_get clause", where
ids = cntobj.search(cr, uid, where, context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
res3 = cntobj._file_get(cr, self, nodename, content, context=ctx)
if res3:
res.extend(res3)
return res
def get_dav_props_DEPR(self, cr):
# Deprecated! (but document_ics must be cleaned, first)
res = {}
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
ids = cntobj.search(cr, uid, where, context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
if content.extension == '.ics': # FIXME: call the content class!
res['http://groupdav.org/'] = ('resourcetype',)
return res
def get_dav_eprop_DEPR(self, cr, ns, prop):
# Deprecated!
if ns != 'http://groupdav.org/' or prop != 'resourcetype':
            _logger.warning("Who asks for %s:%s?", ns, prop)
return None
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
ids = cntobj.search(cr,uid,where,context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
# TODO: remove relic of GroupDAV
if content.extension == '.ics': # FIXME: call the content class!
return ('vevent-collection','http://groupdav.org/')
return None
def _child_get(self, cr, name=None, domain=None):
dirobj = self.context._dirobj
is_allowed = self.check_perms((name and 1) or 5)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
directory = dirobj.browse(cr, uid, self.dir_id)
obj = dirobj.pool[self.res_model]
where = []
res = []
if name:
where.append(('name','=',name))
# Directory Structure display in tree structure
if self.res_id and directory.ressource_tree:
where1 = []
if name:
where1.append(('name','=like',name.replace('\\','\\\\')))
if obj._parent_name in obj.fields_get(cr, uid):
where1.append((obj._parent_name, '=', self.res_id))
namefield = directory.resource_field.name or 'name'
resids = obj.search(cr, uid, where1, context=ctx)
for bo in obj.browse(cr, uid, resids, context=ctx):
if not bo:
continue
res_name = getattr(bo, namefield)
if not res_name:
continue
res_name = res_name.replace('/', '_')
if name and (res_name != ustr(name)):
continue
# TODO Revise
klass = directory.get_node_class(directory, dynamic=True, context=ctx)
rnode = klass(res_name, dir_id=self.dir_id, parent=self, context=self.context,
res_model=self.res_model, res_bo=bo)
rnode.res_find_all = self.res_find_all
res.append(rnode)
where2 = where + [('parent_id','=',self.dir_id) ]
ids = dirobj.search(cr, uid, where2, context=ctx)
bo = obj.browse(cr, uid, self.res_id, context=ctx)
for dirr in dirobj.browse(cr, uid, ids, context=ctx):
if name and (name != dirr.name):
continue
if dirr.type == 'directory':
klass = dirr.get_node_class(dirr, dynamic=True, context=ctx)
res.append(klass(dirr.name, dirr.id, self, self.context, self.res_model, res_bo = bo, res_id = self.res_id))
elif dirr.type == 'ressource':
# child resources can be controlled by properly set dctx
klass = dirr.get_node_class(dirr, context=ctx)
res.append(klass(dirr.name,self,self.context, dirr, {'active_id': self.res_id})) # bo?
fil_obj = dirobj.pool.get('ir.attachment')
if self.res_find_all:
where2 = where
where3 = where2 + [('res_model', '=', self.res_model), ('res_id','=',self.res_id)]
# print "where clause for dir_obj", where3
ids = fil_obj.search(cr, uid, where3, context=ctx)
if ids:
for fil in fil_obj.browse(cr, uid, ids, context=ctx):
klass = self.context.node_file_class
res.append(klass(fil.name, self, self.context, fil))
# Get Child Ressource Directories
if directory.ressource_type_id and directory.ressource_type_id.id:
where4 = where + [('ressource_parent_type_id','=',directory.ressource_type_id.id)]
where5 = where4 + ['|', ('ressource_id','=',0), ('ressource_id','=',self.res_id)]
dirids = dirobj.search(cr,uid, where5)
for dirr in dirobj.browse(cr, uid, dirids, context=ctx):
if dirr.type == 'directory' and not dirr.parent_id:
klass = dirr.get_node_class(dirr, dynamic=True, context=ctx)
rnode = klass(dirr.name, dirr.id, self, self.context, self.res_model, res_bo = bo, res_id = self.res_id)
rnode.res_find_all = dirr.resource_find_all
res.append(rnode)
if dirr.type == 'ressource':
klass = dirr.get_node_class(dirr, context=ctx)
rnode = klass(dirr.name, self, self.context, dirr, {'active_id': self.res_id})
rnode.res_find_all = dirr.resource_find_all
res.append(rnode)
return res
def create_child_collection(self, cr, objname):
dirobj = self.context._dirobj
is_allowed = self.check_perms(2)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
res_obj = dirobj.pool[self.res_model]
object2 = res_obj.browse(cr, uid, self.res_id) or False
obj = dirobj.browse(cr, uid, self.dir_id)
if obj and (obj.type == 'ressource') and not object2:
raise OSError(1, 'Operation is not permitted.')
val = {
'name': objname,
'ressource_parent_type_id': obj and obj.ressource_type_id.id or False,
'ressource_id': object2 and object2.id or False,
'parent_id' : False,
'resource_find_all': False,
}
        if (obj and (obj.type in ('directory',))) or not object2:
val['parent_id'] = obj and obj.id or False
return dirobj.create(cr, uid, val)
def create_child(self, cr, path, data=None):
""" API function to create a child file object and node
Return the node_* created
"""
is_allowed = self.check_perms(2)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
fil_obj=dirobj.pool.get('ir.attachment')
val = {
'name': path,
'datas_fname': path,
'res_model': self.res_model,
'res_id': self.res_id,
# Datas are not set here
}
if not self.res_find_all:
val['parent_id'] = self.dir_id
fil_id = fil_obj.create(cr, uid, val, context=ctx)
fil = fil_obj.browse(cr, uid, fil_id, context=ctx)
klass = self.context.node_file_class
fnode = klass(path, self, self.context, fil)
if data is not None:
fnode.set_data(cr, data, fil)
return fnode
def _get_ttag(self, cr):
return 'rodir-%d-%d' % (self.dir_id, self.res_id)
node_res_dir.res_obj_class = node_res_obj
class node_file(node_class):
our_type = 'file'
def __init__(self, path, parent, context, fil):
super(node_file,self).__init__(path, parent,context)
self.file_id = fil.id
#todo: more info from ir_attachment
if fil.file_type and '/' in fil.file_type:
self.mimetype = str(fil.file_type)
self.create_date = fil.create_date
self.write_date = fil.write_date or fil.create_date
self.content_length = fil.file_size
self.displayname = fil.name
self.uidperms = 14
if parent:
if not parent.check_perms('x'):
self.uidperms = 0
elif not parent.check_perms('w'):
self.uidperms = 4
try:
self.uuser = (fil.user_id and fil.user_id.login) or 'nobody'
except Exception:
self.uuser = 'nobody'
self.ugroup = mkdosname(fil.company_id and fil.company_id.name, default='nogroup')
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
if self.dctx != other.dctx:
return False
return self.file_id == other.file_id
def open_data(self, cr, mode):
if not self.check_perms(4):
raise IOError(errno.EPERM, "Permission denied.")
stobj = self.context._dirobj.pool.get('document.storage')
return stobj.get_file(cr, self.context.uid, None, self, mode=mode, context=self.context.context)
def rm(self, cr):
uid = self.context.uid
if not self.check_perms(8):
raise IOError(errno.EPERM, "Permission denied.")
document_obj = self.context._dirobj.pool.get('ir.attachment')
if self.type in ('collection','database'):
return False
document = document_obj.browse(cr, uid, self.file_id, context=self.context.context)
res = False
if document and document._name == 'ir.attachment':
res = document_obj.unlink(cr, uid, [document.id])
return res
def fix_ppath(self, cr, fbro):
"""Sometimes we may init this w/o path, parent.
This function fills the missing path from the file browse object
Note: this may be an expensive operation, do on demand. However,
once caching is in, we might want to do that at init time and keep
this object anyway
"""
if self.path or self.parent:
return
assert fbro
uid = self.context.uid
dirpath = []
if fbro.parent_id:
dirobj = self.context._dirobj.pool.get('document.directory')
dirpath = dirobj.get_full_path(cr, uid, fbro.parent_id.id, context=self.context.context)
if fbro.datas_fname:
dirpath.append(fbro.datas_fname)
else:
dirpath.append(fbro.name)
if len(dirpath)>1:
self.path = dirpath
else:
self.path = dirpath[0]
def get_data(self, cr, fil_obj=None):
""" Retrieve the data for some file.
fil_obj may optionally be specified, and should be a browse object
for the file. This is useful when the caller has already initiated
the browse object. """
if not self.check_perms(4):
raise IOError(errno.EPERM, "Permission denied.")
stobj = self.context._dirobj.pool.get('document.storage')
return stobj.get_data(cr, self.context.uid, None, self,self.context.context, fil_obj)
def get_data_len(self, cr, fil_obj=None):
bin_size = self.context.context.get('bin_size', False)
if bin_size and not self.content_length:
self.content_length = fil_obj.db_datas
return self.content_length
def set_data(self, cr, data, fil_obj=None):
""" Store data at some file.
fil_obj may optionally be specified, and should be a browse object
for the file. This is useful when the caller has already initiated
the browse object. """
if not self.check_perms(2):
raise IOError(errno.EPERM, "Permission denied.")
stobj = self.context._dirobj.pool.get('document.storage')
return stobj.set_data(cr, self.context.uid, None, self, data, self.context.context, fil_obj)
def _get_ttag(self, cr):
return 'file-%d' % self.file_id
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
if ndir_node and ndir_node.context != self.context:
raise NotImplementedError("Cannot move files between contexts.")
        if (not self.check_perms(8)) or (ndir_node and not ndir_node.check_perms(2)):
raise IOError(errno.EPERM, "Permission denied.")
doc_obj = self.context._dirobj.pool.get('ir.attachment')
if not fil_obj:
dbro = doc_obj.browse(cr, self.context.uid, self.file_id, context=self.context.context)
else:
dbro = fil_obj
assert dbro.id == self.file_id, "%s != %s for %r." % (dbro.id, self.file_id, self)
if not dbro:
            raise IndexError("Cannot locate doc %d." % self.file_id)
if (not self.parent):
# there *must* be a parent node for this one
self.parent = self.context.get_dir_node(cr, dbro.parent_id)
assert self.parent
ret = {}
if ndir_node and self.parent != ndir_node:
if not (isinstance(self.parent, node_dir) and isinstance(ndir_node, node_dir)):
_logger.debug('Cannot move file %r from %r to %r.', self, self.parent, ndir_node)
raise NotImplementedError('Cannot move files between dynamic folders.')
if not ndir_obj:
ndir_obj = self.context._dirobj.browse(cr, self.context.uid, \
ndir_node.dir_id, context=self.context.context)
assert ndir_obj.id == ndir_node.dir_id
r2 = { 'parent_id': ndir_obj.id }
ret.update(r2)
if new_name and (new_name != dbro.name):
if len(ret):
raise NotImplementedError("Cannot rename and move.") # TODO
r2 = { 'name': new_name, 'datas_fname': new_name }
ret.update(r2)
del dbro
if not in_write:
# We have to update the data ourselves
if ret:
ctx = self.context.context.copy()
ctx['__from_node'] = True
doc_obj.write(cr, self.context.uid, [self.file_id,], ret, ctx )
ret = True
return ret
class node_content(node_class):
our_type = 'content'
def __init__(self, path, parent, context, cnt, dctx=None, act_id=None):
super(node_content,self).__init__(path, parent,context)
self.cnt_id = cnt.id
self.create_date = False
self.write_date = False
self.content_length = False
self.unixperms = 0640
if parent:
self.uidperms = parent.uidperms & 14
self.uuser = parent.uuser
self.ugroup = parent.ugroup
self.extension = cnt.extension
self.report_id = cnt.report_id and cnt.report_id.id
#self.mimetype = cnt.extension.
self.displayname = path
if dctx:
self.dctx.update(dctx)
self.act_id = act_id
def fill_fields(self, cr, dctx=None):
""" Try to read the object and fill missing fields, like mimetype,
dates etc.
This function must be different from the constructor, because
it uses the db cursor.
"""
cr.execute('SELECT DISTINCT mimetype FROM document_directory_content_type WHERE active AND code = %s;',
(self.extension,))
res = cr.fetchall()
if res and res[0][0]:
self.mimetype = str(res[0][0])
def get_data(self, cr, fil_obj=None):
cntobj = self.context._dirobj.pool.get('document.directory.content')
if not self.check_perms(4):
raise IOError(errno.EPERM, "Permission denied.")
ctx = self.context.context.copy()
ctx.update(self.dctx)
data = cntobj.process_read(cr, self.context.uid, self, ctx)
if data:
self.content_length = len(data)
return data
def open_data(self, cr, mode):
if mode.endswith('b'):
mode = mode[:-1]
if mode in ('r', 'w'):
cperms = mode[:1]
elif mode in ('r+', 'w+'):
cperms = 'rw'
else:
raise IOError(errno.EINVAL, "Cannot open at mode %s." % mode)
if not self.check_perms(cperms):
raise IOError(errno.EPERM, "Permission denied.")
ctx = self.context.context.copy()
ctx.update(self.dctx)
return nodefd_content(self, cr, mode, ctx)
def get_data_len(self, cr, fil_obj=None):
# FIXME : here, we actually generate the content twice!!
# we should have cached the generated content, but it is
# not advisable to do keep it in memory, until we have a cache
# expiration logic.
if not self.content_length:
self.get_data(cr,fil_obj)
return self.content_length
def set_data(self, cr, data, fil_obj=None):
cntobj = self.context._dirobj.pool.get('document.directory.content')
if not self.check_perms(2):
raise IOError(errno.EPERM, "Permission denied.")
ctx = self.context.context.copy()
ctx.update(self.dctx)
return cntobj.process_write(cr, self.context.uid, self, data, ctx)
def _get_ttag(self, cr):
return 'cnt-%d%s' % (self.cnt_id,(self.act_id and ('-' + str(self.act_id))) or '')
def get_dav_resourcetype(self, cr):
return ''
class node_descriptor(object):
"""A file-like interface to the data contents of a node.
This class is NOT a node, but an /open descriptor/ for some
node. It can hold references to a cursor or a file object,
because the life of a node_descriptor will be the open period
of the data.
It should also take care of locking, with any native mechanism
or using the db.
For the implementation, it would be OK just to wrap around file,
StringIO or similar class. The node_descriptor is only needed to
provide the link to the parent /node/ object.
"""
def __init__(self, parent):
assert isinstance(parent, node_class)
self.name = parent.displayname
self.__parent = parent
def _get_parent(self):
return self.__parent
def open(self, **kwargs):
raise NotImplementedError
def close(self):
raise NotImplementedError
def read(self, size=None):
raise NotImplementedError
def seek(self, offset, whence=None):
raise NotImplementedError
def tell(self):
raise NotImplementedError
    def write(self, data):
raise NotImplementedError
def size(self):
raise NotImplementedError
def __len__(self):
return self.size()
def __nonzero__(self):
""" Ensure that a node_descriptor will never equal False
Since we do define __len__ and __iter__ for us, we must avoid
being regarded as non-true objects.
"""
return True
    def next(self):
raise NotImplementedError
class nodefd_content(StringIO, node_descriptor):
""" A descriptor to content nodes
"""
def __init__(self, parent, cr, mode, ctx):
node_descriptor.__init__(self, parent)
self._context=ctx
self._size = 0L
if mode in ('r', 'r+'):
cntobj = parent.context._dirobj.pool.get('document.directory.content')
data = cntobj.process_read(cr, parent.context.uid, parent, ctx)
if data:
self._size = len(data)
parent.content_length = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
# data available, in case of a seek()
elif mode == 'a':
StringIO.__init__(self, None)
else:
            _logger.error("Invalid mode %r specified.", mode)
raise IOError(errno.EINVAL, "Invalid file mode.")
self.mode = mode
def size(self):
return self._size
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
if self.mode == 'r':
StringIO.close(self)
return
par = self._get_parent()
uid = par.context.uid
cr = openerp.registry(par.context.dbname).cursor()
try:
if self.mode in ('w', 'w+', 'r+'):
data = self.getvalue()
cntobj = par.context._dirobj.pool.get('document.directory.content')
cntobj.process_write(cr, uid, par, data, par.context.context)
elif self.mode == 'a':
raise NotImplementedError
cr.commit()
except Exception:
_logger.exception('Cannot update db content #%d for close.', par.cnt_id)
raise
finally:
cr.close()
StringIO.close(self)
class nodefd_static(StringIO, node_descriptor):
""" A descriptor to nodes with static data.
"""
def __init__(self, parent, cr, mode, ctx=None):
node_descriptor.__init__(self, parent)
self._context=ctx
self._size = 0L
if mode in ('r', 'r+'):
data = parent.get_data(cr)
if data:
self._size = len(data)
parent.content_length = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
# data available, in case of a seek()
elif mode == 'a':
StringIO.__init__(self, None)
else:
            _logger.error("Invalid mode %r specified.", mode)
raise IOError(errno.EINVAL, "Invalid file mode.")
self.mode = mode
def size(self):
return self._size
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
if self.mode == 'r':
StringIO.close(self)
return
par = self._get_parent()
# uid = par.context.uid
cr = openerp.registry(par.context.dbname).cursor()
try:
if self.mode in ('w', 'w+', 'r+'):
data = self.getvalue()
par.set_data(cr, data)
elif self.mode == 'a':
raise NotImplementedError
cr.commit()
except Exception:
            _logger.exception('Cannot update content for %r on close.', par)
raise
finally:
cr.close()
StringIO.close(self)
class nodefd_db(StringIO, node_descriptor):
""" A descriptor to db data
"""
def __init__(self, parent, ira_browse, mode):
node_descriptor.__init__(self, parent)
self._size = 0L
if mode.endswith('b'):
mode = mode[:-1]
if mode in ('r', 'r+'):
data = ira_browse.datas
if data:
data = data.decode('base64')
self._size = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
# data available, in case of a seek()
elif mode == 'a':
StringIO.__init__(self, None)
else:
            _logger.error("Invalid mode %r specified.", mode)
raise IOError(errno.EINVAL, "Invalid file mode.")
self.mode = mode
def size(self):
return self._size
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
par = self._get_parent()
# uid = par.context.uid
registry = openerp.modules.registry.RegistryManager.get(par.context.dbname)
with registry.cursor() as cr:
data = self.getvalue().encode('base64')
if self.mode in ('w', 'w+', 'r+'):
registry.get('ir.attachment').write(cr, 1, par.file_id, {'datas': data})
cr.commit()
StringIO.close(self)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -747,998,132,298,467,500 | 38.634898 | 348 | 0.576468 | false | 3.83247 | false | false | false | 0.005371 |
tistaharahap/images-dhash | deps/Imaging-1.1.7/PIL/PcdImagePlugin.py | 40 | 1773 | #
# The Python Imaging Library.
# $Id$
#
# PCD file handling
#
# History:
# 96-05-10 fl Created
# 96-05-27 fl Added draft mode (128x192, 256x384)
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import Image, ImageFile
##
# Image plugin for PhotoCD images. This plugin only reads the 768x512
# image from the file; higher resolutions are encoded in a proprietary
# encoding.
class PcdImageFile(ImageFile.ImageFile):
format = "PCD"
format_description = "Kodak PhotoCD"
def _open(self):
        # rough sanity check of the PhotoCD header
self.fp.seek(2048)
s = self.fp.read(2048)
if s[:4] != "PCD_":
raise SyntaxError, "not a PCD file"
orientation = ord(s[1538]) & 3
if orientation == 1:
self.tile_post_rotate = 90 # hack
elif orientation == 3:
self.tile_post_rotate = -90
self.mode = "RGB"
self.size = 768, 512 # FIXME: not correct for rotated images!
self.tile = [("pcd", (0,0)+self.size, 96*2048, None)]
def draft(self, mode, size):
if len(self.tile) != 1:
return
d, e, o, a = self.tile[0]
if size:
scale = max(self.size[0] / size[0], self.size[1] / size[1])
for s, o in [(4,0*2048), (2,0*2048), (1,96*2048)]:
if scale >= s:
break
# e = e[0], e[1], (e[2]-e[0]+s-1)/s+e[0], (e[3]-e[1]+s-1)/s+e[1]
# self.size = ((self.size[0]+s-1)/s, (self.size[1]+s-1)/s)
self.tile = [(d, e, o, a)]
return self
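# Minimal usage sketch (not part of the plugin; the file name below is
# hypothetical). Once registered below, Image.open() dispatches to
# PcdImageFile automatically:
#
#   import Image
#   im = Image.open("example.pcd")    # 768x512 RGB base image
#   im.draft("RGB", (256, 384))       # request a lower-resolution decode
#   im.load()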
#
# registry
Image.register_open("PCD", PcdImageFile)
Image.register_extension("PCD", ".pcd")
| mit | 2,325,806,690,208,984,600 | 22.328947 | 76 | 0.544839 | false | 3.005085 | false | false | false | 0.00564 |
EducationalTestingService/discourse-parsing | rstfinder/collapse_rst_labels.py | 1 | 4904 | #!/usr/bin/env python
"""
Collapse RST discourse treebank relation types.
This script is based on a Perl script by Kenji Sagae.
:author: Michael Heilman
:author: Nitin Madnani
:organization: ETS
"""
import argparse
import re
from nltk.tree import ParentedTree
from .reformat_rst_trees import reformat_rst_tree
from .tree_util import TREE_PRINT_MARGIN
def collapse_rst_labels(tree):
"""
Collapse the RST labels to a smaller set.
This function collapses the RST labels to the set of 18 described
by the Carlson et al. paper that comes with the RST discourse treebank.
    **IMPORTANT**: The input tree is modified in place.
Parameters
----------
tree : nltk.tree.ParentedTree
The input tree for which to collapse the labels.
"""
# walk the tree, and collapse the labels for each subtree
for subtree in tree.subtrees():
subtree.set_label(_collapse_rst_label(subtree.label()))
def _collapse_rst_label(label):
"""
Collapse the given label to a smaller set.
Parameters
----------
label : str
The label to be collapsed.
Returns
-------
collapsed_label : str
The collapsed label.
Raises
------
ValueError
If the relation type in the input label is unknown.
"""
    if ':' not in label:
return label
# split the input label into direction and relation
direction, relation = label.split(':')
# lowercase the relation before collapsing
relation_uncased = relation.lower()
# go through the various relation types and collapse them as needed
if re.search(r'^attribution', relation_uncased):
relation = "ATTRIBUTION"
elif re.search(r'^(background|circumstance)', relation_uncased):
relation = "BACKGROUND"
elif re.search(r'^(cause|result|consequence)', relation_uncased):
relation = "CAUSE"
elif re.search(r'^(comparison|preference|analogy|proportion)',
relation_uncased):
relation = "COMPARISON"
elif re.search(r'^(condition|hypothetical|contingency|otherwise)',
relation_uncased):
relation = "CONDITION"
elif re.search(r'^(contrast|concession|antithesis)', relation_uncased):
relation = "CONTRAST"
elif re.search(r'^(elaboration.*|example|definition)', relation_uncased):
relation = "ELABORATION"
elif re.search(r'^(purpose|enablement)', relation_uncased):
relation = "ENABLEMENT"
elif re.search(r'^(problem\-solution|question\-answer|statement\-response|topic\-comment|comment\-topic|rhetorical\-question)', relation_uncased):
relation = "TOPICCOMMENT"
elif re.search(r'^(evaluation|interpretation|conclusion|comment)',
relation_uncased):
# note that this check for "comment" needs to come after the one
# above that looks for "comment-topic"
relation = "EVALUATION"
elif re.search(r'^(evidence|explanation.*|reason)', relation_uncased):
relation = "EXPLANATION"
elif re.search(r'^(list|disjunction)', relation_uncased):
relation = "JOINT"
elif re.search(r'^(manner|means)', relation_uncased):
relation = "MANNERMEANS"
elif re.search(r'^(summary|restatement)', relation_uncased):
relation = "SUMMARY"
elif re.search(r'^(temporal\-.*|sequence|inverted\-sequence)',
relation_uncased):
relation = "TEMPORAL"
elif re.search(r'^(topic-.*)', relation_uncased):
relation = "TOPICCHANGE"
elif re.search(r'^(span|same\-unit|textualorganization)$', relation_uncased):
pass
else:
raise ValueError(f"unknown relation type in label: {label}")
# TODO: make this all upper case (to resemble PTB nonterminals)?
collapsed_label = f"{direction}:{relation}".lower()
return collapsed_label
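# Runnable illustration of the mapping above (the example labels are
# assumptions following the "direction:relation" convention, not taken
# from the treebank itself):
def _demo_collapse_rst_label():
    assert _collapse_rst_label("satellite:elaboration-additional-e") == "satellite:elaboration"
    assert _collapse_rst_label("nucleus:Temporal-Same-Time") == "nucleus:temporal"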
def main(): # noqa: D103
parser = argparse.ArgumentParser(description="Note that this main "
"method is just for testing.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("input_path",
help="Path to an RST discourse treebank .dis file.")
parser.add_argument("output_path",
help="Path to file containing the collapsed output.")
args = parser.parse_args()
# open the input file containing each input tree on a single line,
# collapse it, and then print it out to the given output file
with open(args.input_path, 'r') as input_file,\
open(args.output_path, 'w') as output_file:
tree = ParentedTree.fromstring(input_file.read().strip())
reformat_rst_tree(tree)
collapse_rst_labels(tree)
tree.pprint(margin=TREE_PRINT_MARGIN, file=output_file)
if __name__ == "__main__":
main()
| mit | -3,973,364,285,783,336,000 | 30.435897 | 150 | 0.641925 | false | 4.036214 | false | false | false | 0.000612 |
aladdinwang/django-cms | cms/utils/urlutils.py | 16 | 1750 | # -*- coding: utf-8 -*-
from django.conf import settings
from urlparse import urlparse
import re
# checks validity of absolute / relative url
any_path_re = re.compile('^/?[a-zA-Z0-9_.-]+(/[a-zA-Z0-9_.-]+)*/?$')
def levelize_path(path):
"""Splits given path to list of paths removing latest level in each step.
>>> path = '/application/item/new'
>>> levelize_path(path)
['/application/item/new', '/application/item', '/application']
"""
parts = path.rstrip("/").split("/")
paths = []
for i in range(len(parts), 0, -1):
sub_path = ('/').join(parts[:i])
if sub_path:
paths.append(sub_path)
return paths
def urljoin(*segments):
"""Joins url segments together and appends trailing slash if required.
>>> urljoin('a', 'b', 'c')
u'a/b/c/'
>>> urljoin('a', '//b//', 'c')
u'a/b/c/'
>>> urljoin('/a', '/b/', '/c/')
u'/a/b/c/'
>>> urljoin('/a', '')
u'/a/'
"""
cleaned_segments = map(lambda segment: unicode(segment).strip("/"), segments)
    nonempty_segments = filter(lambda segment: segment != "", cleaned_segments)
url = ("/").join(nonempty_segments)
if segments[0].startswith("/") and not url.startswith("/"):
url = "/" + url
if settings.APPEND_SLASH and not url.endswith("/"):
url += "/"
return url
def is_media_request(request):
"""
Check if a request is a media request.
"""
parsed_media_url = urlparse(settings.MEDIA_URL)
if request.path.startswith(parsed_media_url.path):
if parsed_media_url.netloc:
if request.get_host() == parsed_media_url.netloc:
return True
else:
return True
return False | bsd-3-clause | 6,211,086,250,955,784,000 | 27.704918 | 81 | 0.561714 | false | 3.600823 | false | false | false | 0.008 |
ballotpath/BallotPath | api/app/viewsd/district.py | 1 | 3459 | #***********************************************************************************************************
# Copyright BallotPath 2014
# Developed by Matt Clyde, Andrew Erland, Shawn Forgie, Andrew Hobbs, Kevin Mark, Darrell Sam, Blake Clough
# Open source under GPL v3 license (https://github.com/mclyde/BallotPath/blob/v0.3/LICENSE)
#***********************************************************************************************************
from flask import render_template, flash, redirect, url_for, jsonify, Response, request, abort
from app import app, db, models
import json
# District:
@app.route("/district/", methods = ['GET'])
def get_districts():
districts = models.district.query.all()
district_dicts = []
for district in districts:
district_dicts.append(dict(district.__dict__))
for district_dict in district_dicts:
del district_dict['_sa_instance_state']
resp = Response(json.dumps(district_dicts), status=200, mimetype='application/json')
if district_dicts == []:
resp.status_code = 404
return resp
@app.route("/district/<int:district_id>/", methods = ['GET'])
def get_district(district_id):
district = models.district.query.get(district_id)
if district == None:
return Response(json.dumps(None), status=404, mimetype='application/json')
else:
district_dict = dict(district.__dict__)
del district_dict['_sa_instance_state']
return Response(json.dumps(district_dict), status=200, mimetype='application/json')
@app.route("/district/<int:district_id>/", methods = ['POST'])
def post_district(district_id):
# First make sure we got a JSON object with the request
if request.headers['Content-Type'] == 'application/json':
        payload = request.json
        district = models.district(name=payload['name'], level_id=payload['level_id'], election_div_id=payload['election_div_id'])
# db.session.add(district)
# db.session.commit()
return Response("", status=200)
else:
# If we don't have one, about with HTML code 415
# 10.4.16 415 Unsupported Media Type
# The server is refusing to service the request because the
# entity of the request is in a format not supported by the
# requested resource for the requested method.
abort(415)
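# Example request for the endpoint above (URL, port and field values are
# hypothetical):
#   curl -X POST http://localhost:5000/district/1/ \
#        -H 'Content-Type: application/json' \
#        -d '{"name": "New District", "level_id": 1, "election_div_id": 2}'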
@app.route("/district/<int:district_id>/", methods = ['PUT'])
def put_district(district_id):
    if request.headers['Content-Type'] == 'application/json':
        payload = request.json
        district = models.district.query.get(district_id)
        # Only update if the district already exists, otherwise do nothing
        if district != None:
            district.name = payload['name']
            district.level_id = payload['level_id']
            district.election_div_id = payload['election_div_id']
            # db.session.commit()
            return Response("", status=200)
        else:
            return Response("", status=404)
    else:
        abort(415)
@app.route("/district/<int:district_id>/", methods = ['DELETE'])
def delete_district(district_id):
    district = models.district.query.get(district_id)
    if district == None:
# Give a 404 error if an item with the given ID doesn't exist
abort(404)
else:
# db.session.delete(district)
# db.session.commit()
return Response("", status=200) | gpl-3.0 | 6,258,518,638,483,848,000 | 41.810127 | 121 | 0.593235 | false | 3.92622 | false | false | false | 0.010408 |
alpinelinux/linux-stable-grsec | tools/perf/scripts/python/syscall-counts-by-pid.py | 1996 | 2105 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 | -5,189,957,477,307,039,000 | 27.445946 | 77 | 0.625178 | false | 3.028777 | false | false | false | 0.033729 |
NonnEmilia/OpenGenfri | pos/webpos/dbmanager.py | 2 | 2516 | from django.contrib.auth.models import User
from webpos.models import Item, Bill, BillItem
def commit_bill(output, reqdata, user):
    """Validate the requested items against available stock, create the
    bill with its BillItem rows and decrement quantities.
    Returns (output, bill); bill is None when any item is out of stock."""
billhd = Bill(customer_name=reqdata['customer_name'],
server=User.objects.get(pk=user.id).username)
billitms = []
reqquants = reqdata['items']
dbitms = Item.objects.filter(name__in=reqquants.keys())
for dbitm in dbitms:
reqitem = reqquants[dbitm.name]
quant = reqitem['qty']
notes = reqitem['notes']
db_quant = dbitm.quantity
if db_quant is not None:
newquant = db_quant - quant
if newquant < 0:
output['errors'].append((dbitm.name, dbitm.quantity))
else:
if output['errors']:
continue
output['total'] += dbitm.price * quant
billitms.append(BillItem(item=dbitm, quantity=quant,
category=dbitm.category,
item_price=dbitm.price,
note=notes))
dbitm.quantity = newquant
else:
output['total'] += dbitm.price * quant
billitms.append(BillItem(item=dbitm, quantity=quant,
category=dbitm.category,
item_price=dbitm.price,
note=notes))
if output['errors']:
output['total'] = 0
output['customer_id'] = None
output['errors'] = dict(output['errors'])
return output, None
else:
output['errors'] = dict(output['errors'])
if output['total'] < 0:
output['total'] = 0
billhd.total = output['total']
billhd.customer_id = output['customer_id']
billhd.save()
output['date'] = billhd.date
output['bill_id'] = billhd.id
for billitm, dbitm in zip(billitms, dbitms):
billitm.bill = billhd
billitm.save()
dbitm.save()
return output, billhd
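# Sketch of the expected call pattern (the dict keys mirror the code above;
# the concrete values are hypothetical):
#   output = {'errors': [], 'total': 0, 'customer_id': 7}
#   reqdata = {'customer_name': 'Alice',
#              'items': {'Coffee': {'qty': 2, 'notes': ''}}}
#   output, bill = commit_bill(output, reqdata, request.user)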
def undo_bill(billid, user):
    """Soft-delete a committed bill, restoring the stock of its items."""
bill = Bill.objects.get(pk=billid)
if not bill.is_committed():
return 'Bill has already been deleted!'
for billitem in bill.billitem_set.all():
if billitem.item.quantity is not None:
billitem.item.quantity += billitem.quantity
billitem.item.save()
bill.deleted_by = user.username
bill.save()
return 'Bill #' + billid + ' deleted!'
| mit | -3,350,054,980,190,896,600 | 36 | 69 | 0.529809 | false | 3.858896 | false | false | false | 0.000397 |
dongjoon-hyun/tensorflow | tensorflow/python/ops/partitioned_variables.py | 4 | 12564 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for creating partitioned variables.
This is a convenient abstraction to partition a large variable across
multiple smaller variables that can be assigned to different devices.
The full variable can be reconstructed by concatenating the smaller variables.
Using partitioned variables instead of a single variable is mostly a
performance choice. It however also has an impact on:
1. Random initialization, as the random number generator is called once per
slice
2. Updates, as they happen in parallel across slices
A key design goal is to allow a different graph to repartition a variable
with the same name but different slicings, including possibly no partitions.
TODO(touts): If an initializer provides a seed, the seed must be changed
deterministically for each slice, maybe by adding one to it, otherwise each
slice will use the same values. Maybe this can be done by passing the
slice offsets to the initializer functions.
Typical usage:
```python
# Create a list of partitioned variables with:
vs = create_partitioned_variables(
<shape>, <slicing>, <initializer>, name=<optional-name>)
# Pass the list as inputs to embedding_lookup for sharded, parallel lookup:
y = embedding_lookup(vs, ids, partition_strategy="div")
# Or fetch the variables in parallel to speed up large matmuls:
z = matmul(x, concat(slice_dim, vs))
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"create_partitioned_variables",
"variable_axis_size_partitioner",
"min_max_variable_partitioner",
"fixed_size_partitioner",
]
@tf_export("variable_axis_size_partitioner")
def variable_axis_size_partitioner(
max_shard_bytes, axis=0, bytes_per_string_element=16, max_shards=None):
"""Get a partitioner for VariableScope to keep shards below `max_shard_bytes`.
This partitioner will shard a Variable along one axis, attempting to keep
the maximum shard size below `max_shard_bytes`. In practice, this is not
always possible when sharding along only one axis. When this happens,
this axis is sharded as much as possible (i.e., every dimension becomes
a separate shard).
If the partitioner hits the `max_shards` limit, then each shard may end up
larger than `max_shard_bytes`. By default `max_shards` equals `None` and no
limit on the number of shards is enforced.
One reasonable value for `max_shard_bytes` is `(64 << 20) - 1`, or almost
`64MB`, to keep below the protobuf byte limit.
Args:
max_shard_bytes: The maximum size any given shard is allowed to be.
axis: The axis to partition along. Default: outermost axis.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
max_shards: The maximum number of shards in int created taking precedence
over `max_shard_bytes`.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
Raises:
ValueError: If any of the byte counts are non-positive.
"""
if max_shard_bytes < 1 or bytes_per_string_element < 1:
raise ValueError(
"Both max_shard_bytes and bytes_per_string_element must be positive.")
if max_shards and max_shards < 1:
raise ValueError(
"max_shards must be positive.")
def _partitioner(shape, dtype):
"""Partitioner that partitions shards to have max_shard_bytes total size.
Args:
shape: A `TensorShape`.
dtype: A `DType`.
Returns:
A tuple representing how much to slice each axis in shape.
Raises:
ValueError: If shape is not a fully defined `TensorShape` or dtype is not
a `DType`.
"""
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("shape is not a TensorShape: %s" % shape)
if not shape.is_fully_defined():
raise ValueError("shape is not fully defined: %s" % shape)
if not isinstance(dtype, dtypes.DType):
raise ValueError("dtype is not a DType: %s" % dtype)
if dtype.base_dtype == dtypes.string:
element_size = bytes_per_string_element
else:
element_size = dtype.size
partitions = [1] * shape.ndims
bytes_per_slice = 1.0 * (
shape.num_elements() / shape.dims[axis].value) * element_size
# How many slices can we fit on one shard of size at most max_shard_bytes?
# At least one slice is required.
slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))
# How many shards do we need for axis given that each shard fits
# slices_per_shard slices from a total of shape[axis] slices?
axis_shards = int(math.ceil(
1.0 * shape.dims[axis].value / slices_per_shard))
if max_shards:
axis_shards = min(max_shards, axis_shards)
partitions[axis] = axis_shards
return partitions
return _partitioner
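# Worked example of the shard math above (figures illustrative, not executed):
#   partitioner = variable_axis_size_partitioner(max_shard_bytes=(64 << 20) - 1)
#   For a float32 variable of shape [1000000, 512] partitioned along axis 0:
#     bytes_per_slice  = (1000000 * 512 / 1000000) * 4 = 2048
#     slices_per_shard = floor(67108863 / 2048)        = 32767
#     axis_shards      = ceil(1000000 / 32767)         = 31
#   so the partitioner returns [31, 1].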
@tf_export("min_max_variable_partitioner")
def min_max_variable_partitioner(max_partitions=1, axis=0,
min_slice_size=256 << 10,
bytes_per_string_element=16):
"""Partitioner to allocate minimum size per slice.
Returns a partitioner that partitions the variable of given shape and dtype
such that each partition has a minimum of `min_slice_size` slice of the
variable. The maximum number of such partitions (upper bound) is given by
`max_partitions`.
Args:
max_partitions: Upper bound on the number of partitions. Defaults to 1.
axis: Axis along which to partition the variable. Defaults to 0.
min_slice_size: Minimum size of the variable slice per partition. Defaults
to 256K.
bytes_per_string_element: If the `Variable` is of type string, this provides
an estimate of how large each scalar in the `Variable` is.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
"""
def _partitioner(shape, dtype):
"""Partitioner that partitions list for a variable of given shape and type.
Ex: Consider partitioning a variable of type float32 with
shape=[1024, 1024].
If `max_partitions` >= 16, this function would return
[(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].
If `max_partitions` < 16, this function would return
[`max_partitions`, 1].
Args:
shape: Shape of the variable.
dtype: Type of the variable.
Returns:
List of partitions for each axis (currently only one axis can be
partitioned).
Raises:
ValueError: If axis to partition along does not exist for the variable.
"""
if axis >= len(shape):
raise ValueError("Can not partition variable along axis %d when shape is "
"only %s" % (axis, shape))
if dtype.base_dtype == dtypes.string:
bytes_per_element = bytes_per_string_element
else:
bytes_per_element = dtype.size
total_size_bytes = shape.num_elements() * bytes_per_element
partitions = total_size_bytes / min_slice_size
partitions_list = [1] * len(shape)
# We can not partition the variable beyond what its shape or
# `max_partitions` allows.
partitions_list[axis] = max(1, min(shape.dims[axis].value,
max_partitions,
int(math.ceil(partitions))))
return partitions_list
return _partitioner
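# Hedged usage sketch (TF1-style API, using the modules imported above;
# illustration only):
#   partitioner = min_max_variable_partitioner(max_partitions=8)
#   with variable_scope.variable_scope("emb", partitioner=partitioner):
#     v = variable_scope.get_variable(
#         "table", shape=[1024, 1024], dtype=dtypes.float32)
#   # float32 [1024, 1024] is 4MiB; 4MiB / 256KiB = 16 >= 8 partitions
#   # allowed, so the partitioner returns [8, 1].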
@tf_export("fixed_size_partitioner")
def fixed_size_partitioner(num_shards, axis=0):
"""Partitioner to specify a fixed number of shards along given axis.
Args:
num_shards: `int`, number of shards to partition variable.
axis: `int`, axis to partition on.
Returns:
A partition function usable as the `partitioner` argument to
`variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
"""
def _partitioner(shape, **unused_args):
partitions_list = [1] * len(shape)
partitions_list[axis] = min(num_shards, shape.dims[axis].value)
return partitions_list
return _partitioner
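# Example: fixed_size_partitioner(4) applied to shape [10, 20] returns
# [4, 1]; applied to shape [2, 20] it returns [2, 1], since a dimension of
# size 2 cannot be split into more than 2 shards.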
@tf_export("create_partitioned_variables")
def create_partitioned_variables(
shape, slicing, initializer, dtype=dtypes.float32,
trainable=True, collections=None, name=None, reuse=None):
"""Create a list of partitioned variables according to the given `slicing`.
Currently only one dimension of the full variable can be sliced, and the
full variable can be reconstructed by the concatenation of the returned
list along that dimension.
Args:
shape: List of integers. The shape of the full variable.
slicing: List of integers. How to partition the variable.
Must be of the same length as `shape`. Each value
indicates how many slices to create in the corresponding
dimension. Presently only one of the values can be more than 1;
that is, the variable can only be sliced along one dimension.
For convenience, the requested number of partitions does not have to
divide the corresponding dimension evenly. If it does not, the
shapes of the partitions are incremented by 1 starting from partition
0 until all slack is absorbed. The adjustment rules may change in the
future, but as you can save/restore these variables with different
slicing specifications this should not be a problem.
initializer: A `Tensor` of shape `shape` or a variable initializer
function. If a function, it will be called once for each slice,
passing the shape and data type of the slice as parameters. The
function must return a tensor with the same shape as the slice.
dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
trainable: If True also add all the variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
collections: List of graph collections keys to add the variables to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name for the full variable. Defaults to
`"PartitionedVariable"` and gets uniquified automatically.
reuse: Boolean or `None`; if `True` and `name` is set, reuse previously
created variables; if `False`, create new variables; if `None`, inherit
the parent scope's reuse setting.
Returns:
A list of Variables corresponding to the slicing.
Raises:
ValueError: If any of the arguments is malformed.
"""
logging.warn(
"create_partitioned_variables is deprecated. Use "
"tf.get_variable with a partitioner set, or "
"tf.get_partitioned_variable_list, instead.")
if len(shape) != len(slicing):
raise ValueError("The 'shape' and 'slicing' of a partitioned Variable "
"must have the same length: shape: %s, slicing: %s" %
(shape, slicing))
if len(shape) < 1:
raise ValueError("A partitioned Variable must have rank at least 1: "
"shape: %s" % shape)
# Legacy: we are provided the slicing directly, so just pass it to
# the partitioner.
partitioner = lambda **unused_kwargs: slicing
with variable_scope.variable_scope(
name, "PartitionedVariable", reuse=reuse):
# pylint: disable=protected-access
partitioned_var = variable_scope._get_partitioned_variable(
name=None,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=trainable,
partitioner=partitioner,
collections=collections)
return list(partitioned_var)
# pylint: enable=protected-access
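# Hedged migration sketch for the deprecation warning above (TF1-style API,
# illustration only):
#   with variable_scope.variable_scope(
#       "part", partitioner=fixed_size_partitioner(3)):
#     v = variable_scope.get_variable(
#         "v", shape=[30, 10], dtype=dtypes.float32, initializer=initializer)
#   # v behaves like one [30, 10] variable backed by 3 shards of shape [10, 10].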
| apache-2.0 | 5,044,320,470,968,934,000 | 39.012739 | 80 | 0.697628 | false | 4.134255 | false | false | false | 0.002865 |
mandeepdhami/horizon | openstack_dashboard/dashboards/project/containers/views.py | 48 | 13326 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Swift containers.
"""
import os
import django
from django import http
from django.utils.functional import cached_property # noqa
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from horizon import browsers
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon.utils.urlresolvers import reverse # noqa
from openstack_dashboard import api
from openstack_dashboard.api import swift
from openstack_dashboard.dashboards.project.containers \
import browsers as project_browsers
from openstack_dashboard.dashboards.project.containers \
import forms as project_forms
from openstack_dashboard.dashboards.project.containers import utils
class ContainerView(browsers.ResourceBrowserView):
browser_class = project_browsers.ContainerBrowser
template_name = "project/containers/index.html"
def get_containers_data(self):
containers = []
self._more = None
marker = self.request.GET.get('marker', None)
try:
containers, self._more = api.swift.swift_get_containers(
self.request, marker=marker)
except Exception:
msg = _('Unable to retrieve container list.')
exceptions.handle(self.request, msg)
return containers
@cached_property
def objects(self):
"""Returns a list of objects given the subfolder's path.
The path is from the kwargs of the request.
"""
objects = []
self._more = None
marker = self.request.GET.get('marker', None)
container_name = self.kwargs['container_name']
subfolder = self.kwargs['subfolder_path']
prefix = None
if container_name:
self.navigation_selection = True
if subfolder:
prefix = subfolder
try:
objects, self._more = api.swift.swift_get_objects(
self.request,
container_name,
marker=marker,
prefix=prefix)
except Exception:
self._more = None
objects = []
msg = _('Unable to retrieve object list.')
exceptions.handle(self.request, msg)
return objects
def is_subdir(self, item):
content_type = "application/pseudo-folder"
return getattr(item, "content_type", None) == content_type
def is_placeholder(self, item):
object_name = getattr(item, "name", "")
return object_name.endswith(api.swift.FOLDER_DELIMITER)
def get_objects_data(self):
"""Returns a list of objects within the current folder."""
filtered_objects = [item for item in self.objects
if (not self.is_subdir(item) and
not self.is_placeholder(item))]
return filtered_objects
def get_subfolders_data(self):
"""Returns a list of subfolders within the current folder."""
filtered_objects = [item for item in self.objects
if self.is_subdir(item)]
return filtered_objects
def get_context_data(self, **kwargs):
context = super(ContainerView, self).get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
context['subfolders'] = []
if self.kwargs["subfolder_path"]:
(parent, slash, folder) = self.kwargs["subfolder_path"] \
.strip('/').rpartition('/')
while folder:
path = "%s%s%s/" % (parent, slash, folder)
context['subfolders'].insert(0, (folder, path))
(parent, slash, folder) = parent.rpartition('/')
return context
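# Example of the breadcrumb loop above: for subfolder_path 'a/b/c/',
# context['subfolders'] ends up as
# [('a', 'a/'), ('b', 'a/b/'), ('c', 'a/b/c/')].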
class CreateView(forms.ModalFormView):
form_class = project_forms.CreateContainer
template_name = 'project/containers/create.html'
success_url = "horizon:project:containers:index"
page_title = _("Create Container")
def get_success_url(self):
parent = self.request.POST.get('parent', None)
if parent:
container, slash, remainder = parent.partition(
swift.FOLDER_DELIMITER)
args = (utils.wrap_delimiter(container),
utils.wrap_delimiter(remainder))
return reverse(self.success_url, args=args)
else:
container = utils.wrap_delimiter(self.request.POST['name'])
return reverse(self.success_url, args=[container])
def get_initial(self):
initial = super(CreateView, self).get_initial()
initial['parent'] = self.kwargs['container_name']
return initial
class CreatePseudoFolderView(forms.ModalFormView):
form_class = project_forms.CreatePseudoFolder
template_name = 'project/containers/create_pseudo_folder.html'
success_url = "horizon:project:containers:index"
page_title = _("Create Pseudo-folder")
def get_success_url(self):
container_name = self.request.POST['container_name']
return reverse(self.success_url,
args=(utils.wrap_delimiter(container_name),
self.request.POST.get('path', '')))
def get_initial(self):
return {"container_name": self.kwargs["container_name"],
"path": self.kwargs['subfolder_path']}
def get_context_data(self, **kwargs):
context = super(CreatePseudoFolderView, self). \
get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
return context
class UploadView(forms.ModalFormView):
form_class = project_forms.UploadObject
template_name = 'project/containers/upload.html'
success_url = "horizon:project:containers:index"
page_title = _("Upload Objects")
def get_success_url(self):
container = utils.wrap_delimiter(self.request.POST['container_name'])
path = utils.wrap_delimiter(self.request.POST.get('path', ''))
args = (container, path)
return reverse(self.success_url, args=args)
def get_initial(self):
return {"container_name": self.kwargs["container_name"],
"path": self.kwargs['subfolder_path']}
def get_context_data(self, **kwargs):
context = super(UploadView, self).get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
return context
def object_download(request, container_name, object_path):
try:
obj = api.swift.swift_get_object(request, container_name, object_path,
resp_chunk_size=swift.CHUNK_SIZE)
except Exception:
redirect = reverse("horizon:project:containers:index")
exceptions.handle(request,
_("Unable to retrieve object."),
redirect=redirect)
# Add the original file extension back on if it wasn't preserved in the
# name given to the object.
filename = object_path.rsplit(swift.FOLDER_DELIMITER)[-1]
if not os.path.splitext(obj.name)[1] and obj.orig_name:
name, ext = os.path.splitext(obj.orig_name)
filename = "%s%s" % (filename, ext)
# NOTE(tsufiev): the StreamingHttpResponse class was introduced in
# Django 1.5 specifically for the purpose of streaming and/or transferring
# large files; it's less fragile than the standard HttpResponse and should
# be used when available.
if django.VERSION >= (1, 5):
response = http.StreamingHttpResponse(obj.data)
else:
response = http.HttpResponse(obj.data)
safe_name = filename.replace(",", "").encode('utf-8')
response['Content-Disposition'] = 'attachment; filename="%s"' % safe_name
response['Content-Type'] = 'application/octet-stream'
response['Content-Length'] = obj.bytes
return response
class CopyView(forms.ModalFormView):
form_class = project_forms.CopyObject
template_name = 'project/containers/copy.html'
success_url = "horizon:project:containers:index"
page_title = _("Copy Object")
def get_success_url(self):
container = utils.wrap_delimiter(
self.request.POST['new_container_name'])
path = utils.wrap_delimiter(self.request.POST.get('path', ''))
args = (container, path)
return reverse(self.success_url, args=args)
def get_form_kwargs(self):
kwargs = super(CopyView, self).get_form_kwargs()
try:
containers = api.swift.swift_get_containers(self.request)
except Exception:
redirect = reverse("horizon:project:containers:index")
exceptions.handle(self.request,
_('Unable to list containers.'),
redirect=redirect)
kwargs['containers'] = [(c.name, c.name) for c in containers[0]]
return kwargs
@staticmethod
def get_copy_name(object_name):
filename, ext = os.path.splitext(object_name)
return "%s.copy%s" % (filename, ext)
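# e.g. get_copy_name('report.pdf') -> 'report.copy.pdf'
#      get_copy_name('archive')    -> 'archive.copy'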
def get_initial(self):
path = self.kwargs["subfolder_path"]
object_name = self.kwargs["object_name"]
orig = "%s%s" % (path or '', object_name)
return {"new_container_name": self.kwargs["container_name"],
"orig_container_name": self.kwargs["container_name"],
"orig_object_name": orig,
"path": path,
"new_object_name": self.get_copy_name(object_name)}
def get_context_data(self, **kwargs):
context = super(CopyView, self).get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
context['object_name'] = self.kwargs["object_name"]
return context
class ContainerDetailView(forms.ModalFormMixin, generic.TemplateView):
template_name = 'project/containers/container_detail.html'
page_title = _("Container Details")
@memoized.memoized_method
def get_object(self):
try:
return api.swift.swift_get_container(
self.request,
self.kwargs["container_name"],
with_data=False)
except Exception:
redirect = reverse("horizon:project:containers:index")
exceptions.handle(self.request,
_('Unable to retrieve details.'),
redirect=redirect)
def get_context_data(self, **kwargs):
context = super(ContainerDetailView, self).get_context_data(**kwargs)
context['container'] = self.get_object()
return context
class ObjectDetailView(forms.ModalFormMixin, generic.TemplateView):
template_name = 'project/containers/object_detail.html'
page_title = _("Object Details")
@memoized.memoized_method
def get_object(self):
try:
return api.swift.swift_get_object(
self.request,
self.kwargs["container_name"],
self.kwargs["object_path"],
with_data=False)
except Exception:
redirect = reverse("horizon:project:containers:index")
exceptions.handle(self.request,
_('Unable to retrieve details.'),
redirect=redirect)
def get_context_data(self, **kwargs):
context = super(ObjectDetailView, self).get_context_data(**kwargs)
context['object'] = self.get_object()
return context
class UpdateObjectView(forms.ModalFormView):
form_class = project_forms.UpdateObject
template_name = 'project/containers/update.html'
success_url = "horizon:project:containers:index"
page_title = _("Update Object")
def get_success_url(self):
container = utils.wrap_delimiter(self.request.POST['container_name'])
path = utils.wrap_delimiter(self.request.POST.get('path', ''))
args = (container, path)
return reverse(self.success_url, args=args)
def get_initial(self):
return {"container_name": self.kwargs["container_name"],
"path": self.kwargs["subfolder_path"],
"name": self.kwargs["object_name"]}
def get_context_data(self, **kwargs):
context = super(UpdateObjectView, self).get_context_data(**kwargs)
context['container_name'] = self.kwargs["container_name"]
context['subfolder_path'] = self.kwargs["subfolder_path"]
context['object_name'] = self.kwargs["object_name"]
return context
| apache-2.0 | -1,479,823,089,327,707,400 | 37.964912 | 78 | 0.623968 | false | 4.279383 | false | false | false | 0 |
rosswhitfield/mantid | scripts/Diffraction/isis_powder/pearl_routines/pearl_calibration_algs.py | 3 | 3466 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import os
import mantid.simpleapi as mantid
import isis_powder.routines.common as common
from isis_powder.routines.common_enums import INPUT_BATCHING, WORKSPACE_UNITS
def create_calibration(calibration_runs, instrument, offset_file_name, grouping_file_name, calibration_dir,
rebin_1_params, rebin_2_params, cross_correlate_params, get_det_offset_params,
output_name):
"""
Create a calibration file from (usually) a ceria run
:param calibration_runs: Run number(s) for this run
:param instrument: The PEARL instrument object
:param offset_file_name: Name of the file to write detector offset information to
:param grouping_file_name: Name of grouping calibration file
:param calibration_dir: Path to directory containing calibration information
:param rebin_1_params: Parameters for the first rebin step (as a string in the usual format)
:param rebin_2_params: Parameters for the second rebin step (as a string in the usual format)
:param cross_correlate_params: Parameters for CrossCorrelate (as a dictionary PropertyName: PropertyValue)
:param get_det_offset_params: Parameters for GetDetectorOffsets (as a dictionary PropertyName: PropertyValue)
:param output_name: The name of the focused output workspace
"""
input_ws_list = common.load_current_normalised_ws_list(run_number_string=calibration_runs, instrument=instrument,
input_batching=INPUT_BATCHING.Summed)
input_ws = input_ws_list[0]
calibration_ws = mantid.Rebin(InputWorkspace=input_ws, Params=rebin_1_params)
if calibration_ws.getAxis(0).getUnit().unitID() != WORKSPACE_UNITS.d_spacing:
calibration_ws = mantid.ConvertUnits(InputWorkspace=calibration_ws, Target="dSpacing")
rebinned = mantid.Rebin(InputWorkspace=calibration_ws, Params=rebin_2_params)
cross_correlated = mantid.CrossCorrelate(InputWorkspace=rebinned, **cross_correlate_params)
offset_file = os.path.join(calibration_dir, offset_file_name)
# Offsets workspace must be referenced as string so it can be deleted, as simpleapi doesn't recognise it as a ws
offsets_ws_name = "offsets"
mantid.GetDetectorOffsets(InputWorkspace=cross_correlated, GroupingFileName=offset_file,
OutputWorkspace=offsets_ws_name, **get_det_offset_params)
rebinned_tof = mantid.ConvertUnits(InputWorkspace=rebinned, Target="TOF")
mantid.ApplyDiffCal(InstrumentWorkspace=rebinned_tof, CalibrationFile=offset_file)
aligned = mantid.ConvertUnits(InputWorkspace=rebinned_tof, Target="dSpacing")
mantid.ApplyDiffCal(InstrumentWorkspace=aligned, ClearCalibration=True)
grouping_file = os.path.join(calibration_dir, grouping_file_name)
focused = mantid.DiffractionFocussing(InputWorkspace=aligned, GroupingFileName=grouping_file,
OutputWorkspace=output_name)
common.remove_intermediate_workspace([calibration_ws, rebinned, cross_correlated, rebinned_tof, aligned,
offsets_ws_name])
return focused
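# Hedged usage sketch -- the run number, file names and algorithm property
# names below are illustrative only, not taken from a real PEARL configuration:
#   focused = create_calibration(
#       calibration_runs="98472", instrument=pearl_instrument,
#       offset_file_name="pearl_offset.cal",
#       grouping_file_name="pearl_group.cal",
#       calibration_dir="/archive/pearl/calibration",
#       rebin_1_params="100,-0.0006,19990", rebin_2_params="1.8,0.002,2.1",
#       cross_correlate_params={"ReferenceSpectra": 20},
#       get_det_offset_params={"Step": 0.002, "XMin": -200, "XMax": 200},
#       output_name="focused_ceria")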
| gpl-3.0 | 3,089,129,844,319,107,600 | 58.758621 | 117 | 0.724755 | false | 3.80461 | false | false | false | 0.006347 |
cortedeltimo/SickRage | sickbeard/providers/bjshare.py | 3 | 12234 | # coding=utf-8
# Author: DarkSupremo <uilton.dev@gmail.com>
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""Provider code for BJ-Share."""
from __future__ import unicode_literals
import re
from requests.compat import urljoin
from requests.utils import add_dict_to_cookiejar, dict_from_cookiejar
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class BJShareProvider(TorrentProvider):
"""BJ-Share Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(BJShareProvider, self).__init__('BJ-Share')
# URLs
self.url = 'https://bj-share.me'
self.urls = {
'login': "https://bj-share.me/login.php",
'search': urljoin(self.url, 'torrents.php')
}
# Credentials
self.enable_cookies = True
self.cookies = ''
self.username = None
self.password = None
self.required_cookies = ['session']
# Torrent Stats
self.minseed = None
self.minleech = None
# Miscellaneous Options
self.supports_absolute_numbering = True
self.max_back_pages = 2
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']
# Cache
self.cache = tvcache.TVCache(self)
# One Piece and Boruto are the only animes I'm aware of that use "absolute" numbering; the problem is
# that they include the (wrong) season along with the absolute episode, e.g.: One Piece - S08E836.
# 836 is the latest episode in absolute numbering, which is correct, but S08 is not the current season...
# So for these shows, I don't see another way to make it work...
#
# All other animes I tested have the correct season and episode set, so I can't strip the season from
# all of them or everything else will break.
#
# In this indexer the season looks like it is added "automatically", so all current and new releases
# will be broken until they, or the source they get that info from, fix it...
self.absolute_numbering = [
'One Piece', 'Boruto: Naruto Next Generations'
]
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Information about the episode being searched (when not RSS)
:returns: A list of search results (structure)
"""
results = []
if not self.login():
return results
anime = False
if ep_obj and ep_obj.show:
anime = ep_obj.show.anime == 1
search_params = {
'order_by': 'time',
'order_way': 'desc',
'group_results': 0,
'action': 'basic',
'searchsubmit': 1
}
if 'RSS' in search_strings.keys():
search_params['filter_cat[14]'] = 1 # anime
search_params['filter_cat[2]'] = 1 # tv shows
elif anime:
search_params['filter_cat[14]'] = 1 # anime
else:
search_params['filter_cat[2]'] = 1 # tv shows
for mode in search_strings:
items = []
logger.log(u'Search Mode: {0}'.format(mode), logger.DEBUG)
# if looking for season, look for more pages
if mode == 'Season':
self.max_back_pages = 10
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u'Search string: {0}'.format(search_string.decode('utf-8')), logger.DEBUG)
# Remove season / episode from search (not supported by tracker)
search_str = re.sub(r'\d+$' if anime else r'[S|E]\d\d', '', search_string).strip()
search_params['searchstr'] = search_str
next_page = 1
has_next_page = True
while has_next_page and next_page <= self.max_back_pages:
search_params['page'] = next_page
logger.log(u'Page Search: {0}'.format(next_page), logger.DEBUG)
next_page += 1
response = self.session.get(self.urls['search'], params=search_params)
if not response:
logger.log('No data returned from provider', logger.DEBUG)
continue
result = self._parse(response.content, mode)
has_next_page = result['has_next_page']
items += result['items']
results += items
return results
def _parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A KV with a list of items found and whether there is a next page to search
"""
def process_column_header(td):
ret = u''
if td.a and td.a.img:
ret = td.a.img.get('title', td.a.get_text(strip=True))
if not ret:
ret = td.get_text(strip=True)
return ret
items = []
has_next_page = False
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', id='torrent_table')
torrent_rows = torrent_table('tr') if torrent_table else []
# ignore next page in RSS mode
has_next_page = mode != 'RSS' and html.find('a', class_='pager_next') is not None
logger.log(u'More Pages? {0}'.format(has_next_page), logger.DEBUG)
# Continue only if at least one Release is found
if len(torrent_rows) < 2:
logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
return {'has_next_page': has_next_page, 'items': []}
# '', '', 'Name /Year', 'Files', 'Time', 'Size', 'Snatches', 'Seeders', 'Leechers'
labels = [process_column_header(label) for label in torrent_rows[0]('td')]
group_title = u''
# Skip column headers
for result in torrent_rows[1:]:
cells = result('td')
result_class = result.get('class')
# When "Grouping Torrents" is enabled, the structure of table change
group_index = -2 if 'group_torrent' in result_class else 0
try:
title = result.select('a[href^="torrents.php?id="]')[0].get_text()
title = re.sub('\s+', ' ', title).strip() # clean empty lines and multiple spaces
if 'group' in result_class or 'torrent' in result_class:
# get international title if available
title = re.sub('.* \[(.*?)\](.*)', r'\1\2', title)
if 'group' in result_class:
group_title = title
continue
# Clean dash between title and season/episode
title = re.sub('- (S\d{2}(E\d{2,4})?)', r'\1', title)
for serie in self.absolute_numbering:
if serie in title:
# remove season from title when its in absolute format
title = re.sub('S\d{2}E(\d{2,4})', r'\1', title)
break
download_url = urljoin(self.url,
result.select('a[href^="torrents.php?action=download"]')[0]['href'])
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('Seeders') + group_index].get_text(strip=True))
leechers = try_int(cells[labels.index('Leechers') + group_index].get_text(strip=True))
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != "RSS":
logger.log("Discarding torrent because it doesn't meet the"
" minimum seeders or leechers: {0} (S:{1} L:{2})".format
(title, seeders, leechers), logger.DEBUG)
continue
torrent_details = None
if 'group_torrent' in result_class:
# torrents belonging to a group
torrent_details = title
title = group_title
elif 'torrent' in result_class:
# standalone/un grouped torrents
torrent_details = cells[labels.index('Nome/Ano')].find('div', class_='torrent_info').get_text()
torrent_details = torrent_details.replace('[', ' ').replace(']', ' ').replace('/', ' ')
torrent_details = torrent_details.replace('Full HD ', '1080p').replace('HD ', '720p')
torrent_size = cells[labels.index('Tamanho') + group_index].get_text(strip=True)
size = convert_size(torrent_size) or -1
torrent_name = '{0} {1}'.format(title, torrent_details.strip()).strip()
torrent_name = re.sub('\s+', ' ', torrent_name)
items.append({
'title': torrent_name,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'hash': ''
})
if mode != 'RSS':
logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
(torrent_name, seeders, leechers), logger.DEBUG)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
logger.log('Failed parsing provider.', logger.ERROR)
return {'has_next_page': has_next_page, 'items': items}
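# Example of the title clean-up chain in _parse above (illustrative):
#   'Shingeki no Kyojin [Attack on Titan] - S02E05'
#       -> 'Attack on Titan S02E05'  (international title kept, dash dropped)
# and, for a show listed in self.absolute_numbering:
#   'One Piece S08E836' -> 'One Piece 836'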
def login(self):
"""Login method used for logging in before doing a search and torrent downloads."""
cookie_dict = dict_from_cookiejar(self.session.cookies)
if cookie_dict.get('session'):
return True
if self.cookies:
add_dict_to_cookiejar(self.session.cookies, dict(x.rsplit('=', 1) for x in self.cookies.split(';')))
cookie_dict = dict_from_cookiejar(self.session.cookies)
if cookie_dict.get('session'):
return True
login_params = {
'submit': 'Login',
'username': self.username,
'password': self.password,
'keeplogged': 1,
}
response = self.get_url(self.urls['login'], post_data=login_params, returns='text')
if not response:
logger.log(u"Unable to connect to provider", logger.WARNING)
return False
if re.search('<title>Login :: BJ-Share</title>', response):
logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
return False
return True
provider = BJShareProvider()
| gpl-3.0 | -3,524,029,654,109,809,000 | 39.376238 | 119 | 0.541197 | false | 4.234683 | false | false | false | 0.004005 |
auvsi-suas/interop | server/auvsi_suas/models/map.py | 1 | 1134 | """Map model."""
import logging
from auvsi_suas.models import pb_utils
from auvsi_suas.proto import interop_admin_api_pb2
from django.conf import settings
from django.contrib import admin
from django.db import models
logger = logging.getLogger(__name__)
class Map(models.Model):
"""Map submission for a team."""
# The mission this is a map for.
mission = models.ForeignKey('MissionConfig', on_delete=models.CASCADE)
# The user which submitted and owns this map.
user = models.ForeignKey(settings.AUTH_USER_MODEL,
db_index=True,
on_delete=models.CASCADE)
# Uploaded map.
uploaded_map = models.ImageField(upload_to='maps', blank=True)
# Quality assigned by a judge.
quality = models.IntegerField(choices=pb_utils.FieldChoicesFromEnum(
interop_admin_api_pb2.MapEvaluation.MapQuality),
null=True,
blank=True)
@admin.register(Map)
class MapModelAdmin(admin.ModelAdmin):
raw_id_fields = ('mission', )
list_display = ('pk', 'mission', 'user', 'quality')
| apache-2.0 | 5,132,380,777,794,623,000 | 30.5 | 74 | 0.640212 | false | 3.910345 | false | false | false | 0 |
bgaunt/openEcslent | engine/actionMgr.py | 2 | 5839 | #---------------------------------------------------------------------------
# Copyright 2010, 2011 Sushil J. Louis and Christopher E. Miles,
# Evolutionary Computing Systems Laboratory, Department of Computer Science
# and Engineering, University of Nevada, Reno.
#
# This file is part of OpenECSLENT
#
# OpenECSLENT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenECSLENT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenECSLENT. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
#-------------------------End Copyright Notice------------------------------
import exceptions
from mgr import Mgr
from vector import vector3
import timer
import os
import yaml
import traceback
import command
class ActionHistory(object):
def __init__(self):
self.timeOnClose = 0.0
self.actions = []
class Action(object):
def __init__(self, time):
self.time = time
def do(self, engine):
raise exceptions.CallingAbstractFunction
class CreateEntity(Action):
def __init__(self, time, handle, type):
Action.__init__(self, time)
self.handle = handle
self.type = type
def do(self, engine):
#print 'CreateEntity.do', self.handle, self.type
engine.entMgr.createEntity(self.handle, self.type)
class MoveEntity(Action):
def __init__(self, time, handle, pos):
Action.__init__(self, time)
self.handle = handle
self.pos = pos
def do(self, engine):
#print 'MoveEntity.do', self.handle, self.pos
ent = engine.entMgr.findEntFromHandle(self.handle)
ent.pos = self.pos
class MoveToAction(Action):
def __init__(self, time, handle, desiredState, replaceExistingCommands):
Action.__init__(self, time)
self.handle = handle
self.desiredState = desiredState
self.replaceExistingCommands = replaceExistingCommands
def do(self, engine):
ent = engine.entMgr.findEntFromHandle(self.handle)
self.desiredState.connectToEngine(engine)
cmd = command.MoveTo(engine, self.desiredState)
if self.replaceExistingCommands:
ent.squad.SquadAI.commands = []
ent.squad.SquadAI.commands.append(cmd)
class AdjustSpeed(Action):
def __init__(self, time, handle, speed):
Action.__init__(self, time)
self.handle = handle
self.speed = speed
def do(self, engine):
ent = engine.entMgr.findEntFromHandle(self.handle)
ent.UnitAI.navDesiredSpeed = self.speed
ent.UnitAI.command.desiredSpeed = self.speed
ent.uiDesiredSpeed = self.speed
class ActionMgr(Mgr):
history = ActionHistory()
pendingActions = []
def initialize(self):
pass
def loadLevel(self):
#print 'ActionMgr.loadLevel'
for filename in self.engine.localOptions.gameOptions.toLoad:
filename = os.path.join('ActionHistory/', filename)
f = open(filename, 'r')
s = f.read()
f.close()
history = yaml.load(s)
for action in history.actions:
self.enqueue(action)
historyFilename = 'ActionHistory/actionHistory.yaml'
historyFilenameBackup = 'ActionHistory/actionHistory_backup.yaml'
toFileTimer = timer.Timer(1.0)
dirty = False
def tick(self, dtime):
while self.pendingActions:
action = self.pendingActions[0]
#print action, action.time, self.engine.gameTime
if self.engine.gameTime >= action.time:
self.pendingActions.pop(0)
self.do(action)
else:
break #list is sorted - if first fails, all fail
if self.dirty and self.toFileTimer.check(dtime):
self.dirty = False
try:
os.unlink(self.historyFilenameBackup)
except:
print 'Failed to erase old history file'
traceback.print_exc()
try:
os.rename(self.historyFilename, self.historyFilenameBackup)
except:
print 'Failed to backup history file'
traceback.print_exc()
s = yaml.dump(self.history)
f = open(self.historyFilename, 'w')
f.write(s)
f.close()
def do(self, action):
action.do(self.engine)
#print 'ActionMgr.do', action
def enqueue(self, action):
self.dirty = True
self.pendingActions.append(action)
self.pendingActions.sort(key=lambda action:action.time)
#self.history.actions.append(action)
#self.history.actions.sort(key=lambda action:action.time)
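# Hedged usage sketch (the engine wiring below is illustrative):
#   mgr = engine.actionMgr
#   mgr.enqueue(CreateEntity(time=0.0, handle='ddg-1', type='destroyer'))
#   mgr.enqueue(MoveEntity(time=1.5, handle='ddg-1', pos=vector3(10, 0, 20)))
#   # tick() replays each pending action once engine.gameTime reaches action.time.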
#class TestEntityClass:
#def __init__(self):
#pass
#testMode = True
#print 'ActionMgr.Test'
#doSaveTest = True
#if doSaveTest:
#h = ActionHistory()
#a1 = CreateEntity(time=1.0, type=TestEntityClass, pos=vector3(1,0,2))
#h.actions.append(a1)
#print h
#s = yaml.dump(h)
#print s
#f = open('test.yaml', 'w')
#f.write(s)
#f.close()
#doLoadTest = True
#if doLoadTest:
#f = open('test.yaml', 'r')
#s = f.read()
#f.close()
#print s
#h = yaml.load(s)
#print h
| gpl-3.0 | -8,400,716,926,673,314,000 | 31.803371 | 78 | 0.593595 | false | 3.96133 | true | false | false | 0.009419 |
NicovincX2/Python-3.5 | Algèbre/Algèbre linéaire/Matrice/Matrice inversible/inverseGauss1_iter.py | 1 | 2030 | # -*- coding: utf-8 -*-
import os
# compute the inverse of a matrix via Newton iteration. This is based
# on problem 4.12 in Garcia. See also
# http://amca01.wordpress.com/2010/08/18/the-nsh-method-for-matrix-inversion/
# (which cites Gunther Schultz 1933 and Harold Hotelling 1943)
# also Wikipedia for invertible matrix notes:
#
# Newton inverse is useful for inverting similar matrixes (use
# previous inverse as starting guess).
#
# Note: this is very sensitive to the initial guess. It will diverge
# quickly if the initial guess gives products that are all > 1
import numpy
tol = 1.e-12
def iter_inverse(A, Ainv0):
""" here A is the matrix and Ainv0 is an initial guess to the
inverse """
Ainv = Ainv0.copy()
err = 1.e10
iter = 0
while (err > tol):
Ainv_new = 2.0 * Ainv - numpy.dot(Ainv, numpy.dot(A, Ainv))
err = numpy.max(numpy.abs(Ainv - Ainv_new))
Ainv = Ainv_new.copy()
iter += 1
print("number of iterations = ", iter)
return Ainv
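# A guaranteed-convergent starting guess for the Newton-Schulz iteration above
# (standard result: with X0 = A^T / (||A||_1 * ||A||_inf) the eigenvalues of
# A X0 lie in (0, 1], so the iteration converges for any invertible A).
# Optional helper; the experiments below keep their original guesses.
def safe_initial_guess(A):
    n1 = numpy.linalg.norm(A, 1)  # maximum column sum
    ninf = numpy.linalg.norm(A, numpy.inf)  # maximum row sum
    return numpy.transpose(A) / (n1 * ninf)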
# some attempts
A = numpy.array([[4, 3, 4, 10],
[2, -7, 3, 0],
[-2, 11, 1, 3],
[3, -4, 0, 2]], dtype=numpy.float64)
# identity
print("calling with Ainv0 = I")
Ainv = iter_inverse(A, numpy.eye(4))
print(numpy.dot(A, Ainv))
print(" ")
# transpose scaled by maximum element **2
print("calling with Ainv0 = A^T/max(A)**2")
Ainv = iter_inverse(A, numpy.transpose(A) / numpy.max(numpy.abs(A))**2)
print(numpy.dot(A, Ainv))
print(" ")
# diagonal of 1/ A's diagonal
Ainv = numpy.diagflat(1.0 / numpy.diag(A))
print("calling with diag(Ainv0) = 1.0/diag(A)")
Ainv = iter_inverse(A, numpy.transpose(A) / numpy.max(numpy.abs(A))**2)
print(numpy.dot(A, Ainv))
print(" ")
# matrix with all elements = 1/max(A)
Ainv = numpy.ones(A.shape) / numpy.max(numpy.abs(A))
print("calling with Ainv_ij = 1.0/max(|A|)")
Ainv = iter_inverse(A, numpy.transpose(A) / numpy.max(numpy.abs(A))**2)
print(numpy.dot(A, Ainv))
print(" ")
os.system("pause")
| gpl-3.0 | -1,571,508,430,861,287,200 | 21.307692 | 77 | 0.629064 | false | 2.788462 | false | false | false | 0 |
CrossWaterBridge/python-lds-scriptures | scriptures/tests/test_structure.py | 2 | 1545 | import unittest
from scriptures.structure import Structure, Testament, Book, Chapter
class TestStructure(unittest.TestCase):
def test_testaments(self):
self.assertEqual(Structure().testaments(), [
Testament(uri='/scriptures/ot'),
Testament(uri='/scriptures/nt'),
Testament(uri='/scriptures/bofm'),
Testament(uri='/scriptures/dc-testament'),
Testament(uri='/scriptures/pgp'),
])
def test_books(self):
self.assertEqual(Structure().books(testament=Testament(uri='/scriptures/dc-testament')), [
Book(uri='/scriptures/dc-testament/dc'),
Book(uri='/scriptures/dc-testament/od'),
])
self.assertEqual(Structure().books(testament=Testament(uri='/scriptures/pgp')), [
Book(uri='/scriptures/pgp/moses'),
Book(uri='/scriptures/pgp/abr'),
Book(uri='/scriptures/pgp/js-m'),
Book(uri='/scriptures/pgp/js-h'),
Book(uri='/scriptures/pgp/a-of-f'),
])
def test_chapters(self):
self.assertEqual(Structure().chapters(book=Book(uri='/scriptures/dc-testament/od')), [
Chapter(uri='/scriptures/dc-testament/od/1', verse_count=0),
Chapter(uri='/scriptures/dc-testament/od/2', verse_count=0),
])
self.assertEqual(Structure().chapters(book=Book(uri='/scriptures/ot/hag')), [
Chapter(uri='/scriptures/ot/hag/1', verse_count=15),
Chapter(uri='/scriptures/ot/hag/2', verse_count=23),
])
| mit | 1,531,557,741,728,824,600 | 41.916667 | 98 | 0.601942 | false | 3.503401 | true | false | false | 0.002589 |
puckipedia/youtube-dl | youtube_dl/extractor/brightcove.py | 89 | 15403 | # encoding: utf-8
from __future__ import unicode_literals
import re
import json
import xml.etree.ElementTree
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
unescapeHTML,
unsmuggle_url,
)
class BrightcoveIE(InfoExtractor):
_VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
_TESTS = [
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
'id': '2371591881001',
'ext': 'mp4',
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
}
},
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
'info_dict': {
'id': '1785452137001',
'ext': 'flv',
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
},
},
{
# From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
'info_dict': {
'id': '2750934548001',
'ext': 'mp4',
'title': 'This Bracelet Acts as a Personal Thermostat',
'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
'uploader': 'Mashable',
},
},
{
# test that the default referer works
# from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
'info_dict': {
'id': '2878862109001',
'ext': 'mp4',
'title': 'Lost in Motion II',
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
},
{
# test flv videos served by akamaihd.net
# From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3ABC2996102916001&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D',
# The md5 checksum changes on each download
'info_dict': {
'id': '2996102916001',
'ext': 'flv',
'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'uploader': 'Red Bull TV',
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
},
},
{
# playlist test
# from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
'id': '3550319591001',
},
'playlist_mincount': 7,
},
]
@classmethod
def _build_brighcove_url(cls, object_str):
"""
Build a Brightcove url from a xml string containing
<object class="BrightcoveExperience">{params}</object>
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>',
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
# remove namespace to simplify extraction
object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
try:
object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
except compat_xml_parse_error:
return
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
flashvars = dict(
(k, v[0])
for k, v in compat_parse_qs(fv_el.attrib['value']).items())
else:
flashvars = {}
def find_param(name):
if name in flashvars:
return flashvars[name]
node = find_xpath_attr(object_doc, './param', 'name', name)
if node is not None:
return node.attrib['value']
return None
params = {}
playerID = find_param('playerID')
if playerID is None:
raise ExtractorError('Cannot find player ID')
params['playerID'] = playerID
playerKey = find_param('playerKey')
# Not all pages define this value
if playerKey is not None:
params['playerKey'] = playerKey
# The three fields hold the id of the video
videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID')
if videoPlayer is not None:
params['@videoPlayer'] = videoPlayer
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
return cls._make_brightcove_url(params)
@classmethod
def _build_brighcove_url_from_js(cls, object_js):
# The layout of JS is as follows:
# customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
# // build Brightcove <object /> XML
# }
m = re.search(
r'''(?x)customBC.\createVideo\(
.*? # skipping width and height
["\'](?P<playerID>\d+)["\']\s*,\s* # playerID
["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters
# in length, however it's appended to itself
# in places, so truncate
["\'](?P<videoID>\d+)["\'] # @videoPlayer
''', object_js)
if m:
return cls._make_brightcove_url(m.groupdict())
@classmethod
def _make_brightcove_url(cls, params):
data = compat_urllib_parse.urlencode(params)
return cls._FEDERATED_URL_TEMPLATE % data
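# Example (hypothetical IDs):
#   _make_brightcove_url({'playerID': '123', '@videoPlayer': '456'})
# yields 'http://c.brightcove.com/services/viewer/htmlFederated?' followed
# by 'playerID=123&%40videoPlayer=456' (pair order depends on dict
# iteration; keys and values are URL-encoded).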
@classmethod
def _extract_brightcove_url(cls, webpage):
"""Try to extract the brightcove url from the webpage, returns None
if it can't be found
"""
urls = cls._extract_brightcove_urls(webpage)
return urls[0] if urls else None
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
url_m = re.search(
r'<meta\s+property=[\'"]og:video[\'"]\s+content=[\'"](https?://(?:secure|c)\.brightcove.com/[^\'"]+)[\'"]',
webpage)
if url_m:
url = unescapeHTML(url_m.group(1))
# Some sites don't add it, we can't download with this url, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
if 'playerKey' in url or 'videoId' in url:
return [url]
matches = re.findall(
r'''(?sx)<object
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?>\s*</object>''',
webpage)
if matches:
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
return list(filter(None, [
cls._build_brighcove_url_from_js(custom_bc)
for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)]))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
# Change the 'videoId' and other fields to '@videoPlayer'
url = re.sub(r'(?<=[?&])(videoI(d|D)|bctid)', '%40videoPlayer', url)
# Change bckey (used by bcove.me urls) to playerKey
url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
mobj = re.match(self._VALID_URL, url)
query_str = mobj.group('query')
query = compat_urlparse.parse_qs(query_str)
videoPlayer = query.get('@videoPlayer')
if videoPlayer:
# We set the original url as the default 'Referer' header
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query_str, query, referer=referer)
elif 'playerKey' in query:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
else:
raise ExtractorError(
'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
expected=True)
def _get_video_info(self, video_id, query_str, query, referer=None):
request_url = self._FEDERATED_URL_TEMPLATE % query_str
req = compat_urllib_request.Request(request_url)
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
if referer is not None:
req.add_header('Referer', referer)
webpage = self._download_webpage(req, video_id)
error_msg = self._html_search_regex(
r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
'brightcove said: %s' % error_msg, expected=True)
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
return self._extract_video_info(video_info)
def _get_playlist_info(self, player_key):
info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
playlist_info = self._download_webpage(
info_url, player_key, 'Downloading playlist information')
json_data = json.loads(playlist_info)
if 'videoList' not in json_data:
raise ExtractorError('Empty playlist')
playlist_info = json_data['videoList']
videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
playlist_title=playlist_info['mediaCollectionDTO']['displayName'])
def _extract_video_info(self, video_info):
info = {
'id': compat_str(video_info['id']),
'title': video_info['displayName'].strip(),
'description': video_info.get('shortDescription'),
'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
'uploader': video_info.get('publisherName'),
}
renditions = video_info.get('renditions')
if renditions:
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
ext = None
if rend['remote']:
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(url, info['id'], 'mp4'))
continue
elif 'akamaihd.net' in url_comp.netloc:
# This type of renditions are served through
# akamaihd.net, but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
if ext is None:
ext = determine_ext(url)
size = rend.get('size')
formats.append({
'url': url,
'ext': ext,
'height': rend.get('frameHeight'),
'width': rend.get('frameWidth'),
'filesize': size if size != 0 else None,
})
self._sort_formats(formats)
info['formats'] = formats
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
})
if self._downloader.params.get('include_ads', False):
adServerURL = video_info.get('_youtubedl_adServerURL')
if adServerURL:
ad_info = {
'_type': 'url',
'url': adServerURL,
}
if 'url' in info:
return {
'_type': 'playlist',
'title': info['title'],
'entries': [ad_info, info],
}
else:
return ad_info
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % info['id'])
return info
| unlicense | -7,732,271,752,180,358,000 | 43.244253 | 483 | 0.547899 | false | 3.601637 | false | false | false | 0.002598 |
dou800/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/lib2to3/fixes/fix_types.py | 304 | 1806 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for removing uses of the types module.
These work for only the known names in the types module. The forms above
can include types. or not. ie, It is assumed the module is imported either as:
import types
from types import ... # either * or specific types
The import statements are not modified.
There should be another fixer that handles at least the following constants:
type([]) -> list
type(()) -> tuple
type('') -> str
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name
_TYPE_MAPPING = {
'BooleanType' : 'bool',
'BufferType' : 'memoryview',
'ClassType' : 'type',
'ComplexType' : 'complex',
'DictType': 'dict',
'DictionaryType' : 'dict',
'EllipsisType' : 'type(Ellipsis)',
#'FileType' : 'io.IOBase',
'FloatType': 'float',
'IntType': 'int',
'ListType': 'list',
'LongType': 'int',
'ObjectType' : 'object',
'NoneType': 'type(None)',
'NotImplementedType' : 'type(NotImplemented)',
'SliceType' : 'slice',
'StringType': 'bytes', # XXX ?
'StringTypes' : 'str', # XXX ?
'TupleType': 'tuple',
'TypeType' : 'type',
'UnicodeType': 'str',
'XRangeType' : 'range',
}
_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
class FixTypes(fixer_base.BaseFix):
BM_compatible = True
PATTERN = '|'.join(_pats)
def transform(self, node, results):
new_value = unicode(_TYPE_MAPPING.get(results["name"].value))
if new_value:
return Name(new_value, prefix=node.prefix)
return None
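# Example transformations performed by this fixer:
#   types.StringType     -> bytes
#   types.DictionaryType -> dict
#   types.NoneType       -> type(None)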
| mit | 4,309,489,426,129,548,000 | 28.129032 | 80 | 0.59247 | false | 3.7625 | false | false | false | 0.009413 |
wso2-incubator/device-cloud-appliances | DigitalDisplay/modules/update_listener.py | 5 | 7066 | """
/**
* Copyright (c) 2015, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* WSO2 Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except
* in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
**/
"""
import subprocess
import os
import sys
import threading
import logging
import kernel_utils as kernel_utils
LOGGER = logging.getLogger('wso2server.version_control')
class VersionControlError(Exception):
pass
class Repository(object):
def replace_params(self, str_):
str_ = str_.replace("$dir", self.repo_name)
return str_.replace("$url", self.repo_url)
def __init__(self, repo_name_, repo_url_, repo_path_, repo_conf_):
self.repo_name = repo_name_
self.repo_url = repo_url_
self.repo_commands = repo_conf_['Commands']
self.local_repo_base_path = repo_path_
# local_repo_base_path = os.getcwd()
self.local_repo_path = os.path.join(self.local_repo_base_path, self.repo_name)
self.init()
def __makedirs(self, dir_path):
"""Adding os.makedirs() support for python 2.7.3"""
if dir_path and not os.path.isdir(dir_path):
head, tail = os.path.split(dir_path)
self.__makedirs(head)
os.mkdir(dir_path, 0777)
def init_local_repo(self):
LOGGER.debug("Creating local repository " + self.repo_name + "...!")
self.__makedirs(self.local_repo_base_path)
os.chdir(self.local_repo_base_path)
command = self.replace_params(self.repo_commands["Init"])
try:
# call command and wait to complete
subprocess.call(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True, shell=True)
except OSError, e:
if e.errno == 2:
LOGGER.debug("Command `" + command + "` Not found...!")
sys.exit(1)
except:
LOGGER.debug("Error occurred on initializing repository: " + str(sys.exc_info()[1]))
def update_local_repo(self):
LOGGER.debug("Updating local repo " + self.repo_name + "...")
os.chdir(self.local_repo_path)
command = self.replace_params(self.repo_commands["Update"])
try:
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True, shell=True)
p.wait()
except OSError, e:
if e.errno == 2:
LOGGER.warning("Command `" + command + "` Not found...!")
sys.exit(1)
except:
LOGGER.warning("Error occurred updating local repository: " + str(sys.exc_info()[1]))
def check_remote_changes(self):
os.chdir(self.local_repo_path)
command = self.replace_params(self.repo_commands["LocalRevision"])
try:
local_rev = subprocess.check_output(command, stderr=subprocess.PIPE, shell=True)
local_rev = local_rev.strip()
except subprocess.CalledProcessError:
LOGGER.warning(
"Error occurred on local: " + str(sys.exc_info()[1]))
return False
command = self.replace_params(self.repo_commands["RemoteRevision"])
try:
remote_rev = subprocess.check_output(command, stderr=subprocess.PIPE, shell=True)
remote_rev = remote_rev.strip()
except OSError, e:
if e.errno == 2:
LOGGER.warning("Command `" + command + "` Not found...!")
sys.exit(1) # no need to continue
except subprocess.CalledProcessError:
LOGGER.warning(
"Error occurred on remote: " + str(sys.exc_info()[1]))
return False
LOGGER.debug(self.repo_name)
LOGGER.debug("local rev: '" + local_rev + "'")
LOGGER.debug("remote_rev: '" + remote_rev + "'")
        if local_rev == remote_rev:
            return False  # no changes needed
        elif local_rev == "" or remote_rev == "":
            return False  # a revision could not be read; treat as no change
        else:
            LOGGER.debug("changes detected")
            return True  # changes needed
def init(self):
if not (os.path.exists(self.local_repo_path)):
self.init_local_repo()
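# Handler configs are matched by name: UpdateListener.update() looks up the
# entry in `handlers` whose '@name' equals
# update_conf['Repository']['VCSHandler'] (see __get_repo_handler below).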
class UpdateListener(object):
lock = None
@staticmethod
def __get_repo_handler(handlers, update_conf):
"""
        Returns the repository handler used to execute update commands.
"""
try:
# get handler name for the repository
repo_handler_name = update_conf['Repository']['VCSHandler']
except KeyError:
LOGGER.debug("No VCSHandler found...!")
raise
# get handler for the repository
repo_handler = [handler for handler in handlers if handler['@name'] == repo_handler_name]
if len(repo_handler) == 0:
raise VersionControlError(
"Repo Handler " + repo_handler_name + " is not defined in VCSHandlers!")
return repo_handler[0]
@staticmethod
def execute_copy_script(dd_kernel_repo_name):
if not (os.path.exists(kernel_utils.temp_path)):
os.mkdir(kernel_utils.temp_path)
os.chdir(kernel_utils.scripts_path)
os.system("sh update_script.sh '" + kernel_utils.base_path+"' "+dd_kernel_repo_name)
LOGGER.debug("running update_script.sh")
@staticmethod
def update(handlers, update_conf, lock, safe_exit_handler=None):
"""
        Polls the repository for program or content updates and applies them.
"""
repo_handler = UpdateListener.__get_repo_handler(handlers, update_conf)
lock.acquire()
dd_kernel_folder = kernel_utils.temp_path
try:
dd_kernel_repo_name = update_conf['Repository']['Name']
dd_kernel_repo_url = update_conf['Repository']['Url']
dd_kernel_poll_int = update_conf['PollingInterval']
except KeyError:
lock.release()
raise VersionControlError("Error in reading UpdatePolicy for Kernel...!")
git = Repository(dd_kernel_repo_name, dd_kernel_repo_url, dd_kernel_folder, repo_handler)
if git.check_remote_changes():
git.update_local_repo()
if safe_exit_handler:
safe_exit_handler()
            lock.release()
            UpdateListener.execute_copy_script(dd_kernel_repo_name)
        else:
            # release exactly once; the update branch above already released it
            lock.release()
# repeat it
threading.Timer(kernel_utils.get_seconds(dd_kernel_poll_int), UpdateListener.update,
[handlers, update_conf, lock, safe_exit_handler]).start()
| apache-2.0 | -7,211,420,498,327,021,000 | 35.994764 | 97 | 0.599774 | false | 4.026211 | false | false | false | 0.002406 |
jaliste/sanaviron.gtk-3 | sanaviron/src/3rd/pycha/examples/piechart.py | 6 | 1670 | # Copyright (c) 2007-2008 by Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of PyCha.
#
# PyCha is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyCha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyCha. If not, see <http://www.gnu.org/licenses/>.
import sys
import cairo
import pycha.pie
from lines import lines
def pieChart(output):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 400, 400)
dataSet = [(line[0], [[0, line[1]]]) for line in lines]
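    # Each dataset is (label, [[x, y]]); the pie chart only uses the y
    # value (the line count here), so x is a dummy 0.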
options = {
'axis': {
'x': {
'ticks': [dict(v=i, label=d[0]) for i, d in enumerate(lines)],
}
},
'background': {
'hide': True,
},
'padding': {
'left': 70,
'right': 10,
'top': 0,
'bottom': 0,
},
'legend': {
'hide': True,
}
}
chart = pycha.pie.PieChart(surface, options)
chart.addDataset(dataSet)
chart.render()
surface.write_to_png(output)
if __name__ == '__main__':
if len(sys.argv) > 1:
output = sys.argv[1]
else:
output = 'piechart.png'
pieChart(output)
| apache-2.0 | -4,051,244,164,424,351,000 | 25.935484 | 80 | 0.596407 | false | 3.568376 | false | false | false | 0.001796 |
unbornchikken/genevo-python | genevo/optimizers/population.py | 1 | 6041 | import torch
from .organization import Organization
from .dna import dna
from ..tex import tex
class Population:
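    """One generation of DNA vectors.

    The owner must provide make_seed_dna() and calculate_fitness(); each
    column of the DNA tensor is one individual, ordered by fitness once
    sort() has run.
    """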
def __init__(self, owner):
assert hasattr(owner, 'make_seed_dna') and callable(owner.make_seed_dna)
assert hasattr(owner, 'calculate_fitness') and callable(owner.calculate_fitness)
self._owner = owner
self._dna_tensor = tex.constant(0, owner.dna_size, owner.population_size)
self._order_by = torch.arange(0, owner.population_size)
self._bodies = None
self._fitness = None
self._sorted_dna_tensor = None
self._idx = 0
@property
def sorted(self):
return self._fitness is not None and self._bodies is not None
def _verify_sorted(self):
if not self.sorted:
raise RuntimeError('Population is not sorted.')
def __len__(self):
return self._idx
def get_organization_and_fitness_at(self, index, fitness_as_tensor=True):
if index < len(self):
idx = int(self._order_by[index])
body = self._bodies[idx] if self._bodies is not None else None
org = Organization(dna.create(tex.idx(self._dna_tensor, tex.span, idx)), body)
fit = None
if self.sorted:
if fitness_as_tensor:
fit = self._fitness.narrow(0, idx, 1)
else:
fit = self._fitness[idx]
return org, fit
return None
def get_dna_tensor(self, get_sorted=True):
if get_sorted:
self._verify_sorted()
if self._sorted_dna_tensor is None:
self._sorted_dna_tensor = torch.index_select(self._dna_tensor, 1, self._order_by)
return self._sorted_dna_tensor
return self._dna_tensor
def fill(self):
if len(self) != 0:
raise RuntimeError('Population is not empty.')
while len(self) != self._owner.population_size:
self.push(self._owner.make_seed_dna())
def push(self, item):
idx = self._idx
if idx == self._owner.population_size:
raise RuntimeError('Population is full.')
tensor = None
if isinstance(item, tuple):
if len(item) != 2:
raise ValueError('Invalid tuple length.')
if not ((item[0] is None or torch.is_tensor(item[0])) and torch.is_tensor(item[1])):
raise ValueError('Invalid arguments.')
if item[0] is None:
tensor = item[1]
else:
tensor = tex.join(1, item[0], item[1])
elif torch.is_tensor(item):
tensor = item
else:
raise TypeError('Unknown argument.')
if tensor is not None:
tensor_size = tensor.size()
count = tensor_size[1] if len(tensor_size) == 2 else 1
self._dna_tensor[:, idx:idx + count] = tensor
self._idx += count
def calculate_fitness(self):
        # A truth test on a multi-element tensor is ambiguous in PyTorch,
        # so compare against None explicitly.
        if self._fitness is not None:
            raise RuntimeError('Fitness is already calculated.')
if self._owner.population_size != len(self):
raise RuntimeError('Population is not full.')
res = self._owner.calculate_fitness(self._dna_tensor)
if not isinstance(res, tuple) or len(res) != 2:
raise TypeError('Result of "calculate_fitness" is not a tuple of length 2.')
if not isinstance(res[0], list):
raise TypeError('First value of result of "calculate_fitness" is not a list.')
if len(res[0]) != len(self):
raise ValueError(
'First value of result of "calculate_fitness" length is not {}.'\
.format(len(self)))
self._bodies = res[0]
if not torch.is_tensor(res[1]):
raise TypeError('Second value of result of "calculate_fitness" is not a tensor.')
self._fitness = res[1]
if self._fitness.size() != (len(self), ):
raise ValueError(
'Second value of result of "calculate_fitness" dims is not ({}, ).'\
.format(len(self)))
def sort(self):
self.calculate_fitness()
res = torch.sort(self._fitness)
self._order_by = res[1]
def make_crossovered_dna(self, std_dev=0.3, keep_elites_rate=0.05):
elites, remaining, remaining_size =\
self._initialize_selection(keep_elites_rate)
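        # Parent indices are drawn from a half-normal distribution, biasing
        # selection toward low indices, i.e. the best-ranked individuals
        # (torch.sort orders fitness ascending).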
        idx1 = (torch.abs(torch.randn(remaining_size) * std_dev)
                * remaining_size).clamp(max=remaining_size - 1).long()
        idx2 = (torch.abs(torch.randn(remaining_size) * std_dev)
                * remaining_size).clamp(max=remaining_size - 1).long()
dna_tensor1 = torch.index_select(remaining, 1, idx1)
dna_tensor2 = torch.index_select(remaining, 1, idx2)
crossovered = dna.crossover(dna_tensor1, dna_tensor2)
return elites, crossovered
def make_normal_dist_dna(self, std_dev=0.3, keep_elites_rate=0.05):
elites, remaining, remaining_size =\
self._initialize_selection(keep_elites_rate)
        idx = (torch.abs(torch.randn(remaining_size) * std_dev)
               * remaining_size).clamp(max=remaining_size - 1).long()
dna_tensor = torch.index_select(remaining, 1, idx)
return elites, dna_tensor
def _initialize_selection(self, keep_elites_rate):
sorted_dna_tensor = self.get_dna_tensor()
elite_count = 0
if keep_elites_rate > 0:
if keep_elites_rate < 1:
elite_count = max(round(self._owner.population_size * keep_elites_rate), 1)
else:
elite_count = min(self._owner.population_size, keep_elites_rate)
if elite_count == 0:
return None, sorted_dna_tensor, self._owner.population_size
elites = sorted_dna_tensor[:, 0:elite_count]
remaining = sorted_dna_tensor[:, elite_count:]
remaining_size = self._owner.population_size - elite_count
return elites, remaining, remaining_size | apache-2.0 | -1,173,253,087,416,635,000 | 35.179641 | 118 | 0.578878 | false | 3.833122 | false | false | false | 0.004635 |
thoughtpalette/thoughts.thoughtpalette.com | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/text.py | 94 | 68777 | # -*- coding: utf-8 -*-
"""
pygments.lexers.text
~~~~~~~~~~~~~~~~~~~~
Lexers for non-source code file types.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt, ClassNotFound
from pygments.lexers.other import BashLexer
__all__ = ['IniLexer', 'PropertiesLexer', 'SourcesListLexer', 'BaseMakefileLexer',
'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer', 'HttpLexer',
'PyPyLogLexer', 'RegeditLexer', 'HxmlLexer', 'EbnfLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg', 'dosini']
filenames = ['*.ini', '*.cfg']
mimetypes = ['text/x-ini']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Text, Operator, Text, String))
]
}
def analyse_text(text):
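        # Guess INI only when the first line is a [section] header.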
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
class RegeditLexer(RegexLexer):
"""
Lexer for `Windows Registry
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
*New in Pygments 1.6.*
"""
name = 'reg'
aliases = ['registry']
filenames = ['*.reg']
mimetypes = ['text/x-windows-registry']
tokens = {
'root': [
(r'Windows Registry Editor.*', Text),
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
bygroups(Keyword, Operator, Name.Builtin, Keyword)),
# String keys, which obey somewhat normal escaping
(r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
# Bare keys (includes @)
(r'(.*?)([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
],
'value': [
(r'-', Operator, '#pop'), # delete value
(r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
bygroups(Name.Variable, Punctuation, Number), '#pop'),
# As far as I know, .reg files do not support line continuation.
(r'.*', String, '#pop'),
]
}
def analyse_text(text):
return text.startswith('Windows Registry Editor')
class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
*New in Pygments 1.4.*
"""
name = 'Properties'
aliases = ['properties', 'jproperties']
filenames = ['*.properties']
mimetypes = ['text/x-java-properties']
tokens = {
'root': [
(r'\s+', Text),
(r'(?:[;#]|//).*$', Comment),
(r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
],
}
class SourcesListLexer(RegexLexer):
"""
Lexer that highlights debian sources.list files.
*New in Pygments 0.7.*
"""
name = 'Debian Sourcelist'
aliases = ['sourceslist', 'sources.list', 'debsources']
filenames = ['sources.list']
    mimetypes = ['application/x-debian-sourceslist']
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?$', Comment),
(r'^(deb(?:-src)?)(\s+)',
bygroups(Keyword, Text), 'distribution')
],
'distribution': [
(r'#.*?$', Comment, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\s$[]+', String),
(r'\[', String.Other, 'escaped-distribution'),
(r'\$', String),
(r'\s+', Text, 'components')
],
'escaped-distribution': [
(r'\]', String.Other, '#pop'),
(r'\$\(ARCH\)', Name.Variable),
(r'[^\]$]+', String.Other),
(r'\$', String.Other)
],
'components': [
(r'#.*?$', Comment, '#pop:2'),
(r'$', Text, '#pop:2'),
(r'\s+', Text),
(r'\S+', Keyword.Pseudo),
]
}
def analyse_text(text):
for line in text.split('\n'):
line = line.strip()
if not (line.startswith('#') or line.startswith('deb ') or
line.startswith('deb-src ') or not line):
return False
return True
class MakefileLexer(Lexer):
"""
Lexer for BSD and GNU make extensions (lenient enough to handle both in
the same file even).
*Rewritten in Pygments 0.10.*
"""
name = 'Makefile'
aliases = ['make', 'makefile', 'mf', 'bsdmake']
filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
mimetypes = ['text/x-makefile']
r_special = re.compile(r'^(?:'
# BSD Make
r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
# GNU Make
r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
r_comment = re.compile(r'^\s*@?#')
def get_tokens_unprocessed(self, text):
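        # Route make directives and comment lines straight to Comment
        # tokens; buffer everything else and lex it with BaseMakefileLexer,
        # merging the results back with do_insertions().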
ins = []
lines = text.splitlines(True)
done = ''
lex = BaseMakefileLexer(**self.options)
backslashflag = False
for line in lines:
if self.r_special.match(line) or backslashflag:
ins.append((len(done), [(0, Comment.Preproc, line)]))
backslashflag = line.strip().endswith('\\')
elif self.r_comment.match(line):
ins.append((len(done), [(0, Comment, line)]))
else:
done += line
for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
yield item
class BaseMakefileLexer(RegexLexer):
"""
Lexer for simple Makefiles (no preprocessing).
*New in Pygments 0.10.*
"""
name = 'Base Makefile'
aliases = ['basemake']
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
(r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
(r'\s+', Text),
(r'#.*?\n', Comment),
(r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
bygroups(Keyword, Text), 'export'),
(r'export\s+', Keyword),
# assignment
(r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)',
bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
# strings
(r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
# targets
(r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
'block-header'),
# TODO: add paren handling (grr)
],
'export': [
(r'[a-zA-Z0-9_${}-]+', Name.Variable),
(r'\n', Text, '#pop'),
(r'\s+', Text),
],
'block-header': [
(r'[^,\\\n#]+', Number),
(r',', Punctuation),
(r'#.*?\n', Comment),
(r'\\\n', Text), # line continuation
(r'\\.', Text),
(r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
],
}
class DiffLexer(RegexLexer):
"""
Lexer for unified or context-style diffs or patches.
"""
name = 'Diff'
aliases = ['diff', 'udiff']
filenames = ['*.diff', '*.patch']
mimetypes = ['text/x-diff', 'text/x-patch']
tokens = {
'root': [
(r' .*\n', Text),
(r'\+.*\n', Generic.Inserted),
(r'-.*\n', Generic.Deleted),
(r'!.*\n', Generic.Strong),
(r'@.*\n', Generic.Subheading),
(r'([Ii]ndex|diff).*\n', Generic.Heading),
(r'=.*\n', Generic.Heading),
(r'.*\n', Text),
]
}
def analyse_text(text):
if text[:7] == 'Index: ':
return True
if text[:5] == 'diff ':
return True
if text[:4] == '--- ':
return 0.9
DPATCH_KEYWORDS = ['hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
'replace']
class DarcsPatchLexer(RegexLexer):
"""
DarcsPatchLexer is a lexer for the various versions of the darcs patch
format. Examples of this format are derived by commands such as
``darcs annotate --patch`` and ``darcs send``.
*New in Pygments 0.10.*
"""
name = 'Darcs Patch'
aliases = ['dpatch']
filenames = ['*.dpatch', '*.darcspatch']
tokens = {
'root': [
(r'<', Operator),
(r'>', Operator),
(r'{', Operator),
(r'}', Operator),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text, Operator)),
(r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
bygroups(Operator, Keyword, Name, Text, Name, Operator,
Literal.Date, Text), 'comment'),
(r'New patches:', Generic.Heading),
(r'Context:', Generic.Heading),
(r'Patch bundle hash:', Generic.Heading),
(r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
bygroups(Text, Keyword, Text)),
(r'\+', Generic.Inserted, "insert"),
(r'-', Generic.Deleted, "delete"),
(r'.*\n', Text),
],
'comment': [
(r'[^\]].*\n', Comment),
(r'\]', Operator, "#pop"),
],
'specialText': [ # darcs add [_CODE_] special operators for clarity
(r'\n', Text, "#pop"), # line-based
(r'\[_[^_]*_]', Operator),
],
'insert': [
include('specialText'),
(r'\[', Generic.Inserted),
(r'[^\n\[]+', Generic.Inserted),
],
'delete': [
include('specialText'),
(r'\[', Generic.Deleted),
(r'[^\n\[]+', Generic.Deleted),
],
}
class IrcLogsLexer(RegexLexer):
"""
Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
"""
name = 'IRC logs'
aliases = ['irc']
filenames = ['*.weechatlog']
mimetypes = ['text/x-irclog']
flags = re.VERBOSE | re.MULTILINE
timestamp = r"""
(
# irssi / xchat and others
(?: \[|\()? # Opening bracket or paren for the timestamp
(?: # Timestamp
(?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
[T ])? # Date/time separator: T or space
(?: \d?\d [:.]?)+ # Time as :/.-separated groups of 1 or 2 digits
)
(?: \]|\))?\s+ # Closing bracket or paren for the timestamp
|
# weechat
\d{4}\s\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
|
# xchat
\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
)?
"""
tokens = {
'root': [
# log start/end
(r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
# hack
("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
# normal msgs
("^" + timestamp + r"""
(\s*<.*?>\s*) # Nick """,
bygroups(Comment.Preproc, Name.Tag), 'msg'),
# /me msgs
("^" + timestamp + r"""
(\s*[*]\s+) # Star
(\S+\s+.*?\n) # Nick + rest of message """,
bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
# join/part msgs
("^" + timestamp + r"""
(\s*(?:\*{3}|<?-[!@=P]?->?)\s*) # Star(s) or symbols
(\S+\s+) # Nick + Space
(.*?\n) # Rest of message """,
bygroups(Comment.Preproc, Keyword, String, Comment)),
(r"^.*?\n", Text),
],
'msg': [
(r"\S+:(?!//)", Name.Attribute), # Prefix
(r".*\n", Text, '#pop'),
],
}
class BBCodeLexer(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
*New in Pygments 0.6.*
"""
name = 'BBCode'
aliases = ['bbcode']
mimetypes = ['text/x-bbcode']
tokens = {
'root': [
(r'[^[]+', Text),
# tag/end tag begin
(r'\[/?\w+', Keyword, 'tag'),
# stray bracket
(r'\[', Text),
],
'tag': [
(r'\s+', Text),
# attribute with value
(r'(\w+)(=)("?[^\s"\]]+"?)',
bygroups(Name.Attribute, Operator, String)),
# tag argument (a la [color=green])
(r'(=)("?[^\s"\]]+"?)',
bygroups(Operator, String)),
# tag end
(r'\]', Keyword, '#pop'),
],
}
class TexLexer(RegexLexer):
"""
Lexer for the TeX and LaTeX typesetting languages.
"""
name = 'TeX'
aliases = ['tex', 'latex']
filenames = ['*.tex', '*.aux', '*.toc']
mimetypes = ['text/x-tex', 'text/x-latex']
tokens = {
'general': [
(r'%.*?\n', Comment),
(r'[{}]', Name.Builtin),
(r'[&_^]', Name.Builtin),
],
'root': [
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
include('general'),
(r'[^\\$%&_^{}]+', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
(r'', Text, '#pop'),
],
}
def analyse_text(text):
for start in ("\\documentclass", "\\input", "\\documentstyle",
"\\relax"):
if text[:len(start)] == start:
return True
class GroffLexer(RegexLexer):
"""
Lexer for the (g)roff typesetting language, supporting groff
extensions. Mainly useful for highlighting manpage sources.
*New in Pygments 0.6.*
"""
name = 'Groff'
aliases = ['groff', 'nroff', 'man']
filenames = ['*.[1234567]', '*.man']
mimetypes = ['application/x-troff', 'text/troff']
tokens = {
'root': [
(r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
(r'\.', Punctuation, 'request'),
# Regular characters, slurp till we find a backslash or newline
(r'[^\\\n]*', Text, 'textline'),
],
'textline': [
include('escapes'),
(r'[^\\\n]+', Text),
(r'\n', Text, '#pop'),
],
'escapes': [
# groff has many ways to write escapes.
(r'\\"[^\n]*', Comment),
(r'\\[fn]\w', String.Escape),
(r'\\\(.{2}', String.Escape),
(r'\\.\[.*\]', String.Escape),
(r'\\.', String.Escape),
(r'\\\n', Text, 'request'),
],
'request': [
(r'\n', Text, '#pop'),
include('escapes'),
(r'"[^\n"]+"', String.Double),
(r'\d+', Number),
(r'\S+', String),
(r'\s+', Text),
],
}
def analyse_text(text):
if text[:1] != '.':
return False
if text[:3] == '.\\"':
return True
if text[:4] == '.TH ':
return True
if text[1:3].isalnum() and text[3].isspace():
return 0.9
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
*New in Pygments 0.6.*
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
(r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-zA-Z][a-zA-Z0-9_]*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
(r'\.+', Text),
],
'value': [
(r'$', Text, '#pop'),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'[^\s"]+', Text)
]
}
class MoinWikiLexer(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
*New in Pygments 0.7.*
"""
name = 'MoinMoin/Trac Wiki markup'
aliases = ['trac-wiki', 'moin']
filenames = []
mimetypes = ['text/x-trac-wiki']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^#.*$', Comment),
(r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
# Titles
(r'^(=+)([^=]+)(=+)(\s*#.+)?$',
bygroups(Generic.Heading, using(this), Generic.Heading, String)),
# Literal code blocks, with optional shebang
(r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
(r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
# Lists
(r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
(r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
# Other Formatting
(r'\[\[\w+.*?\]\]', Keyword), # Macro
(r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
bygroups(Keyword, String, Keyword)), # Link
(r'^----+$', Keyword), # Horizontal rules
(r'[^\n\'\[{!_~^,|]+', Text),
(r'\n', Text),
(r'.', Text),
],
'codeblock': [
(r'}}}', Name.Builtin, '#pop'),
# these blocks are allowed to be nested in Trac, but not MoinMoin
(r'{{{', Text, '#push'),
(r'[^{}]+', Comment.Preproc), # slurp boring text
(r'.', Comment.Preproc), # allow loose { or }
],
}
class RstLexer(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
*New in Pygments 0.7.*
Additional options accepted:
`handlecodeblocks`
        Highlight the contents of ``.. sourcecode:: language`` and
``.. code:: language`` directives with a lexer for the given
language (default: ``True``). *New in Pygments 0.8.*
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
filenames = ['*.rst', '*.rest']
mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
flags = re.MULTILINE
def _handle_sourcecode(self, match):
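        # Tokenize the directive itself, then try to highlight the indented
        # body with the lexer named in the directive argument; fall back to
        # a plain string if no such lexer exists.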
from pygments.lexers import get_lexer_by_name
# section header
yield match.start(1), Punctuation, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator.Word, match.group(3)
yield match.start(4), Punctuation, match.group(4)
yield match.start(5), Text, match.group(5)
yield match.start(6), Keyword, match.group(6)
yield match.start(7), Text, match.group(7)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name(match.group(6).strip())
except ClassNotFound:
pass
indention = match.group(8)
indention_size = len(indention)
code = (indention + match.group(9) + match.group(10) + match.group(11))
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(8), String, code
return
# highlight the lines with the lexer.
ins = []
codelines = code.splitlines(True)
code = ''
for line in codelines:
if len(line) > indention_size:
ins.append((len(code), [(0, Text, line[:indention_size])]))
code += line[indention_size:]
else:
code += line
for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
yield item
# from docutils.parsers.rst.states
closers = '\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = '\u2010\u2011\u2012\u2013\u2014\u00a0'
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
tokens = {
'root': [
# Heading with overline
(r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
(r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# Bulleted lists
(r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered lists
(r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered, but keep words at BOL from becoming lists
(r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
# Line blocks
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
_handle_sourcecode),
# A directive
(r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote/citation target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A substitution def
(r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list
(r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
(r'^( *)(:.*?:)([ \t]+)(.*?)$',
bygroups(Text, Name.Class, Text, Name.Function)),
# Definition list
(r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
bygroups(String.Escape, Text, String, String, Text, String)),
include('inline'),
],
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
(r'(`.+?)(<.+?>)(`__?)', # reference with inline target
bygroups(String, String.Interpol, String)),
(r'`.+?`__?', String), # reference
(r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
(r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
(r'<.+?>', Name.Tag), # Hyperlink
(r'[^\\\n\[*`:]+', Text),
(r'.', Text),
],
'literal': [
(r'[^`]+', String),
(r'``' + end_string_suffix, String, '#pop'),
(r'`', String),
]
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
def analyse_text(text):
if text[:2] == '..' and text[2:3] != '.':
return 0.3
p1 = text.find("\n")
p2 = text.find("\n", p1 + 1)
if (p2 > -1 and # has two lines
p1 * 2 + 1 == p2 and # they are the same length
text[p1+1] in '-=' and # the next line both starts and ends with
text[p1+1] == text[p2-1]): # ...a sufficiently high header
return 0.5
class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
*New in Pygments 0.8.*
"""
name = 'VimL'
aliases = ['vim']
filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc',
'_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc']
mimetypes = ['text/x-vim']
flags = re.MULTILINE
tokens = {
'root': [
(r'^\s*".*', Comment),
(r'[ \t]+', Text),
# TODO: regexes can have other delims
(r'/(\\\\|\\/|[^\n/])*/', String.Regex),
(r'"(\\\\|\\"|[^\n"])*"', String.Double),
(r"'(\\\\|\\'|[^\n'])*'", String.Single),
# Who decided that doublequote was a good comment character??
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
(r'-?\d+', Number),
(r'#[0-9a-f]{6}', Number.Hex),
(r'^:', Punctuation),
(r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
(r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
Keyword),
(r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
(r'\b\w+\b', Name.Other), # These are postprocessed below
(r'.', Text),
],
}
def __init__(self, **options):
from pygments.lexers._vimbuiltins import command, option, auto
self._cmd = command
self._opt = option
self._aut = auto
RegexLexer.__init__(self, **options)
def is_in(self, w, mapping):
r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
"""
p = bisect(mapping, (w,))
if p > 0:
if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
mapping[p-1][1][:len(w)] == w: return True
if p < len(mapping):
return mapping[p][0] == w[:len(mapping[p][0])] and \
mapping[p][1][:len(w)] == w
return False
def get_tokens_unprocessed(self, text):
# TODO: builtins are only subsequent tokens on lines
# and 'keywords' only happen at the beginning except
# for :au ones
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name.Other:
if self.is_in(value, self._cmd):
yield index, Keyword, value
elif self.is_in(value, self._opt) or \
self.is_in(value, self._aut):
yield index, Name.Builtin, value
else:
yield index, Text, value
else:
yield index, token, value
class GettextLexer(RegexLexer):
"""
Lexer for Gettext catalog files.
*New in Pygments 0.9.*
"""
name = 'Gettext Catalog'
aliases = ['pot', 'po']
filenames = ['*.pot', '*.po']
mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']
tokens = {
'root': [
(r'^#,\s.*?$', Keyword.Type),
(r'^#:\s.*?$', Keyword.Declaration),
#(r'^#$', Comment),
(r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
(r'^(")([A-Za-z-]+:)(.*")$',
bygroups(String, Name.Property, String)),
(r'^".*"$', String),
(r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
bygroups(Name.Variable, Text, String)),
(r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
]
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
*New in Pygments 0.9.*
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = [
"access_log", "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to", "anonymize_headers",
"append_domain", "as_whois_server", "auth_param_basic",
"authenticate_children", "authenticate_program", "authenticate_ttl",
"broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
"cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
"cache_peer_access", "cahce_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
"client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
"dead_peer_timeout", "debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters", "delay_pools",
"deny_info", "dns_children", "dns_defnames", "dns_nameservers",
"dns_testnames", "emulate_httpd_log", "err_html_text",
"fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
"fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients", "header_access",
"header_replace", "hierarchy_stoplist", "high_response_time_warning",
"high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
"http_anonymizer", "httpd_accel", "httpd_accel_host",
"httpd_accel_port", "httpd_accel_uses_host_header",
"httpd_accel_with_proxy", "http_port", "http_reply_access",
"icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average", "inside_firewall",
"ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
"local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
"log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy", "mime_table",
"min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
"minimum_object_size", "minimum_retry_timeout", "miss_access",
"negative_dns_ttl", "negative_ttl", "neighbor_timeout",
"neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
"pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
"prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
"quick_abort", "quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age", "reference_age",
"refresh_pattern", "reload_into_ims", "request_body_max_size",
"request_size", "request_timeout", "shutdown_lifetime",
"single_parent_bypass", "siteselect_timeout", "snmp_access",
"snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
"test_reachability", "udp_hit_obj", "udp_hit_obj_size",
"udp_incoming_address", "udp_outgoing_address", "unique_hostname",
"unlinkd_program", "uri_whitespace", "useragent_log",
"visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
]
opts = [
"proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
"multicast-responder", "on", "off", "all", "deny", "allow", "via",
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
"credentialsttl", "none", "disable", "offline_toggle", "diskd",
]
actions = [
"shutdown", "info", "parameter", "server_list", "client_list",
r'squid\.conf',
]
actions_stats = [
"objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
"redirector", "io", "reply_headers", "filedescriptors", "netdb",
]
actions_log = ["status", "enable", "disable", "clear"]
acls = [
"url_regex", "urlpath_regex", "referer_regex", "port", "proto",
"req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
"dst", "time", "dstdomain", "ident", "snmp_community",
]
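    # Matches IPv4 addresses (decimal, octal or hex octets) as well as IPv6.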
ip_re = (
r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
r'[1-9]?\d)){3}))'
)
def makelistre(list):
return r'\b(?:' + '|'.join(list) + r')\b'
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#', Comment, 'comment'),
(makelistre(keywords), Keyword),
(makelistre(opts), Name.Constant),
# Actions
(makelistre(actions), String),
(r'stats/'+makelistre(actions), String),
(r'log/'+makelistre(actions)+r'=', String),
(makelistre(acls), Keyword),
(ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
(r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.*', Comment, '#pop'),
],
}
class DebianControlLexer(RegexLexer):
"""
Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
*New in Pygments 0.9.*
"""
name = 'Debian Control file'
aliases = ['control', 'debcontrol']
filenames = ['control']
tokens = {
'root': [
(r'^(Description)', Keyword, 'description'),
(r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'),
(r'^((Build-)?Depends)', Keyword, 'depends'),
(r'^((?:Python-)?Version)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^((?:Installed-)?Size)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^(MD5Sum|SHA1|SHA256)(:\s*)(\S+)$',
bygroups(Keyword, Text, Number)),
(r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
bygroups(Keyword, Whitespace, String)),
],
'maintainer': [
(r'<[^>]+>', Generic.Strong),
(r'<[^>]+>$', Generic.Strong, '#pop'),
(r',\n?', Text),
(r'.', Text),
],
'description': [
(r'(.*)(Homepage)(: )(\S+)',
bygroups(Text, String, Name, Name.Class)),
(r':.*\n', Generic.Strong),
(r' .*\n', Text),
('', Text, '#pop'),
],
'depends': [
(r':\s*', Text),
(r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)),
(r'\(', Text, 'depend_vers'),
(r',', Text),
(r'\|', Operator),
(r'[\s]+', Text),
(r'[}\)]\s*$', Text, '#pop'),
(r'}', Text),
(r'[^,]$', Name.Function, '#pop'),
(r'([\+\.a-zA-Z0-9-])(\s*)', bygroups(Name.Function, Text)),
(r'\[.*?\]', Name.Entity),
],
'depend_vers': [
(r'\),', Text, '#pop'),
(r'\)[^,]', Text, '#pop:2'),
(r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number))
]
}
class YamlLexerContext(LexerContext):
"""Indentation context for the YAML lexer."""
def __init__(self, *args, **kwds):
super(YamlLexerContext, self).__init__(*args, **kwds)
self.indent_stack = []
self.indent = -1
self.next_indent = 0
self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
"""
Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
language.
*New in Pygments 0.11.*
"""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
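    # The helpers below are callback factories: each returns a function
    # that ExtendedRegexLexer calls with (lexer, match, context) and that
    # keeps the indentation bookkeeping on a YamlLexerContext.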
def something(token_class):
"""Do not produce empty tokens."""
def callback(lexer, match, context):
text = match.group()
if not text:
return
yield match.start(), token_class, text
context.pos = match.end()
return callback
def reset_indent(token_class):
"""Reset the indentation levels."""
def callback(lexer, match, context):
text = match.group()
context.indent_stack = []
context.indent = -1
context.next_indent = 0
context.block_scalar_indent = None
yield match.start(), token_class, text
context.pos = match.end()
return callback
def save_indent(token_class, start=False):
"""Save a possible indentation level."""
def callback(lexer, match, context):
text = match.group()
extra = ''
if start:
context.next_indent = len(text)
if context.next_indent < context.indent:
while context.next_indent < context.indent:
context.indent = context.indent_stack.pop()
if context.next_indent > context.indent:
extra = text[context.indent:]
text = text[:context.indent]
else:
context.next_indent += len(text)
if text:
yield match.start(), token_class, text
if extra:
yield match.start()+len(text), token_class.Error, extra
context.pos = match.end()
return callback
def set_indent(token_class, implicit=False):
"""Set the previously saved indentation level."""
def callback(lexer, match, context):
text = match.group()
if context.indent < context.next_indent:
context.indent_stack.append(context.indent)
context.indent = context.next_indent
if not implicit:
context.next_indent += len(text)
yield match.start(), token_class, text
context.pos = match.end()
return callback
def set_block_scalar_indent(token_class):
"""Set an explicit indentation level for a block scalar."""
def callback(lexer, match, context):
text = match.group()
context.block_scalar_indent = None
if not text:
return
increment = match.group(1)
if increment:
current_indent = max(context.indent, 0)
increment = int(increment)
context.block_scalar_indent = current_indent + increment
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_block_scalar_empty_line(indent_token_class, content_token_class):
"""Process an empty line in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if (context.block_scalar_indent is None or
len(text) <= context.block_scalar_indent):
if text:
yield match.start(), indent_token_class, text
else:
indentation = text[:context.block_scalar_indent]
content = text[context.block_scalar_indent:]
yield match.start(), indent_token_class, indentation
yield (match.start()+context.block_scalar_indent,
content_token_class, content)
context.pos = match.end()
return callback
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
def parse_plain_scalar_indent(token_class):
"""Process indentation spaces in a plain scalar."""
def callback(lexer, match, context):
text = match.group()
if len(text) <= context.indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
'block-line'),
# indentation spaces
(r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text, Number), 'ignored-line'),
],
        # the %TAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![0-9A-Za-z_-]*!)'
r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
bygroups(Text, Keyword.Type, Text, Keyword.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text), '#pop:2'),
            # whitespace preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning of a block line
(r'[ ]*', save_indent(Text), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
something(Name.Variable),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors' : [
# a full-form tag
(r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[0-9A-Za-z_-]+)?'
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
# an anchor
(r'&[0-9A-Za-z_-]+', Name.Label),
# an alias
(r'\*[0-9A-Za-z_-]+', Name.Variable),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', String, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', String, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text),
# line breaks
(r'\n+', Text),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
something(Name.Variable),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text, Name.Constant)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text)),
# line content
(r'[^\n\r\f\v]+', Name.Constant),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\']+', String),
# the closing quote
(r'\'', String, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', String),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
String.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\"\\]+', String),
# the closing quote
(r'"', String, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text),
# line breaks
(r'\n+', Text),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
],
        # a plain scalar in the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+', Text),
(r'[ ]+$', Text),
# line breaks are ignored
(r'\n+', Text),
# other whitespaces are a part of the value
(r'[ ]+', Name.Variable),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
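        # Substitute the indentation-aware context unless one was supplied.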
if context is None:
context = YamlLexerContext(text, 0)
return super(YamlLexer, self).get_tokens_unprocessed(text, context)
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
filenames = []
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
*New in Pygments 0.11.*
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = []
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
#(r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
class CMakeLexer(RegexLexer):
"""
Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
*New in Pygments 1.2.*
"""
name = 'CMake'
aliases = ['cmake']
filenames = ['*.cmake', 'CMakeLists.txt']
mimetypes = ['text/x-cmake']
tokens = {
'root': [
#(r'(ADD_CUSTOM_COMMAND|ADD_CUSTOM_TARGET|ADD_DEFINITIONS|'
# r'ADD_DEPENDENCIES|ADD_EXECUTABLE|ADD_LIBRARY|ADD_SUBDIRECTORY|'
# r'ADD_TEST|AUX_SOURCE_DIRECTORY|BUILD_COMMAND|BUILD_NAME|'
# r'CMAKE_MINIMUM_REQUIRED|CONFIGURE_FILE|CREATE_TEST_SOURCELIST|'
# r'ELSE|ELSEIF|ENABLE_LANGUAGE|ENABLE_TESTING|ENDFOREACH|'
# r'ENDFUNCTION|ENDIF|ENDMACRO|ENDWHILE|EXEC_PROGRAM|'
# r'EXECUTE_PROCESS|EXPORT_LIBRARY_DEPENDENCIES|FILE|FIND_FILE|'
# r'FIND_LIBRARY|FIND_PACKAGE|FIND_PATH|FIND_PROGRAM|FLTK_WRAP_UI|'
# r'FOREACH|FUNCTION|GET_CMAKE_PROPERTY|GET_DIRECTORY_PROPERTY|'
# r'GET_FILENAME_COMPONENT|GET_SOURCE_FILE_PROPERTY|'
# r'GET_TARGET_PROPERTY|GET_TEST_PROPERTY|IF|INCLUDE|'
# r'INCLUDE_DIRECTORIES|INCLUDE_EXTERNAL_MSPROJECT|'
# r'INCLUDE_REGULAR_EXPRESSION|INSTALL|INSTALL_FILES|'
# r'INSTALL_PROGRAMS|INSTALL_TARGETS|LINK_DIRECTORIES|'
# r'LINK_LIBRARIES|LIST|LOAD_CACHE|LOAD_COMMAND|MACRO|'
# r'MAKE_DIRECTORY|MARK_AS_ADVANCED|MATH|MESSAGE|OPTION|'
# r'OUTPUT_REQUIRED_FILES|PROJECT|QT_WRAP_CPP|QT_WRAP_UI|REMOVE|'
# r'REMOVE_DEFINITIONS|SEPARATE_ARGUMENTS|SET|'
# r'SET_DIRECTORY_PROPERTIES|SET_SOURCE_FILES_PROPERTIES|'
# r'SET_TARGET_PROPERTIES|SET_TESTS_PROPERTIES|SITE_NAME|'
# r'SOURCE_GROUP|STRING|SUBDIR_DEPENDS|SUBDIRS|'
# r'TARGET_LINK_LIBRARIES|TRY_COMPILE|TRY_RUN|UNSET|'
# r'USE_MANGLED_MESA|UTILITY_SOURCE|VARIABLE_REQUIRES|'
# r'VTK_MAKE_INSTANTIATOR|VTK_WRAP_JAVA|VTK_WRAP_PYTHON|'
# r'VTK_WRAP_TCL|WHILE|WRITE_FILE|'
# r'COUNTARGS)\b', Name.Builtin, 'args'),
(r'\b(\w+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
Punctuation), 'args'),
include('keywords'),
include('ws')
],
'args': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
(r'(?s)".*?"', String.Double),
(r'\\\S+', String),
(r'[^\)$"# \t\n]+', String),
(r'\n', Text), # explicitly legal
include('keywords'),
include('ws')
],
'string': [
],
'keywords': [
(r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
],
'ws': [
(r'[ \t]+', Text),
(r'#.+\n', Comment),
]
}
class HttpLexer(RegexLexer):
"""
Lexer for HTTP sessions.
*New in Pygments 1.5.*
"""
name = 'HTTP'
aliases = ['http']
flags = re.DOTALL
def header_callback(self, match):
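        # Remember the Content-Type so content_callback() can pick a
        # matching sub-lexer for the message body.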
if match.group(1).lower() == 'content-type':
content_type = match.group(5).strip()
if ';' in content_type:
content_type = content_type[:content_type.find(';')].strip()
self.content_type = content_type
yield match.start(1), Name.Attribute, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator, match.group(3)
yield match.start(4), Text, match.group(4)
yield match.start(5), Literal, match.group(5)
yield match.start(6), Text, match.group(6)
def continuous_header_callback(self, match):
yield match.start(1), Text, match.group(1)
yield match.start(2), Literal, match.group(2)
yield match.start(3), Text, match.group(3)
def content_callback(self, match):
content_type = getattr(self, 'content_type', None)
content = match.group()
offset = match.start()
if content_type:
from pygments.lexers import get_lexer_for_mimetype
try:
lexer = get_lexer_for_mimetype(content_type)
except ClassNotFound:
pass
else:
for idx, token, value in lexer.get_tokens_unprocessed(content):
yield offset + idx, token, value
return
yield offset, Text, content
tokens = {
'root': [
(r'(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH)( +)([^ ]+)( +)'
r'(HTTP)(/)(1\.[01])(\r?\n|$)',
bygroups(Name.Function, Text, Name.Namespace, Text,
Keyword.Reserved, Operator, Number, Text),
'headers'),
(r'(HTTP)(/)(1\.[01])( +)(\d{3})( +)([^\r\n]+)(\r?\n|$)',
bygroups(Keyword.Reserved, Operator, Number, Text, Number,
Text, Name.Exception, Text),
'headers'),
],
'headers': [
(r'([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|$)', header_callback),
(r'([\t ]+)([^\r\n]+)(\r?\n|$)', continuous_header_callback),
(r'\r?\n', Text, 'content')
],
'content': [
(r'.+', content_callback)
]
}
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
*New in Pygments 1.5.*
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"^\+\d+: ", Comment),
(r"--end of the loop--", Comment),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()(\w+(?:\.\w+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr|TargetToken)", Name),
(r"<.*?>+", Name.Builtin),
(r"(label|debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
r"int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|float_neg|"
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
r"ptr_eq|ptr_ne|instance_ptr_eq|instance_ptr_ne|"
r"cast_int_to_float|cast_float_to_int|"
r"force_token|quasiimmut_field|same_as|virtual_ref_finish|"
r"virtual_ref|mark_opaque_ptr|"
r"call_may_force|call_assembler|call_loopinvariant|"
r"call_release_gil|call_pure|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|"
r"getfield_gc|getinteriorfield_gc|setinteriorfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r":", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"\s+", Text),
(r"#.*?$", Comment),
],
}
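    # Illustrative (made-up) log fragment matched by the states above: the
    # first line enters 'jit-log', the last one pops back out:
    #
    #   [2f4b3c] {jit-log-opt-loop
    #   +12: i3 = int_add(i1, i2)
    #   [2f4b3d] jit-log-opt-loop}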
class HxmlLexer(RegexLexer):
"""
Lexer for `haXe build <http://haxe.org/doc/compiler>`_ files.
*New in Pygments 1.6.*
"""
name = 'Hxml'
aliases = ['haxeml', 'hxml']
filenames = ['*.hxml']
tokens = {
'root': [
            # Separator
(r'(--)(next)', bygroups(Punctuation, Generic.Heading)),
# Compiler switches with one dash
(r'(-)(prompt|debug|v)', bygroups(Punctuation, Keyword.Keyword)),
            # Compiler switches with two dashes
(r'(--)(neko-source|flash-strict|flash-use-stage|no-opt|no-traces|'
r'no-inline|times|no-output)', bygroups(Punctuation, Keyword)),
# Targets and other options that take an argument
(r'(-)(cpp|js|neko|x|as3|swf9?|swf-lib|php|xml|main|lib|D|resource|'
r'cp|cmd)( +)(.+)',
bygroups(Punctuation, Keyword, Whitespace, String)),
# Options that take only numerical arguments
            (r'(-)(swf-version)( +)(\d+)',
             bygroups(Punctuation, Keyword, Whitespace, Number.Integer)),
            # An option that defines the size, the fps and the background
            # color of a Flash movie
(r'(-)(swf-header)( +)(\d+)(:)(\d+)(:)(\d+)(:)([A-Fa-f0-9]{6})',
bygroups(Punctuation, Keyword, Whitespace, Number.Integer,
Punctuation, Number.Integer, Punctuation, Number.Integer,
Punctuation, Number.Hex)),
            # Options with two dashes that take arguments
(r'(--)(js-namespace|php-front|php-lib|remap|gen-hx-classes)( +)'
r'(.+)', bygroups(Punctuation, Keyword, Whitespace, String)),
# Single line comment, multiline ones are not allowed.
(r'#.*', Comment.Single)
]
}
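    # Illustrative (made-up) .hxml lines matched by the rules above: a target
    # with an argument, a one-dash switch, and the '--next' separator:
    #
    #   -js bin/app.js
    #   -debug
    #   --next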
class EbnfLexer(RegexLexer):
"""
Lexer for `ISO/IEC 14977 EBNF
<http://en.wikipedia.org/wiki/Extended_Backus%E2%80%93Naur_Form>`_
grammars.
*New in Pygments 1.7.*
"""
name = 'EBNF'
aliases = ['ebnf']
filenames = ['*.ebnf']
mimetypes = ['text/x-ebnf']
tokens = {
'root': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'=', Operator, 'production'),
],
'production': [
include('whitespace'),
include('comment_start'),
include('identifier'),
(r'"[^"]*"', String.Double),
(r"'[^']*'", String.Single),
(r'(\?[^?]*\?)', Name.Entity),
(r'[\[\]{}(),|]', Punctuation),
(r'-', Operator),
(r';', Punctuation, '#pop'),
],
'whitespace': [
(r'\s+', Text),
],
'comment_start': [
(r'\(\*', Comment.Multiline, 'comment'),
],
'comment': [
(r'[^*)]', Comment.Multiline),
include('comment_start'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[*)]', Comment.Multiline),
],
'identifier': [
(r'([a-zA-Z][a-zA-Z0-9 \-]*)', Keyword),
],
}
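    # Illustrative (made-up) rule exercising the states above: an identifier,
    # '=' entering the 'production' state, quoted terminals, and a comment,
    # with ';' popping back to 'root':
    #
    #   digit = "0" | "1" ;  (* a toy rule *)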
| mit | -748,726,642,388,530,200 | 35.332277 | 89 | 0.47244 | false | 3.666151 | false | false | false | 0.000974 |
waytai/django-rest-framework | rest_framework/utils/model_meta.py | 71 | 5904 | """
Helper function for returning the field information that is associated
with a model class. This includes returning all the forward and reverse
relationships and their associated metadata.
Usage: `get_field_info(model)` returns a `FieldInfo` instance.
"""
import inspect
from collections import namedtuple
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils import six
from rest_framework.compat import OrderedDict
FieldInfo = namedtuple('FieldResult', [
'pk', # Model field instance
'fields', # Dict of field name -> model field instance
'forward_relations', # Dict of field name -> RelationInfo
'reverse_relations', # Dict of field name -> RelationInfo
'fields_and_pk', # Shortcut for 'pk' + 'fields'
'relations' # Shortcut for 'forward_relations' + 'reverse_relations'
])
RelationInfo = namedtuple('RelationInfo', [
'model_field',
'related_model',
'to_many',
'has_through_model'
])
def _resolve_model(obj):
"""
Resolve supplied `obj` to a Django model class.
`obj` must be a Django model class itself, or a string
representation of one. Useful in situations like GH #1225 where
Django may not have resolved a string-based reference to a model in
another model's foreign key definition.
String representations should have the format:
'appname.ModelName'
"""
if isinstance(obj, six.string_types) and len(obj.split('.')) == 2:
app_name, model_name = obj.split('.')
resolved_model = models.get_model(app_name, model_name)
if resolved_model is None:
msg = "Django did not return a model for {0}.{1}"
raise ImproperlyConfigured(msg.format(app_name, model_name))
return resolved_model
elif inspect.isclass(obj) and issubclass(obj, models.Model):
return obj
raise ValueError("{0} is not a Django model".format(obj))
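# Usage sketch (hypothetical app/model names): a model class and its dotted
# string reference resolve to the same class, e.g.
#
#   _resolve_model('blog.BlogPost') is _resolve_model(BlogPost)   # -> True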
def get_field_info(model):
"""
Given a model class, returns a `FieldInfo` instance, which is a
`namedtuple`, containing metadata about the various field types on the model
including information about their relationships.
"""
opts = model._meta.concrete_model._meta
pk = _get_pk(opts)
fields = _get_fields(opts)
forward_relations = _get_forward_relationships(opts)
reverse_relations = _get_reverse_relationships(opts)
fields_and_pk = _merge_fields_and_pk(pk, fields)
relationships = _merge_relationships(forward_relations, reverse_relations)
return FieldInfo(pk, fields, forward_relations, reverse_relations,
fields_and_pk, relationships)
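# Usage sketch (hypothetical model): each bucket of the returned namedtuple
# can be inspected directly, e.g.
#
#   info = get_field_info(BlogPost)
#   info.pk.name                        # 'id'
#   list(info.fields)                   # ['title', 'body', ...]
#   info.relations['author'].to_many    # False for a ForeignKey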
def _get_pk(opts):
pk = opts.pk
while pk.rel and pk.rel.parent_link:
# If model is a child via multi-table inheritance, use parent's pk.
pk = pk.rel.to._meta.pk
return pk
def _get_fields(opts):
fields = OrderedDict()
for field in [field for field in opts.fields if field.serialize and not field.rel]:
fields[field.name] = field
return fields
def _get_forward_relationships(opts):
"""
Returns an `OrderedDict` of field names to `RelationInfo`.
"""
forward_relations = OrderedDict()
for field in [field for field in opts.fields if field.serialize and field.rel]:
forward_relations[field.name] = RelationInfo(
model_field=field,
related_model=_resolve_model(field.rel.to),
to_many=False,
has_through_model=False
)
# Deal with forward many-to-many relationships.
for field in [field for field in opts.many_to_many if field.serialize]:
forward_relations[field.name] = RelationInfo(
model_field=field,
related_model=_resolve_model(field.rel.to),
to_many=True,
has_through_model=(
not field.rel.through._meta.auto_created
)
)
return forward_relations
def _get_reverse_relationships(opts):
"""
Returns an `OrderedDict` of field names to `RelationInfo`.
"""
# Note that we have a hack here to handle internal API differences for
# this internal API across Django 1.7 -> Django 1.8.
# See: https://code.djangoproject.com/ticket/24208
reverse_relations = OrderedDict()
for relation in opts.get_all_related_objects():
accessor_name = relation.get_accessor_name()
related = getattr(relation, 'related_model', relation.model)
reverse_relations[accessor_name] = RelationInfo(
model_field=None,
related_model=related,
to_many=relation.field.rel.multiple,
has_through_model=False
)
# Deal with reverse many-to-many relationships.
for relation in opts.get_all_related_many_to_many_objects():
accessor_name = relation.get_accessor_name()
related = getattr(relation, 'related_model', relation.model)
reverse_relations[accessor_name] = RelationInfo(
model_field=None,
related_model=related,
to_many=True,
has_through_model=(
(getattr(relation.field.rel, 'through', None) is not None) and
not relation.field.rel.through._meta.auto_created
)
)
return reverse_relations
def _merge_fields_and_pk(pk, fields):
fields_and_pk = OrderedDict()
fields_and_pk['pk'] = pk
fields_and_pk[pk.name] = pk
fields_and_pk.update(fields)
return fields_and_pk
def _merge_relationships(forward_relations, reverse_relations):
return OrderedDict(
list(forward_relations.items()) +
list(reverse_relations.items())
)
def is_abstract_model(model):
"""
Given a model class, returns a boolean True if it is abstract and False if it is not.
"""
return hasattr(model, '_meta') and hasattr(model._meta, 'abstract') and model._meta.abstract
| bsd-2-clause | -5,724,848,676,343,219,000 | 32.355932 | 96 | 0.658706 | false | 4.021798 | false | false | false | 0.000847 |
the-black-eagle/script.cu.lrclyrics | resources/lib/gui.py | 1 | 22439 | #-*- coding: UTF-8 -*-
import sys
import os
import re
import thread, threading
import xbmc, xbmcgui, xbmcvfs
from threading import Timer
from utilities import *
from embedlrc import *
CWD = sys.modules[ "__main__" ].CWD
ADDON = sys.modules[ "__main__" ].ADDON
ADDONNAME = sys.modules[ "__main__" ].ADDONNAME
PROFILE = sys.modules[ "__main__" ].PROFILE
LANGUAGE = sys.modules[ "__main__" ].LANGUAGE
class MAIN():
def __init__(self, *args, **kwargs):
self.mode = kwargs['mode']
self.setup_main()
WIN.setProperty('culrc.running', 'true')
self.get_scraper_list()
if ( ADDON.getSetting( "save_lyrics_path" ) == "" ):
ADDON.setSetting(id="save_lyrics_path", value=os.path.join( PROFILE.encode("utf-8"), "lyrics" ))
self.main_loop()
self.cleanup_main()
def setup_main(self):
self.fetchedLyrics = []
self.current_lyrics = Lyrics()
self.MyPlayer = MyPlayer(function=self.myPlayerChanged, clear=self.clear)
self.Monitor = MyMonitor(function=self.update_settings)
def cleanup_main(self):
# Clean up the monitor and Player classes on exit
del self.MyPlayer
del self.Monitor
def get_scraper_list(self):
self.scrapers = []
for scraper in os.listdir(LYRIC_SCRAPER_DIR):
if os.path.isdir(os.path.join(LYRIC_SCRAPER_DIR, scraper)) and ADDON.getSetting( scraper ) == "true":
exec ( "from culrcscrapers.%s import lyricsScraper as lyricsScraper_%s" % (scraper, scraper))
exec ( "self.scrapers.append([lyricsScraper_%s.__priority__,lyricsScraper_%s.LyricsFetcher(),lyricsScraper_%s.__title__,lyricsScraper_%s.__lrc__])" % (scraper, scraper, scraper, scraper))
self.scrapers.sort()
def main_loop(self):
self.triggered = False
# main loop
while (not self.Monitor.abortRequested()) and (WIN.getProperty('culrc.quit') == ''):
# Check if there is a manual override request
if WIN.getProperty('culrc.manual') == 'true':
log('searching for manually defined lyrics')
self.get_manual_lyrics()
# check if we are on the music visualization screen
# do not try and get lyrics for any background media
elif xbmc.getCondVisibility("Window.IsVisible(12006)") and xbmcgui.Window(10025).getProperty("PlayingBackgroundMedia") in [None, ""]:
if not self.triggered:
self.triggered = True
# notify user the script is running
if ADDON.getSetting( "silent" ) == 'false':
xbmc.executebuiltin((u'Notification(%s,%s,%i)' % (ADDONNAME , LANGUAGE(32004), 2000)).encode('utf-8', 'ignore'))
# start fetching lyrics
self.myPlayerChanged()
elif WIN.getProperty('culrc.force') == 'TRUE':
# we're already running, user clicked button on osd
WIN.setProperty('culrc.force','FALSE')
self.current_lyrics = Lyrics()
self.myPlayerChanged()
elif xbmc.getCondVisibility("Player.IsInternetStream"):
self.myPlayerChanged()
else:
# we may have exited the music visualization screen
self.triggered = False
# reset current lyrics so we show them again when re-entering the visualization screen
self.current_lyrics = Lyrics()
xbmc.sleep(1000)
WIN.clearProperty('culrc.quit')
WIN.clearProperty('culrc.lyrics')
WIN.clearProperty('culrc.islrc')
WIN.clearProperty('culrc.source')
WIN.clearProperty('culrc.haslist')
WIN.clearProperty('culrc.running')
def get_lyrics(self, song):
#xbmc.sleep( 60 )
log('searching memory for lyrics')
lyrics = self.get_lyrics_from_memory( song )
if lyrics:
log('found lyrics in memory')
return lyrics
if song.title:
lyrics = self.find_lyrics( song )
if ADDON.getSetting( 'strip' ) == "true":
if isinstance (lyrics.lyrics,str):
fulltext = lyrics.lyrics.decode("utf-8")
else:
fulltext = lyrics.lyrics
strip_k = re.sub(ur"[\u1100-\u11ff]+", "", fulltext)
strip_c = re.sub(ur"[\u3000-\u9fff]+", "", strip_k)
lyrics.lyrics = strip_c.encode("utf-8")
else:
lyrics = Lyrics()
lyrics.song = song
lyrics.source = ''
lyrics.lyrics = ''
self.save_lyrics_to_memory(lyrics)
return lyrics
def find_lyrics(self, song):
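        # Search order: embedded LRC -> LRC file on disk -> LRC scrapers,
        # then the same three sources again for plain-text lyrics.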
# search embedded lrc lyrics
ext = os.path.splitext(song.filepath.decode("utf-8"))[1].lower()
sup_ext = ['.mp3', '.flac']
if ( ADDON.getSetting( "search_embedded" ) == "true") and song.analyze_safe and (ext in sup_ext):
log('searching for embedded lrc lyrics')
try:
lyrics = getEmbedLyrics(song, True)
except:
lyrics = None
if ( lyrics ):
log('found embedded lrc lyrics')
return lyrics
# search lrc lyrics from file
if ( ADDON.getSetting( "search_file" ) == "true" ):
lyrics = self.get_lyrics_from_file(song, True)
if ( lyrics ):
log('found lrc lyrics from file')
return lyrics
# search lrc lyrics by scrapers
for scraper in self.scrapers:
if scraper[3]:
lyrics = scraper[1].get_lyrics( song )
if ( lyrics ):
log('found lrc lyrics online')
self.save_lyrics_to_file( lyrics )
return lyrics
# search embedded txt lyrics
if ( ADDON.getSetting( "search_embedded" ) == "true" and song.analyze_safe ):
log('searching for embedded txt lyrics')
try:
lyrics = getEmbedLyrics(song, False)
except:
lyrics = None
if lyrics:
log('found embedded txt lyrics')
return lyrics
# search txt lyrics from file
if ( ADDON.getSetting( "search_file" ) == "true" ):
lyrics = self.get_lyrics_from_file(song, False)
if ( lyrics ):
log('found txt lyrics from file')
return lyrics
# search txt lyrics by scrapers
for scraper in self.scrapers:
if not scraper[3]:
lyrics = scraper[1].get_lyrics( song )
if ( lyrics ):
log('found txt lyrics online')
self.save_lyrics_to_file( lyrics )
return lyrics
log('no lyrics found')
lyrics = Lyrics()
lyrics.song = song
lyrics.source = ''
lyrics.lyrics = ''
return lyrics
def get_lyrics_from_memory(self, song):
for l in self.fetchedLyrics:
if ( l.song == song ):
return l
return None
def get_lyrics_from_file(self, song, getlrc):
log('searching files for lyrics')
lyrics = Lyrics()
lyrics.song = song
lyrics.source = LANGUAGE( 32000 )
lyrics.lrc = getlrc
if ADDON.getSetting( "save_lyrics1" ) == "true":
# Search save path by Cu LRC Lyrics
lyricsfile = song.path1(getlrc)
if xbmcvfs.exists(lyricsfile):
lyr = get_textfile( lyricsfile )
if lyr:
lyrics.lyrics = lyr
return lyrics
if ADDON.getSetting( "save_lyrics2" ) == "true":
# Search same path with song file
lyricsfile = song.path2(getlrc)
if xbmcvfs.exists(lyricsfile):
lyr = get_textfile( lyricsfile )
if lyr:
lyrics.lyrics = lyr
return lyrics
return None
def save_lyrics_to_memory(self, lyrics):
savedLyrics = self.get_lyrics_from_memory(lyrics.song)
if ( savedLyrics is None ):
self.fetchedLyrics.append(lyrics)
self.fetchedLyrics = self.fetchedLyrics[:10]
def save_lyrics_to_file(self, lyrics):
if isinstance (lyrics.lyrics, str):
lyr = lyrics.lyrics
else:
lyr = lyrics.lyrics.encode('utf-8')
if ( ADDON.getSetting( "save_lyrics1" ) == "true" ):
file_path = lyrics.song.path1(lyrics.lrc)
success = self.write_lyrics_file( file_path, lyr)
if ( ADDON.getSetting( "save_lyrics2" ) == "true" ):
file_path = lyrics.song.path2(lyrics.lrc)
success = self.write_lyrics_file( file_path, lyr)
def write_lyrics_file(self, file, data):
try:
if ( not xbmcvfs.exists( os.path.dirname( file ) ) ):
xbmcvfs.mkdirs( os.path.dirname( file ) )
lyrics_file = xbmcvfs.File( file, "w" )
lyrics_file.write( data )
lyrics_file.close()
return True
except:
log( "failed to save lyrics" )
return False
def myPlayerChanged(self):
global lyrics
for cnt in range( 5 ):
song = Song.current()
if ( song and ( self.current_lyrics.song != song ) ):
log("Current Song: %s - %s" % (song.artist, song.title))
lyrics = self.get_lyrics( song )
self.current_lyrics = lyrics
if lyrics.lyrics:
# signal the gui thread to display the next lyrics
WIN.setProperty('culrc.newlyrics', 'TRUE')
# check if gui is already running
if not WIN.getProperty('culrc.guirunning') == 'TRUE':
WIN.setProperty('culrc.guirunning', 'TRUE')
gui = guiThread(mode=self.mode)
gui.start()
else:
# signal gui thread to exit
WIN.setProperty('culrc.nolyrics', 'TRUE')
# notify user no lyrics were found
if ADDON.getSetting( "silent" ) == 'false':
xbmc.executebuiltin((u'Notification(%s,%s,%i)' % (ADDONNAME + ": " + LANGUAGE(32001), song.artist.decode("utf-8") + " - " + song.title.decode("utf-8"), 2000)).encode('utf-8', 'ignore'))
break
xbmc.sleep( 50 )
if xbmc.getCondVisibility('MusicPlayer.HasNext'):
next_song = Song.next()
if next_song:
log("Next Song: %s - %s" % (next_song.artist, next_song.title))
self.get_lyrics( next_song )
else:
log( "Missing Artist or Song name in ID3 tag for next track" )
def get_manual_lyrics(self):
# Read in the manually defined artist and track
if WIN.getProperty('culrc.manual') == 'true':
artist = WIN.getProperty('culrc.artist')
track = WIN.getProperty('culrc.track')
# Make sure we have both an artist and track name
if artist and track:
song = Song(artist, track)
if ( song and ( self.current_lyrics.song != song ) ):
log("Current Song: %s - %s" % (song.artist, song.title))
lyrics = self.get_lyrics( song )
self.current_lyrics = lyrics
if lyrics.lyrics:
# Store the details of the lyrics
WIN.setProperty('culrc.newlyrics', 'TRUE')
WIN.setProperty('culrc.lyrics', lyrics.lyrics)
WIN.setProperty('culrc.source', lyrics.source)
def update_settings(self):
self.get_scraper_list()
service = ADDON.getSetting('service')
if service == "true":
self.mode = 'service'
else:
self.mode = 'manual'
            # quit the script if mode was changed from service to manual
WIN.setProperty('culrc.quit', 'TRUE')
def clear(self):
WIN.clearProperty('culrc.lyrics')
WIN.clearProperty('culrc.islrc')
WIN.clearProperty('culrc.source')
WIN.clearProperty('culrc.haslist')
class guiThread(threading.Thread):
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self)
self.mode = kwargs[ "mode" ]
def run(self):
ui = GUI( "script-cu-lrclyrics-main.xml" , CWD, "Default", mode=self.mode )
ui.doModal()
del ui
WIN.clearProperty('culrc.guirunning')
class GUI( xbmcgui.WindowXMLDialog ):
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
self.mode = kwargs[ "mode" ]
self.Monitor = MyMonitor(function = None)
def onInit(self):
self.setup_gui()
self.process_lyrics()
self.gui_loop()
def process_lyrics(self):
global lyrics
self.lyrics = lyrics
self.stop_refresh()
self.reset_controls()
if self.lyrics.lyrics:
self.show_lyrics(self.lyrics)
else:
WIN.setProperty('culrc.lyrics', LANGUAGE( 32001 ))
self.getControl( 120 ).reset()
if self.lyrics.list:
WIN.setProperty('culrc.haslist', 'true')
WIN.clearProperty('culrc.islrc')
self.prepare_list(self.lyrics.list)
else:
WIN.clearProperty('culrc.haslist')
def gui_loop(self):
# gui loop
while self.showgui and (not self.Monitor.abortRequested()) and xbmc.getCondVisibility('Player.HasAudio'):
# check if we have new lyrics
if WIN.getProperty("culrc.newlyrics") == "TRUE":
WIN.clearProperty('culrc.newlyrics')
# show new lyrics
self.process_lyrics()
# check if we have no lyrics
elif WIN.getProperty("culrc.nolyrics") == "TRUE":
# no lyrics, close the gui
self.exit_gui('close')
xbmc.sleep(500)
# music ended, close the gui
if (not xbmc.getCondVisibility('Player.HasAudio')):
self.exit_gui('quit')
# xbmc quits, close the gui
elif self.Monitor.abortRequested():
self.exit_gui('quit')
def setup_gui(self):
WIN.clearProperty('culrc.newlyrics')
WIN.clearProperty('culrc.nolyrics')
WIN.clearProperty('culrc.haslist')
self.lock = thread.allocate_lock()
self.timer = None
self.allowtimer = True
self.refreshing = False
self.selected = False
self.controlId = -1
self.pOverlay = []
self.scroll_line = int(self.get_page_lines() / 2)
self.showgui = True
def get_page_lines(self):
self.getControl( 110 ).setVisible( False )
listitem = xbmcgui.ListItem()
while xbmc.getInfoLabel('Container(110).NumPages') != '2':
self.getControl(110).addItem(listitem)
xbmc.sleep(10)
lines = self.getControl( 110 ).size() - 1
return lines
def refresh(self):
self.lock.acquire()
try:
            # Maybe Kodi is not playing any media file
cur_time = xbmc.Player().getTime()
nums = self.getControl( 110 ).size()
pos = self.getControl( 110 ).getSelectedPosition()
if (cur_time < self.pOverlay[pos][0]):
while (pos > 0 and self.pOverlay[pos - 1][0] > cur_time):
pos = pos -1
else:
while (pos < nums - 1 and self.pOverlay[pos + 1][0] < cur_time):
pos = pos +1
if (pos + self.scroll_line > nums - 1):
self.getControl( 110 ).selectItem( nums - 1 )
else:
self.getControl( 110 ).selectItem( pos + self.scroll_line )
self.getControl( 110 ).selectItem( pos )
self.setFocus( self.getControl( 110 ) )
if (self.allowtimer and cur_time < self.pOverlay[nums - 1][0]):
waittime = self.pOverlay[pos + 1][0] - cur_time
self.timer = Timer(waittime, self.refresh)
self.refreshing = True
self.timer.start()
else:
self.refreshing = False
self.lock.release()
except:
self.lock.release()
def stop_refresh(self):
self.lock.acquire()
try:
self.timer.cancel()
except:
pass
self.refreshing = False
self.lock.release()
def show_control(self, controlId):
self.getControl( 110 ).setVisible( controlId == 110 )
self.getControl( 120 ).setVisible( controlId == 120 )
xbmc.sleep( 5 )
self.setFocus( self.getControl( controlId ) )
def show_lyrics(self, lyrics):
WIN.setProperty('culrc.lyrics', lyrics.lyrics)
WIN.setProperty('culrc.source', lyrics.source)
if lyrics.list:
source = '%s (%d)' % (lyrics.source, len(lyrics.list))
else:
source = lyrics.source
self.getControl( 200 ).setLabel( source )
if lyrics.lrc:
WIN.setProperty('culrc.islrc','true')
self.parser_lyrics( lyrics.lyrics )
for time, line in self.pOverlay:
listitem = xbmcgui.ListItem(line)
listitem.setProperty('time', str(time))
self.getControl( 110 ).addItem( listitem )
else:
WIN.clearProperty('culrc.islrc')
splitLyrics = lyrics.lyrics.splitlines()
for x in splitLyrics:
self.getControl( 110 ).addItem( x )
self.getControl( 110 ).selectItem( 0 )
self.show_control( 110 )
if lyrics.lrc:
if (self.allowtimer and self.getControl( 110 ).size() > 1):
self.refresh()
def parser_lyrics(self, lyrics):
self.pOverlay = []
tag = re.compile('\[(\d+):(\d\d)([\.:]\d+|)\]')
lyrics = lyrics.replace( "\r\n" , "\n" )
sep = "\n"
for x in lyrics.split( sep ):
match1 = tag.match( x )
times = []
if ( match1 ):
while ( match1 ):
times.append( float(match1.group(1)) * 60 + float(match1.group(2)) )
y = 5 + len(match1.group(1)) + len(match1.group(3))
x = x[y:]
match1 = tag.match( x )
for time in times:
self.pOverlay.append( (time, x) )
self.pOverlay.sort( cmp=lambda x,y: cmp(x[0], y[0]) )
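        # Worked example: the tag regex turns a (made-up) line such as
        #   "[01:15.30][02:40.10]chorus line"
        # into two entries, since 1*60+15 = 75.0 and 2*60+40 = 160.0:
        #   (75.0, "chorus line") and (160.0, "chorus line")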
def prepare_list(self, list):
listitems = []
for song in list:
listitem = xbmcgui.ListItem(song[0])
listitem.setProperty('lyric', str(song))
listitem.setProperty('source', lyrics.source)
listitems.append(listitem)
self.getControl( 120 ).addItems( listitems )
def reshow_choices(self):
if self.getControl( 120 ).size() > 1:
self.getControl( 120 ).selectItem( 0 )
self.stop_refresh()
self.show_control( 120 )
while not self.selected:
xbmc.sleep(50)
self.selected = False
self.getControl( 110 ).reset()
self.show_lyrics( self.lyrics )
# self.save_lyrics_to_file( self.lyrics ) #FIXME
def reset_controls(self):
self.getControl( 110 ).reset()
self.getControl( 200 ).setLabel('')
WIN.clearProperty('culrc.lyrics')
WIN.clearProperty('culrc.islrc')
WIN.clearProperty('culrc.source')
def exit_gui(self, action):
# in manual mode, we also need to quit the script when the user cancels the gui or music has ended
if (self.mode == 'manual') and (action == 'quit'):
# signal the main loop to quit
WIN.setProperty('culrc.quit', 'TRUE')
self.allowtimer = False
self.stop_refresh()
self.showgui = False
self.close()
def onClick(self, controlId):
if ( controlId == 110 ):
            # will only work for lrc-based lyrics
try:
item = self.getControl( 110 ).getSelectedItem()
stamp = float(item.getProperty('time'))
xbmc.Player().seekTime(stamp)
except:
pass
if ( controlId == 120 ):
item = self.getControl( 120 ).getSelectedItem()
source = item.getProperty('source').lower()
lyric = eval(item.getProperty('lyric'))
exec ( "from culrcscrapers.%s import lyricsScraper as lyricsScraper_%s" % (source, source))
scraper = eval('lyricsScraper_%s.LyricsFetcher()' % source)
self.lyrics.lyrics = scraper.get_lyrics_from_list( lyric )
self.selected = True
def onFocus(self, controlId):
self.controlId = controlId
def onAction(self, action):
actionId = action.getId()
if ( actionId in CANCEL_DIALOG ):
# dialog cancelled, close the gui
self.exit_gui('quit')
elif ( actionId == 101 ) or ( actionId == 117 ): # ACTION_MOUSE_RIGHT_CLICK / ACTION_CONTEXT_MENU
self.reshow_choices()
elif ( actionId in ACTION_OSD ):
xbmc.executebuiltin("ActivateWindow(10120)")
elif ( actionId in ACTION_CODEC ):
xbmc.executebuiltin("Action(codecinfo)")
class MyPlayer(xbmc.Player):
def __init__(self, *args, **kwargs):
xbmc.Player.__init__(self)
self.function = kwargs["function"]
self.clear = kwargs["clear"]
def onPlayBackStarted(self):
self.clear()
if xbmc.getCondVisibility("Window.IsVisible(12006)"):
self.function()
def onPlayBackStopped(self):
self.clear()
class MyMonitor(xbmc.Monitor):
def __init__(self, *args, **kwargs):
xbmc.Monitor.__init__(self)
self.function = kwargs["function"]
def onSettingsChanged(self):
# sleep before retrieving the new settings
xbmc.sleep(500)
self.function()
| gpl-2.0 | 523,123,709,008,233,000 | 39.069643 | 209 | 0.540933 | false | 3.918107 | false | false | false | 0.014439 |
kasioumis/invenio | invenio/modules/formatter/format_elements/bfe_edit_record.py | 9 | 1906 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element to print a link to BibEdit."""
from invenio.utils.url import create_html_link
from invenio.base.i18n import gettext_set_language
from invenio.config import CFG_BASE_URL, CFG_SITE_RECORD
from invenio.legacy.bibedit.utils import user_can_edit_record_collection
def format_element(bfo, style):
"""Print a link to BibEdit, if authorization is granted.
:param style: the CSS style to be applied to the link.
"""
_ = gettext_set_language(bfo.lang)
out = ""
user_info = bfo.user_info
if user_can_edit_record_collection(user_info, bfo.recID):
linkattrd = {}
if style != '':
linkattrd['style'] = style
out += create_html_link(
CFG_BASE_URL +
'/%s/edit/?ln=%s#state=edit&recid=%s' % (CFG_SITE_RECORD, bfo.lang,
str(bfo.recID)),
{},
link_label=_("Edit This Record"),
linkattrd=linkattrd)
return out
def escape_values(bfo):
"""Check if output of this element should be escaped."""
return 0
| gpl-2.0 | -5,455,845,800,145,588,000 | 33.654545 | 79 | 0.66107 | false | 3.789264 | false | false | false | 0 |
chandlercr/aima-python | submissions/Kinley/Lego/lego.py | 2 | 88653 | from ev3dev.auto import OUTPUT_A, OUTPUT_B, OUTPUT_D, LargeMotor, MediumMotor, InfraredSensor
from ev3dev.auto import INPUT_1, INPUT_2, ColorSensor, UltrasonicSensor
import time
import ev3dev.auto as auto
import ev3dev.ev3 as ev3
#from ev3dev.ev3 import *
import ev3dev.core as core
#ultrasonicSensor = UltrasonicSensor(INPUT_1)
colorSensor = ColorSensor(INPUT_2)
clawMotor = MediumMotor(OUTPUT_B)
leftTire = LargeMotor(OUTPUT_A)   # right tire is driven separately on OUTPUT_D
rightTire = LargeMotor(OUTPUT_D)
ir = InfraredSensor()
uss = UltrasonicSensor(INPUT_1)
# Lifted from InfraredSensor.proximity: despite the 'self' parameter this is a
# module-level function and must be passed an InfraredSensor instance (ir).
def proximity(self):
"""
A measurement of the distance between the sensor and the remote,
as a percentage. 100% is approximately 70cm/27in.
"""
if self.auto_mode:
self.mode = self.MODE_IR_PROX
return self.value(0)
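# Usage sketch (assumption, not in the original script):
#
#   distance_pct = proximity(ir)   # 0-100, with 100% at roughly 70 cm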
def getUltrasonic():
    uss.mode = 'US-DIST-CM'  # the driver mode string is US-DIST-CM, not 'US-DIS-CM'
    # value() reports tenths of a centimetre in this mode; the original
    # returned uss.units, which is only the unit string ('cm'), not a reading.
    return uss.value() / 10.0
#
# def getColor():
# colorSensor.mode='COL-REFLECT'
# return colorSensor.value()
#
#
# #def findObject():
# while getUltrasonic > 5.5:
#while(proximity(self=0) != 0):
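# A working version of the intent sketched above might be (assumption: creep
# forward until the ultrasonic reading drops below 5.5 cm):
#
#   while getUltrasonic() > 5.5:
#       leftTire.run_timed(speed_sp=360, time_sp=200)
#       rightTire.run_timed(speed_sp=360, time_sp=200)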
ev3.Sound.speak('Making right turn').wait()
leftTire.run_timed(speed_sp=720, time_sp=600)
time.sleep(1)
ev3.Sound.speak('Making left turn').wait()
rightTire.run_timed(speed_sp=720, time_sp=600)
time.sleep(1)
ev3.Sound.speak('Going forward').wait()
clawMotor.run_timed(speed_sp = -720, time_sp = 500)
leftTire.run_timed(speed_sp=360, time_sp=600)
rightTire.run_timed(speed_sp=360, time_sp=600)
time.sleep(1)
leftTire.run_timed(speed_sp=360, time_sp=600)
rightTire.run_timed(speed_sp=360, time_sp=600)
time.sleep(1)
leftTire.run_timed(speed_sp=360, time_sp=600)
rightTire.run_timed(speed_sp=360, time_sp=600)
time.sleep(1)
clawMotor.run_timed(speed_sp = 720, time_sp = 500)
time.sleep(1)
ev3.Sound.speak('Rotation').wait()
rightTire.run_timed(speed_sp=720, time_sp=600)
time.sleep(1)
rightTire.run_timed(speed_sp=720, time_sp=600)
time.sleep(1)
ev3.Sound.speak('Ha Ha Ha this is mine now').wait()
# The original repeated this left/right drive pair 296 times verbatim;
# a loop is behaviorally identical and far more readable.
for _ in range(296):
    leftTire.run_timed(speed_sp=900, time_sp=600)
    rightTire.run_timed(speed_sp=900, time_sp=600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
leftTire.run_timed(speed_sp = 900, time_sp = 600)
# The source file unrolled this drive sequence into 139 identical non-blocking
# commands (70 right, 69 left); collapsed here into an equivalent loop.
for _ in range(69):
    rightTire.run_timed(speed_sp = 900, time_sp = 600)
    leftTire.run_timed(speed_sp = 900, time_sp = 600)
rightTire.run_timed(speed_sp = 900, time_sp = 600)
clawMotor.run_timed(speed_sp = -720, time_sp = 500)
| mit | 5,705,633,325,550,721,000 | 48.47154 | 93 | 0.703451 | false | 2.408067 | false | false | false | 0.077674 |
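The record above ends an ev3dev robot script that drives by issuing paired run_timed commands. A minimal sketch of the same pattern packaged as a helper, assuming ev3dev-lang-python; the motor port names are guesses, not taken from the source:

import ev3dev.ev3 as ev3

def drive_forward(repeats=70, speed=900, ms=600):
    # Issue paired non-blocking run_timed commands, like the script above.
    right = ev3.LargeMotor('outC')  # assumed port
    left = ev3.LargeMotor('outB')   # assumed port
    for _ in range(repeats):
        right.run_timed(speed_sp=speed, time_sp=ms)
        left.run_timed(speed_sp=speed, time_sp=ms)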
sileht/deb-openstack-nova | nova/tests/test_iscsi.py | 8 | 3855 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import string
from nova import test
from nova.volume import iscsi
class TargetAdminTestCase(object):
def setUp(self):
self.cmds = []
self.tid = 1
self.target_name = 'iqn.2011-09.org.foo.bar:blaa'
self.lun = 10
self.path = '/foo/bar/blaa'
self.script_template = None
def get_script_params(self):
return {'tid': self.tid,
'target_name': self.target_name,
'lun': self.lun,
'path': self.path}
def get_script(self):
return self.script_template % self.get_script_params()
def fake_execute(self, *cmd, **kwargs):
self.cmds.append(string.join(cmd))
return "", None
    def clear_cmds(self):
        self.cmds = []
def verify_cmds(self, cmds):
self.assertEqual(len(cmds), len(self.cmds))
for a, b in zip(cmds, self.cmds):
self.assertEqual(a, b)
def verify(self):
script = self.get_script()
cmds = []
for line in script.split('\n'):
if not line.strip():
continue
cmds.append(line)
self.verify_cmds(cmds)
def run_commands(self):
tgtadm = iscsi.get_target_admin()
tgtadm.set_execute(self.fake_execute)
tgtadm.new_target(self.target_name, self.tid)
tgtadm.show_target(self.tid)
tgtadm.new_logicalunit(self.tid, self.lun, self.path)
tgtadm.delete_logicalunit(self.tid, self.lun)
tgtadm.delete_target(self.tid)
def test_target_admin(self):
self.clear_cmds()
self.run_commands()
self.verify()
class TgtAdmTestCase(test.TestCase, TargetAdminTestCase):
def setUp(self):
super(TgtAdmTestCase, self).setUp()
TargetAdminTestCase.setUp(self)
self.flags(iscsi_helper='tgtadm')
self.script_template = "\n".join([
"tgtadm --op new --lld=iscsi --mode=target --tid=%(tid)s "
"--targetname=%(target_name)s",
"tgtadm --op bind --lld=iscsi --mode=target --initiator-address=ALL "
"--tid=%(tid)s",
"tgtadm --op show --lld=iscsi --mode=target --tid=%(tid)s",
"tgtadm --op new --lld=iscsi --mode=logicalunit --tid=%(tid)s "
"--lun=%(lun)d --backing-store=%(path)s",
"tgtadm --op delete --lld=iscsi --mode=logicalunit --tid=%(tid)s "
"--lun=%(lun)d",
"tgtadm --op delete --lld=iscsi --mode=target --tid=%(tid)s"])
def get_script_params(self):
params = super(TgtAdmTestCase, self).get_script_params()
params['lun'] += 1
return params
class IetAdmTestCase(test.TestCase, TargetAdminTestCase):
def setUp(self):
super(IetAdmTestCase, self).setUp()
TargetAdminTestCase.setUp(self)
self.flags(iscsi_helper='ietadm')
self.script_template = "\n".join([
"ietadm --op new --tid=%(tid)s --params Name=%(target_name)s",
"ietadm --op show --tid=%(tid)s",
"ietadm --op new --tid=%(tid)s --lun=%(lun)d "
"--params Path=%(path)s,Type=fileio",
"ietadm --op delete --tid=%(tid)s --lun=%(lun)d",
"ietadm --op delete --tid=%(tid)s"])
| apache-2.0 | -8,485,601,612,463,166,000 | 32.232759 | 78 | 0.591958 | false | 3.569444 | true | false | false | 0.002853 |
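TargetAdminTestCase above verifies behaviour by swapping the admin object's execute callable for a recorder and comparing the captured command lines against a script template. The capture-and-verify core in isolation (these names are hypothetical, not nova's):

class Recorder(object):
    # Stands in for an external command runner and logs every invocation.
    def __init__(self):
        self.calls = []

    def execute(self, *cmd, **kwargs):
        self.calls.append(' '.join(cmd))
        return "", None

rec = Recorder()
# tgtadm.set_execute(rec.execute) would wire this in, as run_commands() does above.
rec.execute('tgtadm', '--op', 'show', '--tid=1')
assert rec.calls == ['tgtadm --op show --tid=1']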
rjsamson/simplebeerservice | sbsunit/utils/httpRequest.py | 1 | 2303 | import datetime
import time
from sbs.tools import Tools
import pycurl
import json
class HTTPRequest():
debug = False
logFile = 'sbs.log'
apiKey = ''
contentType = 'application/json'
url = ''
instance = None
class __HTTPRequest:
def __init__(self):
pass
def __str__(self):
return repr(self) + self.val
def __init__(self):
if not HTTPRequest.instance:
HTTPRequest.instance = HTTPRequest.__HTTPRequest()
else:
pass
def __getattr__(self, name):
return getattr(self.instance, name)
    def __setattr__(self, name, value):
        return setattr(self.instance, name, value)
@classmethod
def init(self, apiKey, contentType, url):
self.apiKey = apiKey
self.contentType = contentType
self.url = url
@classmethod
def send(self, data):
# NOTE:
# Depending on the version of python / OS (https://bugs.python.org/issue21246) you may
# run in to issues making requests to HTTPS endpoint, hence we are using pycurl library here
# Commented out to showcase how to make a request via urllib2
#payload = json.dumps(data)
#req = urllib2.Request(url, payload)
#req.add_header('Content-Type', 'application/json')
#try:
# r = urllib2.urlopen(req)
# response = r.read()
# return response
#except Exception, e:
# print "error sending data to %s" % url, e
# return
#Send request using pycurl
c = pycurl.Curl()
c.setopt(c.URL, self.url)
body = json.dumps(data)
Tools.log('Payload: %s' % data)
try:
c.setopt(pycurl.HTTPHEADER, [('x-api-key: %s' % self.apiKey), 'Content-Type: %s' % self.contentType])
c.setopt(c.POSTFIELDS, body)
c.perform()
response_code = c.getinfo(c.RESPONSE_CODE)
if (response_code==200):
Tools.log('Successful Post [%f].' % c.getinfo(c.TOTAL_TIME),2)
else:
Tools.log('Error writing to AWS. Response code: %i ' % response_code,2)
        except Exception, e:
            Tools.log('Error writing to AWS: %s' % e, 1)
            response_code = None
        return response_code
| apache-2.0 | -951,520,476,215,622,400 | 26.746988 | 113 | 0.561007 | false | 3.877104 | false | false | false | 0.006947 |
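A usage sketch for the HTTPRequest helper above. The module path, endpoint, and key are placeholder assumptions; init() stores class-level state, so send() can be called on the class itself:

from httpRequest import HTTPRequest  # import path is an assumption

HTTPRequest.init(apiKey='YOUR-KEY', contentType='application/json',
                 url='https://example.invalid/ingest')  # placeholder endpoint
status = HTTPRequest.send({'flow_ml': 12.5, 'tap': 'A'})
if status != 200:
    print('post failed: %s' % status)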
dstftw/youtube-dl | youtube_dl/extractor/vidlii.py | 28 | 4530 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
get_element_by_id,
int_or_none,
strip_or_none,
unified_strdate,
urljoin,
)
class VidLiiIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?vidlii\.com/(?:watch|embed)\?.*?\bv=(?P<id>[0-9A-Za-z_-]{11})'
_TESTS = [{
'url': 'https://www.vidlii.com/watch?v=tJluaH4BJ3v',
'md5': '9bf7d1e005dfa909b6efb0a1ff5175e2',
'info_dict': {
'id': 'tJluaH4BJ3v',
'ext': 'mp4',
'title': 'Vidlii is against me',
'description': 'md5:fa3f119287a2bfb922623b52b1856145',
'thumbnail': 're:https://.*.jpg',
'uploader': 'APPle5auc31995',
'uploader_url': 'https://www.vidlii.com/user/APPle5auc31995',
'upload_date': '20171107',
'duration': 212,
'view_count': int,
'comment_count': int,
'average_rating': float,
'categories': ['News & Politics'],
'tags': ['Vidlii', 'Jan', 'Videogames'],
}
}, {
'url': 'https://www.vidlii.com/embed?v=tJluaH4BJ3v&a=0',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://www.vidlii.com/watch?v=%s' % video_id, video_id)
video_url = self._search_regex(
r'src\s*:\s*(["\'])(?P<url>(?:https?://)?(?:(?!\1).)+)\1', webpage,
'video url', group='url')
title = self._search_regex(
(r'<h1>([^<]+)</h1>', r'<title>([^<]+) - VidLii<'), webpage,
'title')
description = self._html_search_meta(
('description', 'twitter:description'), webpage,
default=None) or strip_or_none(
get_element_by_id('des_text', webpage))
thumbnail = self._html_search_meta(
'twitter:image', webpage, default=None)
if not thumbnail:
thumbnail_path = self._search_regex(
r'img\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
'thumbnail', fatal=False, group='url')
if thumbnail_path:
thumbnail = urljoin(url, thumbnail_path)
uploader = self._search_regex(
r'<div[^>]+class=["\']wt_person[^>]+>\s*<a[^>]+\bhref=["\']/user/[^>]+>([^<]+)',
webpage, 'uploader', fatal=False)
uploader_url = 'https://www.vidlii.com/user/%s' % uploader if uploader else None
upload_date = unified_strdate(self._html_search_meta(
'datePublished', webpage, default=None) or self._search_regex(
r'<date>([^<]+)', webpage, 'upload date', fatal=False))
duration = int_or_none(self._html_search_meta(
'video:duration', webpage, 'duration',
default=None) or self._search_regex(
r'duration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
(r'<strong>(\d+)</strong> views',
r'Views\s*:\s*<strong>(\d+)</strong>'),
webpage, 'view count', fatal=False))
comment_count = int_or_none(self._search_regex(
(r'<span[^>]+id=["\']cmt_num[^>]+>(\d+)',
r'Comments\s*:\s*<strong>(\d+)'),
webpage, 'comment count', fatal=False))
average_rating = float_or_none(self._search_regex(
r'rating\s*:\s*([\d.]+)', webpage, 'average rating', fatal=False))
category = self._html_search_regex(
r'<div>Category\s*:\s*</div>\s*<div>\s*<a[^>]+>([^<]+)', webpage,
'category', fatal=False)
categories = [category] if category else None
tags = [
strip_or_none(tag)
for tag in re.findall(
r'<a[^>]+\bhref=["\']/results\?.*?q=[^>]*>([^<]+)',
webpage) if strip_or_none(tag)
] or None
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'uploader_url': uploader_url,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'comment_count': comment_count,
'average_rating': average_rating,
'categories': categories,
'tags': tags,
}
| unlicense | -8,041,148,305,091,441,000 | 35.24 | 100 | 0.505519 | false | 3.450114 | false | false | false | 0.000662 |
jnayak1/osf.io | website/search/search.py | 14 | 3454 | import logging
from framework.celery_tasks.handlers import enqueue_task
from website import settings
from website.search import share_search
logger = logging.getLogger(__name__)
if settings.SEARCH_ENGINE == 'elastic':
import elastic_search as search_engine
else:
search_engine = None
logger.warn('Elastic search is not set to load')
def requires_search(func):
def wrapped(*args, **kwargs):
if search_engine is not None:
return func(*args, **kwargs)
return wrapped
@requires_search
def search(query, index=None, doc_type=None):
index = index or settings.ELASTIC_INDEX
return search_engine.search(query, index=index, doc_type=doc_type)
@requires_search
def update_node(node, index=None, bulk=False, async=True):
if async:
node_id = node._id
# We need the transaction to be committed before trying to run celery tasks.
# For example, when updating a Node's privacy, is_public must be True in the
# database in order for method that updates the Node's elastic search document
# to run correctly.
if settings.USE_CELERY:
enqueue_task(search_engine.update_node_async.s(node_id=node_id, index=index, bulk=bulk))
else:
search_engine.update_node_async(node_id=node_id, index=index, bulk=bulk)
else:
index = index or settings.ELASTIC_INDEX
return search_engine.update_node(node, index=index, bulk=bulk)
@requires_search
def bulk_update_nodes(serialize, nodes, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.bulk_update_nodes(serialize, nodes, index=index)
@requires_search
def delete_node(node, index=None):
index = index or settings.ELASTIC_INDEX
doc_type = node.project_or_component
if node.is_registration:
doc_type = 'registration'
search_engine.delete_doc(node._id, node, index=index, category=doc_type)
def update_contributors(nodes):
search_engine.bulk_update_contributors(nodes)
@requires_search
def update_user(user, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.update_user(user, index=index)
@requires_search
def update_file(file_, index=None, delete=False):
index = index or settings.ELASTIC_INDEX
search_engine.update_file(file_, index=index, delete=delete)
@requires_search
def update_institution(institution, index=None):
index = index or settings.ELASTIC_INDEX
search_engine.update_institution(institution, index=index)
@requires_search
def delete_all():
search_engine.delete_all()
@requires_search
def delete_index(index):
search_engine.delete_index(index)
@requires_search
def create_index(index=None):
index = index or settings.ELASTIC_INDEX
search_engine.create_index(index=index)
@requires_search
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
exclude = exclude or []
result = search_engine.search_contributor(query=query, page=page, size=size,
exclude=exclude, current_user=current_user)
return result
def search_share(query, raw=False, index='share'):
return share_search.search(query, raw=raw, index=index)
def count_share(query, index='share'):
return share_search.count(query, index=index)
def share_stats(query=None):
query = query or {}
return share_search.stats(query=query)
def share_providers():
return share_search.providers()
| apache-2.0 | 6,900,069,037,076,708,000 | 30.981481 | 100 | 0.707875 | false | 3.609195 | false | false | false | 0.006948 |
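requires_search above is a guard decorator: when no search backend module was loaded, the wrapped call silently returns None instead of raising. The pattern in isolation:

backend = None  # stands in for search_engine

def requires_backend(func):
    def wrapped(*args, **kwargs):
        if backend is not None:
            return func(*args, **kwargs)
        # no backend configured: fall through and return None
    return wrapped

@requires_backend
def search(query):
    return backend.search(query)

print(search("q"))  # -> None until a real backend is assigned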
santicalvo/extract_from_doc | src/curso_toxml/xmlcreator.py | 1 | 2581 | # -*- coding:utf-8 -*-
import xml.dom.minidom as minidom
import xml.etree.ElementTree as xml
class CursoSeatXmlMinidom(object):
def __init__(self):
self.doc = minidom.Document()
self.root = self.doc.createElement("paginas")
self.doc.appendChild(self.root)
def limpia_saltos(self,texto):
if texto.find("\n") != -1 or texto.find("\r") != -1:
texto = texto.replace("\n", "<br />")
texto = texto.replace("\r", "<br />")
txt = self.doc.createCDATASection(texto)
else:
txt = self.doc.createTextNode(texto)
return txt
def addPagina(self, num_pagina, texto, titulo=""):
pagina = self.doc.createElement("pagina")
pagina.setAttribute("num", num_pagina)
nodo_texto = self.doc.createElement("texto")
ptext = self.limpia_saltos(texto)
nodo_texto.appendChild(ptext)
if titulo !="" and titulo != " ":
tit = self.doc.createElement("titulo")
ttext = self.doc.createTextNode(titulo)
tit.appendChild(ttext)
pagina.appendChild(tit)
pagina.appendChild(nodo_texto)
self.root.appendChild(pagina)
def save(self,path, notyet=False):
if notyet:
return False
try:
xmlstr = self.doc.toxml("utf-8")
f=open(path, "w")
f.write(xmlstr)
f.close()
except Exception as ex:
print path, ex
class CursoSeatXmlEtree(object):
def __init__(self):
self.root = xml.Element("paginas")
def limpia_saltos(self,texto):
texto = texto.replace("\n", "<br />")
texto = texto.replace("\r", "<br />")
return texto.encode("utf-8")
def addPagina(self, num_pagina, texto, titulo=""):
pagina = xml.Element("pagina")
pagina.attrib["num"] = num_pagina
nodo_texto = xml.SubElement(pagina, "texto")
nodo_texto.text = self.limpia_saltos(texto)
if titulo !="" and titulo != " ":
tit = xml.SubElement(pagina, "titulo")
tit.text = titulo
self.root.append(pagina)
def save(self,path, notyet=False):
if notyet:
return False
try:
fil = open(path, "w")
fil.write( '<?xml version="1.0"?>' )
#print xml.tostring(self.root)
fil.write( xml.tostring(self.root, "utf-8") )
fil.close()
#print xml.tostring(self.root)
except Exception as ex:
print path, ex
| gpl-3.0 | 7,537,236,663,743,024,000 | 33.413333 | 60 | 0.545912 | false | 3.436751 | false | false | false | 0.008524 |
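Both writer classes above expose the same addPagina/save surface over different XML backends. A hedged usage sketch, written for the Python 2 era the module targets (the import path is an assumption):

from xmlcreator import CursoSeatXmlEtree  # import path is an assumption

doc = CursoSeatXmlEtree()
doc.addPagina("1", u"Primera pagina\ncon salto de linea", titulo=u"Intro")
doc.addPagina("2", u"Segunda pagina")
doc.save("curso.xml")  # writes <?xml version="1.0"?> plus the <paginas> tree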
MikeAmy/django | tests/i18n/sampleproject/update_catalogs.py | 344 | 1780 | #!/usr/bin/env python
"""
Helper script to update sampleproject's translation catalogs.
When a bug has been identified related to i18n, this helps capture the issue
by using catalogs created from management commands.
Example:
The string "Two %% Three %%%" renders differently using trans and blocktrans.
This issue is difficult to debug, it could be a problem with extraction,
interpolation, or both.
How this script helps:
* Add {% trans "Two %% Three %%%" %} and blocktrans equivalent to templates.
* Run this script.
* Test extraction - verify the new msgid in sampleproject's django.po.
* Add a translation to sampleproject's django.po.
* Run this script.
* Test interpolation - verify templatetag rendering, test each in a template
that is rendered using an activated language from sampleproject's locale.
* Tests should fail, issue captured.
* Fix issue.
* Run this script.
* Tests all pass.
"""
import os
import re
import sys
proj_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(proj_dir, '..', '..', '..')))
def update_translation_catalogs():
"""Run makemessages and compilemessages in sampleproject."""
from django.core.management import call_command
prev_cwd = os.getcwd()
os.chdir(proj_dir)
call_command('makemessages')
call_command('compilemessages')
# keep the diff friendly - remove 'POT-Creation-Date'
pofile = os.path.join(proj_dir, 'locale', 'fr', 'LC_MESSAGES', 'django.po')
with open(pofile) as f:
content = f.read()
content = re.sub(r'^"POT-Creation-Date.+$\s', '', content, flags=re.MULTILINE)
with open(pofile, 'w') as f:
f.write(content)
os.chdir(prev_cwd)
if __name__ == "__main__":
update_translation_catalogs()
| bsd-3-clause | 4,228,443,035,337,914,000 | 28.666667 | 82 | 0.696067 | false | 3.655031 | false | false | false | 0.000562 |
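A trimmed sketch of the same regeneration step for a single locale, assuming DJANGO_SETTINGS_MODULE is already configured and the working directory holds a locale/ tree (the path below is a placeholder):

import os
from django.core.management import call_command

os.chdir('/path/to/sampleproject')  # placeholder project directory
call_command('makemessages', locale=['fr'])
call_command('compilemessages')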
dubourg/openturns | python/test/t_LinearNumericalMathEvaluationImplementation_std.py | 2 | 1208 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
try:
inputDimension = 3
outputDimension = 2
# Center
center = NumericalPoint(inputDimension)
center[0] = -1
center[1] = 0.5
center[2] = 1
# Constant term
constant = NumericalPoint(outputDimension)
constant[0] = -1.0
constant[1] = 2.0
# Linear term
linear = Matrix(inputDimension, outputDimension)
linear[0, 0] = 1.0
linear[1, 0] = 2.0
linear[2, 0] = 3.0
linear[0, 1] = 4.0
linear[1, 1] = 5.0
linear[2, 1] = 6.0
# myFunction = linear * (X- center) + constant
myFunction = LinearNumericalMathEvaluationImplementation(
center, constant, linear)
myFunction.setName("linearFunction")
inPoint = NumericalPoint(inputDimension)
inPoint[0] = 7.0
inPoint[1] = 8.0
inPoint[2] = 9.0
outPoint = myFunction(inPoint)
print("myFunction=", repr(myFunction))
print(myFunction.getName(), "( ", repr(inPoint), " ) = ", repr(outPoint))
# except TestFailed, ex :
except:
import sys
print("t__LinearNumericalMathEvaluationImplementation_std.py",
sys.exc_info()[0], sys.exc_info()[1])
| gpl-3.0 | -4,573,036,099,627,199,500 | 25.844444 | 77 | 0.63245 | false | 3.229947 | false | true | false | 0.000828 |
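As a plain-numpy cross-check of the test above: with linear of shape (inputDimension, outputDimension), the evaluation computes f(x) = constant + linear^T (x - center), so the expected output for inPoint is [46.0, 119.5] (my arithmetic, not stated in the source):

import numpy as np

center = np.array([-1.0, 0.5, 1.0])
constant = np.array([-1.0, 2.0])
linear = np.array([[1.0, 4.0],
                   [2.0, 5.0],
                   [3.0, 6.0]])  # inputDimension x outputDimension
x = np.array([7.0, 8.0, 9.0])
print(constant + linear.T.dot(x - center))  # -> [ 46.  119.5]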
Bluscream/Discord-Selfbot | cogs/serverinfo.py | 1 | 14791 | import prettytable
import discord
import os
import re
from urllib.parse import urlparse
from PythonGists import PythonGists
from discord.ext import commands
from cogs.utils.checks import embed_perms, cmd_prefix_len
'''Module for server commands.'''
class Server:
def __init__(self, bot):
self.bot = bot
self.invites = ['discord.gg/', 'discordapp.com/invite/']
self.invite_domains = ['discord.gg', 'discordapp.com']
def find_server(self, msg):
server = None
if msg:
try:
float(msg)
server = self.bot.get_guild(int(msg))
if not server:
return self.bot.bot_prefix + 'Server not found.', False
except:
for i in self.bot.guilds:
if i.name.lower() == msg.lower().strip():
server = i
break
if not server:
return self.bot.bot_prefix + 'Could not find server. Note: You must be a member of the server you are trying to search.', False
return server, True
# Stats about server
@commands.group(aliases=['server', 'sinfo', 'si'], pass_context=True, invoke_without_command=True)
async def serverinfo(self, ctx, *, msg=""):
"""Various info about the server. [p]help server for more info."""
if ctx.invoked_subcommand is None:
if msg:
server = None
try:
float(msg)
server = self.bot.get_guild(int(msg))
if not server:
return await ctx.send(
self.bot.bot_prefix + 'Server not found.')
except:
for i in self.bot.guilds:
if i.name.lower() == msg.lower():
server = i
break
if not server:
return await ctx.send(self.bot.bot_prefix + 'Could not find server. Note: You must be a member of the server you are trying to search.')
else:
server = ctx.message.guild
online = 0
for i in server.members:
if str(i.status) == 'online' or str(i.status) == 'idle' or str(i.status) == 'dnd':
online += 1
all_users = []
for user in server.members:
all_users.append('{}#{}'.format(user.name, user.discriminator))
all_users.sort()
all = '\n'.join(all_users)
channel_count = len([x for x in server.channels if type(x) == discord.channel.TextChannel])
role_count = len(server.roles)
emoji_count = len(server.emojis)
if embed_perms(ctx.message):
em = discord.Embed(color=0xea7938)
em.add_field(name='Name', value=server.name)
em.add_field(name='Owner', value=server.owner, inline=False)
em.add_field(name='Members', value=server.member_count)
em.add_field(name='Currently Online', value=online)
em.add_field(name='Text Channels', value=str(channel_count))
em.add_field(name='Region', value=server.region)
em.add_field(name='Verification Level', value=str(server.verification_level))
em.add_field(name='Highest role', value=server.role_hierarchy[0])
em.add_field(name='Number of roles', value=str(role_count))
em.add_field(name='Number of emotes', value=str(emoji_count))
url = PythonGists.Gist(description='All Users in: %s' % server.name, content=str(all), name='server.txt')
gist_of_users = '[List of all {} users in this server]({})'.format(server.member_count, url)
em.add_field(name='Users', value=gist_of_users)
em.add_field(name='Created At', value=server.created_at.__format__('%A, %d. %B %Y @ %H:%M:%S'))
em.set_thumbnail(url=server.icon_url)
em.set_author(name='Server Info', icon_url='https://i.imgur.com/RHagTDg.png')
em.set_footer(text='Server ID: %s' % server.id)
await ctx.send(embed=em)
else:
            msg = '**Server Info:** ```Name: %s\nOwner: %s\nMembers: %s\nCurrently Online: %s\nRegion: %s\nVerification Level: %s\nHighest Role: %s\nCreated At: %s\nServer avatar: %s```' % (
server.name, server.owner, server.member_count, online, server.region, str(server.verification_level), server.role_hierarchy[0], server.created_at.__format__('%A, %d. %B %Y @ %H:%M:%S'), server.icon_url)
await ctx.send(self.bot.bot_prefix + msg)
await ctx.message.delete()
@serverinfo.command(pass_context=True)
async def emojis(self, ctx, msg: str = None):
"""List all emojis in this server. Ex: [p]server emojis"""
if msg:
server, found = self.find_server(msg)
if not found:
return await ctx.send(server)
else:
server = ctx.message.guild
emojis = [str(x) for x in server.emojis]
await ctx.send("".join(emojis))
await ctx.message.delete()
@serverinfo.command(pass_context=True)
async def avi(self, ctx, msg: str = None):
"""Get server avatar image link."""
if msg:
server, found = self.find_server(msg)
if not found:
return await ctx.send(server)
else:
server = ctx.message.guild
if embed_perms(ctx.message):
em = discord.Embed()
em.set_image(url=server.icon_url)
await ctx.send(embed=em)
else:
await ctx.send(self.bot.bot_prefix + server.icon_url)
await ctx.message.delete()
@serverinfo.command(pass_context=True)
async def role(self, ctx, *, msg):
"""Get more info about a specific role. Ex: [p]server role Admins"""
for role in ctx.message.guild.roles:
if msg.lower() == role.name.lower() or msg == role.id:
all_users = [str(x) for x in role.members]
all_users.sort()
all_users = ', '.join(all_users)
em = discord.Embed(title='Role Info', color=role.color)
em.add_field(name='Name', value=role.name)
em.add_field(name='ID', value=role.id, inline=False)
em.add_field(name='Users in this role', value=str(len(role.members)))
em.add_field(name='Role color hex value', value=str(role.color))
em.add_field(name='Role color RGB value', value=role.color.to_rgb())
em.add_field(name='Mentionable', value=role.mentionable)
if len(role.members) > 10:
all_users = all_users.replace(', ', '\n')
url = PythonGists.Gist(description='Users in role: {} for server: {}'.format(role.name, ctx.message.guild.name), content=str(all_users), name='role.txt')
em.add_field(name='All users', value='{} users. [List of users posted to Gist.]({})'.format(len(role.members), url), inline=False)
elif len(role.members) >= 1:
em.add_field(name='All users', value=all_users, inline=False)
else:
em.add_field(name='All users', value='There are no users in this role!', inline=False)
em.add_field(name='Created at', value=role.created_at.__format__('%x at %X'))
em.set_thumbnail(url='http://www.colorhexa.com/%s.png' % str(role.color).strip("#"))
await ctx.message.delete()
return await ctx.send(content=None, embed=em)
await ctx.message.delete()
await ctx.send(self.bot.bot_prefix + 'Could not find role ``%s``' % msg)
@commands.command(aliases=['channel', 'cinfo', 'ci'], pass_context=True, no_pm=True)
async def channelinfo(self, ctx, *, channel: int = None):
"""Shows channel information"""
if not channel:
channel = ctx.message.channel
else:
channel = self.bot.get_channel(channel)
data = discord.Embed()
if hasattr(channel, 'mention'):
data.description = "**Information about Channel:** " + channel.mention
if hasattr(channel, 'changed_roles'):
if len(channel.changed_roles) > 0:
data.color = discord.Colour.green() if channel.changed_roles[0].permissions.read_messages else discord.Colour.red()
if isinstance(channel, discord.TextChannel):
_type = "Text"
elif isinstance(channel, discord.VoiceChannel):
_type = "Voice"
else:
_type = "Unknown"
data.add_field(name="Type", value=_type)
data.add_field(name="ID", value=channel.id, inline=False)
if hasattr(channel, 'position'):
data.add_field(name="Position", value=channel.position)
if isinstance(channel, discord.VoiceChannel):
if channel.user_limit != 0:
data.add_field(name="User Number", value="{}/{}".format(len(channel.voice_members), channel.user_limit))
else:
data.add_field(name="User Number", value="{}".format(len(channel.voice_members)))
userlist = [r.display_name for r in channel.members]
if not userlist:
userlist = "None"
else:
userlist = "\n".join(userlist)
data.add_field(name="Users", value=userlist)
data.add_field(name="Bitrate", value=channel.bitrate)
elif isinstance(channel, discord.TextChannel):
try:
pins = await channel.pins()
data.add_field(name="Pins", value=len(pins), inline=True)
except discord.Forbidden:
pass
data.add_field(name="Members", value="%s"%len(channel.members))
if channel.topic:
data.add_field(name="Topic", value=channel.topic, inline=False)
hidden = []
allowed = []
for role in channel.changed_roles:
if role.permissions.read_messages is True:
if role.name != "@everyone":
allowed.append(role.mention)
elif role.permissions.read_messages is False:
if role.name != "@everyone":
hidden.append(role.mention)
if len(allowed) > 0:
data.add_field(name='Allowed Roles ({})'.format(len(allowed)), value=', '.join(allowed), inline=False)
if len(hidden) > 0:
data.add_field(name='Restricted Roles ({})'.format(len(hidden)), value=', '.join(hidden), inline=False)
if channel.created_at:
data.set_footer(text=("Created on {} ({} days ago)".format(channel.created_at.strftime("%d %b %Y %H:%M"), (ctx.message.created_at - channel.created_at).days)))
await ctx.send(embed=data)
@commands.command(aliases=['invitei', 'ii'], pass_context=True)
async def inviteinfo(self, ctx, *, invite: str = None):
"""Shows invite information."""
if invite:
for url in re.findall(r'(https?://\S+)', invite):
try:
invite = await self.bot.get_invite(urlparse(url).path.replace('/', '').replace('<', '').replace('>', ''))
except discord.NotFound:
return await ctx.send(self.bot.bot_prefix + "Couldn't find valid invite, please double check the link.")
break
else:
async for msg in ctx.message.channel.history():
if any(x in msg.content for x in self.invites):
for url in re.findall(r'(https?://\S+)', msg.content):
url = urlparse(url)
if any(x in url for x in self.invite_domains):
print(url)
url = url.path.replace('/', '').replace('<', '').replace('>', '').replace('\'', '').replace(')', '')
print(url)
try:
invite = await self.bot.get_invite(url)
except discord.NotFound:
return await ctx.send(self.bot.bot_prefix + "Couldn't find valid invite, please double check the link.")
break
if not invite:
return await ctx.send(self.bot.bot_prefix + "Couldn't find an invite in the last 100 messages. Please specify an invite.")
data = discord.Embed()
content = None
if invite.id is not None:
content = self.bot.bot_prefix + "**Information about Invite:** %s" % invite.id
if invite.revoked is not None:
data.colour = discord.Colour.red() if invite.revoked else discord.Colour.green()
if invite.created_at is not None:
            data.set_footer(text="Created on {} ({} days ago)".format(invite.created_at.strftime("%d %b %Y %H:%M"), (ctx.message.created_at - invite.created_at).days))
if invite.max_age is not None:
if invite.max_age > 0:
expires = '%s s' % invite.max_age
else:
expires = "Never"
data.add_field(name="Expires in", value=expires)
if invite.temporary is not None:
data.add_field(name="Temp membership", value="Yes" if invite.temporary else "No")
if invite.uses is not None:
data.add_field(name="Uses", value="%s / %s" % (invite.uses, invite.max_uses))
if invite.inviter.name is not None:
data.set_author(name=invite.inviter.name + '#' + invite.inviter.discriminator + " (%s)" % invite.inviter.id, icon_url=invite.inviter.avatar_url)
if invite.guild.name is not None:
data.add_field(name="Guild", value="Name: " + invite.guild.name + "\nID: %s" % invite.guild.id, inline=False)
if invite.guild.icon_url is not None:
data.set_thumbnail(url=invite.guild.icon_url)
if invite.channel.name is not None:
channel = "%s\n#%s" % (invite.channel.mention, invite.channel.name) if isinstance(invite.channel, discord.TextChannel) else invite.channel.name
data.add_field(name="Channel", value="Name: " + channel + "\nID: %s" % invite.channel.id, inline=False)
try:
await ctx.send(content=content, embed=data)
except:
await ctx.send(content="I need the `Embed links` permission to send this")
def setup(bot):
bot.add_cog(Server(bot))
| gpl-3.0 | 4,937,400,908,164,385,000 | 50.357639 | 223 | 0.54905 | false | 3.911928 | false | false | false | 0.003854 |
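The inviteinfo command above digs invite codes out of message text with a URL regex plus urlparse. The extraction core in isolation (the sample text is made up):

import re
from urllib.parse import urlparse

text = "join us at https://discord.gg/AbCdEf today"
for url in re.findall(r'(https?://\S+)', text):
    parsed = urlparse(url)
    if parsed.netloc in ('discord.gg', 'discordapp.com'):
        print(parsed.path.strip('/').strip('<>'))  # -> AbCdEf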
jrbl/invenio | modules/webmessage/lib/webmessage_tests.py | 15 | 5414 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for WebMessage."""
__revision__ = \
"$Id$"
import unittest
from invenio import webmessage_mailutils
from invenio.testutils import make_test_suite, run_test_suite
class TestQuotingMessage(unittest.TestCase):
"""Test for quoting messages."""
def test_simple_quoting_per_block(self):
"""webmessage - test quoting simple message (HTML, per block)"""
text = """Dear romeo
I received your mail
>>Would you like to come with me to the restaurant?
Of course!
>>>>When could we get together?
Reply to my question please.
see you..."""
expected_text = """Dear romeo<br/>
I received your mail<br/>
<div class="commentbox">
\tWould you like to come with me to the restaurant?<br/>
</div>
Of course!<br/>
<div class="commentbox">
\t<div class="commentbox">
\t\tWhen could we get together?<br/>
\t</div>
</div>
Reply to my question please.<br/>
see you...<br/>
"""
res = webmessage_mailutils.email_quoted_txt2html(text,
tabs_before=0,
indent_txt='>>',
linebreak_txt="\n",
indent_html=('<div class="commentbox">', "</div>"),
linebreak_html='<br/>')
self.assertEqual(res, expected_text)
def test_simple_quoting_per_line(self):
"""webmessage - test quoting simple message (HTML, per line)"""
text = """Dear romeo
I received your mail
>>Would you like to come with me to the restaurant?
>>I discovered a really nice one.
Of course!
>>>>When could we get together?
Reply to my question please.
see you..."""
expected_text = """Dear romeo <br/>
I received your mail <br/>
<blockquote><div>Would you like to come with me to the restaurant? </div></blockquote> <br/>
<blockquote><div>I discovered a really nice one. </div></blockquote> <br/>
Of course! <br/>
<blockquote><div><blockquote><div>When could we get together? </div></blockquote> </div></blockquote> <br/>
Reply to my question please. <br/>
see you... <br/>
"""
res = webmessage_mailutils.email_quoted_txt2html(text,
tabs_before=0,
indent_txt='>>',
linebreak_txt="\n",
indent_html=('<blockquote><div>', ' </div></blockquote>'),
linebreak_html=" <br/>",
indent_block=False)
self.assertEqual(res, expected_text)
def test_quoting_message(self):
"""webmessage - test quoting message (text)"""
text = """C'est un lapin, lapin de bois.
>>Quoi?
Un cadeau.
>>What?
A present.
>>Oh, un cadeau"""
expected_text = """>>C'est un lapin, lapin de bois.
>>>>Quoi?
>>Un cadeau.
>>>>What?
>>A present.
>>>>Oh, un cadeau
"""
res = webmessage_mailutils.email_quote_txt(text,
indent_txt='>>',
linebreak_input="\n",
linebreak_output="\n")
self.assertEqual(res, expected_text)
def test_indenting_rule_message(self):
"""webmessage - return email-like indenting rule"""
text = """>>Brave Sir Robin ran away...
<img src="malicious_script"/>*No!*
>>bravely ran away away...
I didn't!*<script>malicious code</script>
>>When danger reared its ugly head, he bravely turned his tail and fled.
<form onload="malicious"></form>*I never did!*
"""
expected_text = """>>Brave Sir Robin ran away...
<img src="malicious_script" />*No!*
>>bravely ran away away...
I didn't!*<script>malicious code</script>
>>When danger reared its ugly head, he bravely turned his tail and fled.
<form onload="malicious"></form>*I never did!*
"""
res = webmessage_mailutils.escape_email_quoted_text(text,
indent_txt='>>',
linebreak_txt='\n')
self.assertEqual(res, expected_text)
TEST_SUITE = make_test_suite(TestQuotingMessage)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 | -3,899,528,884,807,895,000 | 37.126761 | 122 | 0.559106 | false | 3.963397 | true | false | false | 0.004433 |
fos/fos-legacy | fos/actor/axes.py | 1 | 2467 | import numpy as np
from pyglet.gl import *
from fos import Actor, World
from pyglet.lib import load_library
glib=load_library('GL')
class Axes(Actor):
def __init__(self, scale = 1.0, line_width=2.):
""" Draw three axes
"""
self.affine=np.eye(4)
self.scale = scale
self.vertices=np.array([[0,0,0],[0.5,0,0],[1,0,0],\
[0,0,0],[0,0.5,0],[0,1,0],\
[0,0,0],[0,0,0.5],[0,0,1]],dtype='f4')
self.vertices=self.scale*self.vertices
self.colors=np.array([[1,0,0,1],[1,0,0,1],[1,0,0,1],\
[0,1,0,1],[0,1,0,1],[0,1,0,1],\
[0,0,1,1],[0,0,1,1],[0,0,1,1]],dtype='f4')
self.vn=len(self.vertices)
self.cn=len(self.colors)
assert self.vn==self.cn
self.vptr=self.vertices.ctypes.data
self.cptr=self.colors.ctypes.data
self.count=np.array([3,3,3],dtype=np.int32)
self.first=np.array([0,3,6],dtype=np.int32)
self.firstptr=self.first.ctypes.data
self.countptr=self.count.ctypes.data
self.primcount=len(self.first)
self.items=3
self.line_width=line_width
self.show_aabb = False
self.make_aabb((np.array([-scale,-scale,-scale]),np.array([scale,scale,scale])),margin = 0)
def update(self, dt):
pass
def draw(self):
glEnable(GL_DEPTH_TEST)
glLineWidth(self.line_width)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
glVertexPointer(3,GL_FLOAT,0,self.vptr)
glColorPointer(4,GL_FLOAT,0,self.cptr)
glPushMatrix()
#glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glib.glMultiDrawArrays(GL_LINE_STRIP, self.firstptr,self.countptr, self.items)
#This is the same as
#glDrawArrays(GL_LINE_STRIP,0,3)
#glDrawArrays(GL_LINE_STRIP,3,3)
#glDrawArrays(GL_LINE_STRIP,6,3)
#if self.show_aabb:self.draw_aabb()
#Or same as
#for i in range(self.items):
# glDrawArrays(GL_LINE_STRIP,self.first[i],self.count[i])
glPopMatrix()
glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
glLineWidth(1.)
glDisable(GL_DEPTH_TEST)
if __name__=='__main__':
pass
| bsd-3-clause | -3,707,371,162,150,738,000 | 34.242857 | 103 | 0.539927 | false | 3.122785 | false | false | false | 0.055533 |
GREO/gnuradio-git | gr-radio-astronomy/src/python/usrp_psr_receiver.py | 9 | 41027 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
#
#
# Pulsar receiver application
#
# Performs both harmonic folding analysis
# and epoch folding analysis
#
#
from gnuradio import gr, gru, blks2, audio
from usrpm import usrp_dbid
from gnuradio import usrp, optfir
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, ra_fftsink, ra_stripchartsink, form, slider
from optparse import OptionParser
import wx
import sys
import Numeric
import numpy.fft
import ephem
import time
import os
import math
class app_flow_graph(stdgui2.std_top_block):
def __init__(self, frame, panel, vbox, argv):
stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)
self.frame = frame
self.panel = panel
parser = OptionParser(option_class=eng_option)
parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=(0, 0),
help="select USRP Rx side A or B (default=A)")
parser.add_option("-d", "--decim", type="int", default=16,
help="set fgpa decimation rate to DECIM [default=%default]")
parser.add_option("-f", "--freq", type="eng_float", default=None,
help="set frequency to FREQ", metavar="FREQ")
parser.add_option("-Q", "--observing", type="eng_float", default=0.0,
help="set observing frequency to FREQ")
parser.add_option("-a", "--avg", type="eng_float", default=1.0,
help="set spectral averaging alpha")
parser.add_option("-V", "--favg", type="eng_float", default=2.0,
help="set folder averaging alpha")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option("-l", "--reflevel", type="eng_float", default=30.0,
help="Set pulse display reference level")
parser.add_option("-L", "--lowest", type="eng_float", default=1.5,
help="Lowest valid frequency bin")
parser.add_option("-e", "--longitude", type="eng_float", default=-76.02, help="Set Observer Longitude")
parser.add_option("-c", "--latitude", type="eng_float", default=44.85, help="Set Observer Latitude")
parser.add_option("-F", "--fft_size", type="eng_float", default=1024, help="Size of FFT")
parser.add_option ("-t", "--threshold", type="eng_float", default=2.5, help="pulsar threshold")
parser.add_option("-p", "--lowpass", type="eng_float", default=100, help="Pulse spectra cutoff freq")
parser.add_option("-P", "--prefix", default="./", help="File prefix")
parser.add_option("-u", "--pulsefreq", type="eng_float", default=0.748, help="Observation pulse rate")
parser.add_option("-D", "--dm", type="eng_float", default=1.0e-5, help="Dispersion Measure")
parser.add_option("-O", "--doppler", type="eng_float", default=1.0, help="Doppler ratio")
parser.add_option("-B", "--divbase", type="eng_float", default=20, help="Y/Div menu base")
parser.add_option("-I", "--division", type="eng_float", default=100, help="Y/Div")
parser.add_option("-A", "--audio_source", default="plughw:0,0", help="Audio input device spec")
parser.add_option("-N", "--num_pulses", default=1, type="eng_float", help="Number of display pulses")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
self.show_debug_info = True
self.reflevel = options.reflevel
self.divbase = options.divbase
self.division = options.division
self.audiodev = options.audio_source
self.mult = int(options.num_pulses)
# Low-pass cutoff for post-detector filter
# Set to 100Hz usually, since lots of pulsars fit in this
# range
self.lowpass = options.lowpass
# What is lowest valid frequency bin in post-detector FFT?
# There's some pollution very close to DC
self.lowest_freq = options.lowest
# What (dB) threshold to use in determining spectral candidates
self.threshold = options.threshold
# Filename prefix for recording file
self.prefix = options.prefix
# Dispersion Measure (DM)
self.dm = options.dm
# Doppler shift, as a ratio
# 1.0 == no doppler shift
# 1.005 == a little negative shift
# 0.995 == a little positive shift
self.doppler = options.doppler
#
# Input frequency and observing frequency--not necessarily the
# same thing, if we're looking at the IF of some downconverter
# that's ahead of the USRP and daughtercard. This distinction
# is important in computing the correct de-dispersion filter.
#
self.frequency = options.freq
if options.observing <= 0:
self.observing_freq = options.freq
else:
self.observing_freq = options.observing
# build the graph
self.u = usrp.source_c(decim_rate=options.decim)
self.u.set_mux(usrp.determine_rx_mux_value(self.u, options.rx_subdev_spec))
#
# Recording file, in case we ever need to record baseband data
#
self.recording = gr.file_sink(gr.sizeof_char, "/dev/null")
self.recording_state = False
self.pulse_recording = gr.file_sink(gr.sizeof_short, "/dev/null")
self.pulse_recording_state = False
#
# We come up with recording turned off, but the user may
# request recording later on
self.recording.close()
self.pulse_recording.close()
#
# Need these two for converting 12-bit baseband signals to 8-bit
#
self.tofloat = gr.complex_to_float()
self.tochar = gr.float_to_char()
# Need this for recording pulses (post-detector)
self.toshort = gr.float_to_short()
#
# The spectral measurer sets this when it has a valid
# average spectral peak-to-peak distance
# We can then use this to program the parameters for the epoch folder
#
# We set a sentimental value here
self.pulse_freq = options.pulsefreq
# Folder runs at this raw sample rate
self.folder_input_rate = 20000
# Each pulse in the epoch folder is sampled at 128 times the nominal
# pulse rate
self.folding = 128
#
# Try to find candidate parameters for rational resampler
#
save_i = 0
candidates = []
for i in range(20,300):
input_rate = self.folder_input_rate
output_rate = int(self.pulse_freq * i)
interp = gru.lcm(input_rate, output_rate) / input_rate
decim = gru.lcm(input_rate, output_rate) / output_rate
if (interp < 500 and decim < 250000):
candidates.append(i)
# We didn't find anything, bail!
if (len(candidates) < 1):
print "Couldn't converge on resampler parameters"
sys.exit(1)
#
# Now try to find candidate with the least sampling error
#
mindiff = 999.999
for i in candidates:
diff = self.pulse_freq * i
diff = diff - int(diff)
if (diff < mindiff):
mindiff = diff
save_i = i
# Recompute rates
input_rate = self.folder_input_rate
output_rate = int(self.pulse_freq * save_i)
# Compute new interp and decim, based on best candidate
interp = gru.lcm(input_rate, output_rate) / input_rate
decim = gru.lcm(input_rate, output_rate) / output_rate
# Save optimized folding parameters, used later
self.folding = save_i
self.interp = int(interp)
self.decim = int(decim)
# So that we can view N pulses in the pulse viewer window
FOLD_MULT=self.mult
# determine the daughterboard subdevice we're using
self.subdev = usrp.selected_subdev(self.u, options.rx_subdev_spec)
self.cardtype = self.u.daughterboard_id(0)
# Compute raw input rate
input_rate = self.u.adc_freq() / self.u.decim_rate()
# BW==input_rate for complex data
self.bw = input_rate
#
# Set baseband filter bandwidth if DBS_RX:
#
if self.cardtype == usrp_dbid.DBS_RX:
lbw = input_rate / 2
if lbw < 1.0e6:
lbw = 1.0e6
self.subdev.set_bw(lbw)
#
# We use this as a crude volume control for the audio output
#
#self.volume = gr.multiply_const_ff(10**(-1))
#
# Create location data for ephem package
#
self.locality = ephem.Observer()
self.locality.long = str(options.longitude)
self.locality.lat = str(options.latitude)
#
# What is the post-detector LPF cutoff for the FFT?
#
PULSAR_MAX_FREQ=int(options.lowpass)
# First low-pass filters down to input_rate/FIRST_FACTOR
# and decimates appropriately
FIRST_FACTOR=int(input_rate/(self.folder_input_rate/2))
first_filter = gr.firdes.low_pass (1.0,
input_rate,
input_rate/FIRST_FACTOR,
input_rate/(FIRST_FACTOR*20),
gr.firdes.WIN_HAMMING)
# Second filter runs at the output rate of the first filter,
# And low-pass filters down to PULSAR_MAX_FREQ*10
#
second_input_rate = int(input_rate/(FIRST_FACTOR/2))
second_filter = gr.firdes.band_pass(1.0, second_input_rate,
0.10,
PULSAR_MAX_FREQ*10,
PULSAR_MAX_FREQ*1.5,
gr.firdes.WIN_HAMMING)
# Third filter runs at PULSAR_MAX_FREQ*20
# and filters down to PULSAR_MAX_FREQ
#
third_input_rate = PULSAR_MAX_FREQ*20
        third_filter = gr.firdes.band_pass(1.0, third_input_rate,
0.10, PULSAR_MAX_FREQ,
PULSAR_MAX_FREQ/10.0,
gr.firdes.WIN_HAMMING)
#
# Create the appropriate FFT scope
#
self.scope = ra_fftsink.ra_fft_sink_f (panel,
fft_size=int(options.fft_size), sample_rate=PULSAR_MAX_FREQ*2,
title="Post-detector spectrum",
ofunc=self.pulsarfunc, xydfunc=self.xydfunc, fft_rate=200)
#
# Tell scope we're looking from DC to PULSAR_MAX_FREQ
#
self.scope.set_baseband_freq (0.0)
#
# Setup stripchart for showing pulse profiles
#
hz = "%5.3fHz " % self.pulse_freq
per = "(%5.3f sec)" % (1.0/self.pulse_freq)
sr = "%d sps" % (int(self.pulse_freq*self.folding))
times = " %d Pulse Intervals" % self.mult
self.chart = ra_stripchartsink.stripchart_sink_f (panel,
sample_rate=1,
stripsize=self.folding*FOLD_MULT, parallel=True, title="Pulse Profiles: "+hz+per+times,
xlabel="Seconds @ "+sr, ylabel="Level", autoscale=True,
divbase=self.divbase, scaling=1.0/(self.folding*self.pulse_freq))
self.chart.set_ref_level(self.reflevel)
self.chart.set_y_per_div(self.division)
# De-dispersion filter setup
#
# Do this here, just before creating the filter
# that will use the taps.
#
ntaps = self.compute_disp_ntaps(self.dm,self.bw,self.observing_freq)
# Taps for the de-dispersion filter
self.disp_taps = Numeric.zeros(ntaps,Numeric.Complex64)
# Compute the de-dispersion filter now
self.compute_dispfilter(self.dm,self.doppler,
self.bw,self.observing_freq)
#
# Call constructors for receive chains
#
#
# Now create the FFT filter using the computed taps
self.dispfilt = gr.fft_filter_ccc(1, self.disp_taps)
#
# Audio sink
#
#print "input_rate ", second_input_rate, "audiodev ", self.audiodev
#self.audio = audio.sink(second_input_rate, self.audiodev)
#
# The three post-detector filters
# Done this way to allow an audio path (up to 10Khz)
# ...and also because going from xMhz down to ~100Hz
# In a single filter doesn't seem to work.
#
self.first = gr.fir_filter_fff (FIRST_FACTOR/2, first_filter)
p = second_input_rate / (PULSAR_MAX_FREQ*20)
self.second = gr.fir_filter_fff (int(p), second_filter)
self.third = gr.fir_filter_fff (10, third_filter)
# Detector
self.detector = gr.complex_to_mag_squared()
self.enable_comb_filter = False
# Epoch folder comb filter
if self.enable_comb_filter == True:
bogtaps = Numeric.zeros(512, Numeric.Float64)
self.folder_comb = gr.fft_filter_ccc(1,bogtaps)
# Rational resampler
self.folder_rr = blks2.rational_resampler_fff(self.interp, self.decim)
# Epoch folder bandpass
bogtaps = Numeric.zeros(1, Numeric.Float64)
self.folder_bandpass = gr.fir_filter_fff (1, bogtaps)
# Epoch folder F2C/C2F
self.folder_f2c = gr.float_to_complex()
self.folder_c2f = gr.complex_to_float()
# Epoch folder S2P
self.folder_s2p = gr.serial_to_parallel (gr.sizeof_float,
self.folding*FOLD_MULT)
# Epoch folder IIR Filter (produces average pulse profiles)
self.folder_iir = gr.single_pole_iir_filter_ff(1.0/options.favg,
self.folding*FOLD_MULT)
#
# Set all the epoch-folder goop up
#
self.set_folding_params()
#
# Start connecting configured modules in the receive chain
#
# Connect raw USRP to de-dispersion filter, detector
self.connect(self.u, self.dispfilt, self.detector)
# Connect detector output to FIR LPF
# in two stages, followed by the FFT scope
self.connect(self.detector, self.first,
self.second, self.third, self.scope)
# Connect audio output
#self.connect(self.first, self.volume)
#self.connect(self.volume, (self.audio, 0))
#self.connect(self.volume, (self.audio, 1))
# Connect epoch folder
if self.enable_comb_filter == True:
self.connect (self.first, self.folder_bandpass, self.folder_rr,
self.folder_f2c,
self.folder_comb, self.folder_c2f,
self.folder_s2p, self.folder_iir,
self.chart)
else:
self.connect (self.first, self.folder_bandpass, self.folder_rr,
self.folder_s2p, self.folder_iir, self.chart)
# Connect baseband recording file (initially /dev/null)
self.connect(self.u, self.tofloat, self.tochar, self.recording)
# Connect pulse recording file (initially /dev/null)
self.connect(self.first, self.toshort, self.pulse_recording)
#
# Build the GUI elements
#
self._build_gui(vbox)
# Make GUI agree with command-line
self.myform['average'].set_value(int(options.avg))
self.myform['foldavg'].set_value(int(options.favg))
# Make spectral averager agree with command line
if options.avg != 1.0:
self.scope.set_avg_alpha(float(1.0/options.avg))
self.scope.set_average(True)
# set initial values
if options.gain is None:
# if no gain was specified, use the mid-point in dB
g = self.subdev.gain_range()
options.gain = float(g[0]+g[1])/2
if options.freq is None:
# if no freq was specified, use the mid-point
r = self.subdev.freq_range()
options.freq = float(r[0]+r[1])/2
self.set_gain(options.gain)
#self.set_volume(-10.0)
if not(self.set_freq(options.freq)):
self._set_status_msg("Failed to set initial frequency")
self.myform['decim'].set_value(self.u.decim_rate())
self.myform['fs@usb'].set_value(self.u.adc_freq() / self.u.decim_rate())
self.myform['dbname'].set_value(self.subdev.name())
self.myform['DM'].set_value(self.dm)
self.myform['Doppler'].set_value(self.doppler)
#
# Start the timer that shows current LMST on the GUI
#
self.lmst_timer.Start(1000)
def _set_status_msg(self, msg):
self.frame.GetStatusBar().SetStatusText(msg, 0)
def _build_gui(self, vbox):
def _form_set_freq(kv):
return self.set_freq(kv['freq'])
def _form_set_dm(kv):
return self.set_dm(kv['DM'])
def _form_set_doppler(kv):
return self.set_doppler(kv['Doppler'])
# Position the FFT or Waterfall
vbox.Add(self.scope.win, 5, wx.EXPAND)
vbox.Add(self.chart.win, 5, wx.EXPAND)
# add control area at the bottom
self.myform = myform = form.form()
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((7,0), 0, wx.EXPAND)
vbox1 = wx.BoxSizer(wx.VERTICAL)
myform['freq'] = form.float_field(
parent=self.panel, sizer=vbox1, label="Center freq", weight=1,
callback=myform.check_input_and_call(_form_set_freq, self._set_status_msg))
vbox1.Add((3,0), 0, 0)
# To show current Local Mean Sidereal Time
myform['lmst_high'] = form.static_text_field(
parent=self.panel, sizer=vbox1, label="Current LMST", weight=1)
vbox1.Add((3,0), 0, 0)
# To show current spectral cursor data
myform['spec_data'] = form.static_text_field(
parent=self.panel, sizer=vbox1, label="Pulse Freq", weight=1)
vbox1.Add((3,0), 0, 0)
# To show best pulses found in FFT output
myform['best_pulse'] = form.static_text_field(
parent=self.panel, sizer=vbox1, label="Best freq", weight=1)
vbox1.Add((3,0), 0, 0)
vboxBogus = wx.BoxSizer(wx.VERTICAL)
vboxBogus.Add ((2,0), 0, wx.EXPAND)
vbox2 = wx.BoxSizer(wx.VERTICAL)
g = self.subdev.gain_range()
myform['gain'] = form.slider_field(parent=self.panel, sizer=vbox2, label="RF Gain",
weight=1,
min=int(g[0]), max=int(g[1]),
callback=self.set_gain)
vbox2.Add((6,0), 0, 0)
myform['average'] = form.slider_field(parent=self.panel, sizer=vbox2,
label="Spectral Averaging", weight=1, min=1, max=200, callback=self.set_averaging)
vbox2.Add((6,0), 0, 0)
myform['foldavg'] = form.slider_field(parent=self.panel, sizer=vbox2,
label="Folder Averaging", weight=1, min=1, max=20, callback=self.set_folder_averaging)
vbox2.Add((6,0), 0, 0)
#myform['volume'] = form.quantized_slider_field(parent=self.panel, sizer=vbox2,
#label="Audio Volume", weight=1, range=(-20, 0, 0.5), callback=self.set_volume)
#vbox2.Add((6,0), 0, 0)
myform['DM'] = form.float_field(
parent=self.panel, sizer=vbox2, label="DM", weight=1,
callback=myform.check_input_and_call(_form_set_dm))
vbox2.Add((6,0), 0, 0)
myform['Doppler'] = form.float_field(
parent=self.panel, sizer=vbox2, label="Doppler", weight=1,
callback=myform.check_input_and_call(_form_set_doppler))
vbox2.Add((6,0), 0, 0)
# Baseband recording control
buttonbox = wx.BoxSizer(wx.HORIZONTAL)
self.record_control = form.button_with_callback(self.panel,
label="Recording baseband: Off ",
callback=self.toggle_recording)
self.record_pulse_control = form.button_with_callback(self.panel,
label="Recording pulses: Off ",
callback=self.toggle_pulse_recording)
buttonbox.Add(self.record_control, 0, wx.CENTER)
buttonbox.Add(self.record_pulse_control, 0, wx.CENTER)
vbox.Add(buttonbox, 0, wx.CENTER)
hbox.Add(vbox1, 0, 0)
hbox.Add(vboxBogus, 0, 0)
hbox.Add(vbox2, wx.ALIGN_RIGHT, 0)
vbox.Add(hbox, 0, wx.EXPAND)
self._build_subpanel(vbox)
self.lmst_timer = wx.PyTimer(self.lmst_timeout)
self.lmst_timeout()
def _build_subpanel(self, vbox_arg):
# build a secondary information panel (sometimes hidden)
# FIXME figure out how to have this be a subpanel that is always
# created, but has its visibility controlled by foo.Show(True/False)
if not(self.show_debug_info):
return
panel = self.panel
vbox = vbox_arg
myform = self.myform
#panel = wx.Panel(self.panel, -1)
#vbox = wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0)
myform['decim'] = form.static_float_field(
parent=panel, sizer=hbox, label="Decim")
hbox.Add((5,0), 1)
myform['fs@usb'] = form.static_float_field(
parent=panel, sizer=hbox, label="Fs@USB")
hbox.Add((5,0), 1)
myform['dbname'] = form.static_text_field(
parent=panel, sizer=hbox)
hbox.Add((5,0), 1)
myform['baseband'] = form.static_float_field(
parent=panel, sizer=hbox, label="Analog BB")
hbox.Add((5,0), 1)
myform['ddc'] = form.static_float_field(
parent=panel, sizer=hbox, label="DDC")
hbox.Add((5,0), 0)
vbox.Add(hbox, 0, wx.EXPAND)
def set_freq(self, target_freq):
"""
Set the center frequency we're interested in.
@param target_freq: frequency in Hz
        @rtype: bool
Tuning is a two step process. First we ask the front-end to
tune as close to the desired frequency as it can. Then we use
the result of that operation and our target_frequency to
determine the value for the digital down converter.
"""
r = usrp.tune(self.u, 0, self.subdev, target_freq)
if r:
self.myform['freq'].set_value(target_freq) # update displayed value
self.myform['baseband'].set_value(r.baseband_freq)
self.myform['ddc'].set_value(r.dxc_freq)
# Adjust self.frequency, and self.observing_freq
# We pick up the difference between the current self.frequency
# and the just-programmed one, and use this to adjust
# self.observing_freq. We have to do it this way to
# make the dedispersion filtering work out properly.
delta = target_freq - self.frequency
self.frequency = target_freq
self.observing_freq += delta
# Now that we're adjusted, compute a new dispfilter, and
# set the taps for the FFT filter.
ntaps = self.compute_disp_ntaps(self.dm, self.bw, self.observing_freq)
self.disp_taps = Numeric.zeros(ntaps, Numeric.Complex64)
self.compute_dispfilter(self.dm,self.doppler,self.bw,
self.observing_freq)
self.dispfilt.set_taps(self.disp_taps)
return True
return False
# Callback for gain-setting slider
def set_gain(self, gain):
self.myform['gain'].set_value(gain) # update displayed value
self.subdev.set_gain(gain)
#def set_volume(self, vol):
#self.myform['volume'].set_value(vol)
#self.volume.set_k((10**(vol/10))/8192)
# Callback for spectral-averaging slider
def set_averaging(self, avval):
self.myform['average'].set_value(avval)
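        # alpha = 1/N makes the scope's IIR averager span roughly N FFT frames.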
self.scope.set_avg_alpha(1.0/(avval))
self.scope.set_average(True)
def set_folder_averaging(self, avval):
self.myform['foldavg'].set_value(avval)
self.folder_iir.set_taps(1.0/avval)
# Timer callback to update LMST display
def lmst_timeout(self):
self.locality.date = ephem.now()
sidtime = self.locality.sidereal_time()
self.myform['lmst_high'].set_value(str(ephem.hours(sidtime)))
#
# Turn recording on/off
# Called-back by "Recording" button
#
def toggle_recording(self):
# Pick up current LMST
self.locality.date = ephem.now()
sidtime = self.locality.sidereal_time()
# Pick up localtime, for generating filenames
foo = time.localtime()
# Generate filenames for both data and header file
filename = "%04d%02d%02d%02d%02d.pdat" % (foo.tm_year, foo.tm_mon,
foo.tm_mday, foo.tm_hour, foo.tm_min)
hdrfilename = "%04d%02d%02d%02d%02d.phdr" % (foo.tm_year, foo.tm_mon,
foo.tm_mday, foo.tm_hour, foo.tm_min)
        # Currently recording? Flip state
if (self.recording_state == True):
self.recording_state = False
self.record_control.SetLabel("Recording baseband: Off ")
self.recording.close()
# Not recording?
else:
self.recording_state = True
self.record_control.SetLabel("Recording baseband to: "+filename)
# Cause gr_file_sink object to accept new filename
# note use of self.prefix--filename prefix from
# command line (defaults to ./)
#
self.recording.open (self.prefix+filename)
#
# We open the header file as a regular file, write header data,
# then close
hdrf = open(self.prefix+hdrfilename, "w")
hdrf.write("receiver center frequency: "+str(self.frequency)+"\n")
hdrf.write("observing frequency: "+str(self.observing_freq)+"\n")
hdrf.write("DM: "+str(self.dm)+"\n")
hdrf.write("doppler: "+str(self.doppler)+"\n")
hdrf.write("sidereal: "+str(ephem.hours(sidtime))+"\n")
hdrf.write("bandwidth: "+str(self.u.adc_freq() / self.u.decim_rate())+"\n")
hdrf.write("sample type: complex_char\n")
hdrf.write("sample size: "+str(gr.sizeof_char*2)+"\n")
hdrf.close()
#
    # Turn pulse recording on/off
    # Called-back by "Recording pulses" button
#
def toggle_pulse_recording(self):
# Pick up current LMST
self.locality.date = ephem.now()
sidtime = self.locality.sidereal_time()
# Pick up localtime, for generating filenames
foo = time.localtime()
# Generate filenames for both data and header file
filename = "%04d%02d%02d%02d%02d.padat" % (foo.tm_year, foo.tm_mon,
foo.tm_mday, foo.tm_hour, foo.tm_min)
hdrfilename = "%04d%02d%02d%02d%02d.pahdr" % (foo.tm_year, foo.tm_mon,
foo.tm_mday, foo.tm_hour, foo.tm_min)
        # Currently recording? Flip state
if (self.pulse_recording_state == True):
self.pulse_recording_state = False
self.record_pulse_control.SetLabel("Recording pulses: Off ")
self.pulse_recording.close()
# Not recording?
else:
self.pulse_recording_state = True
self.record_pulse_control.SetLabel("Recording pulses to: "+filename)
# Cause gr_file_sink object to accept new filename
# note use of self.prefix--filename prefix from
# command line (defaults to ./)
#
self.pulse_recording.open (self.prefix+filename)
#
# We open the header file as a regular file, write header data,
# then close
hdrf = open(self.prefix+hdrfilename, "w")
hdrf.write("receiver center frequency: "+str(self.frequency)+"\n")
hdrf.write("observing frequency: "+str(self.observing_freq)+"\n")
hdrf.write("DM: "+str(self.dm)+"\n")
hdrf.write("doppler: "+str(self.doppler)+"\n")
hdrf.write("pulse rate: "+str(self.pulse_freq)+"\n")
hdrf.write("pulse sps: "+str(self.pulse_freq*self.folding)+"\n")
hdrf.write("file sps: "+str(self.folder_input_rate)+"\n")
hdrf.write("sidereal: "+str(ephem.hours(sidtime))+"\n")
hdrf.write("bandwidth: "+str(self.u.adc_freq() / self.u.decim_rate())+"\n")
hdrf.write("sample type: short\n")
hdrf.write("sample size: 1\n")
hdrf.close()
# We get called at startup, and whenever the GUI "Set Folding params"
# button is pressed
#
def set_folding_params(self):
if (self.pulse_freq <= 0):
return
# Compute required sample rate
self.sample_rate = int(self.pulse_freq*self.folding)
# And the implied decimation rate
required_decimation = int(self.folder_input_rate / self.sample_rate)
# We also compute a new FFT comb filter, based on the expected
# spectral profile of our pulse parameters
#
# FFT-based comb filter
#
N_COMB_TAPS=int(self.sample_rate*4)
if N_COMB_TAPS > 2000:
N_COMB_TAPS = 2000
self.folder_comb_taps = Numeric.zeros(N_COMB_TAPS,Numeric.Complex64)
fincr = (self.sample_rate)/float(N_COMB_TAPS)
for i in range(0,len(self.folder_comb_taps)):
self.folder_comb_taps[i] = complex(0.0, 0.0)
freq = 0.0
harmonics = [1.0,2.0,3.0,4.0,5.0,6.0,7.0]
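        # Open pass-bands at the pulse fundamental plus its first six
        # harmonics; the fundamental (harmonics[j] == 1.0) gets double gain.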
for i in range(0,len(self.folder_comb_taps)/2):
for j in range(0,len(harmonics)):
if abs(freq - harmonics[j]*self.pulse_freq) <= fincr:
self.folder_comb_taps[i] = complex(4.0, 0.0)
if harmonics[j] == 1.0:
self.folder_comb_taps[i] = complex(8.0, 0.0)
freq += fincr
if self.enable_comb_filter == True:
# Set the just-computed FFT comb filter taps
self.folder_comb.set_taps(self.folder_comb_taps)
# And compute a new decimated bandpass filter, to go in front
# of the comb. Primary function is to decimate and filter down
# to an exact-ish multiple of the target pulse rate
#
self.folding_taps = gr.firdes_band_pass (1.0, self.folder_input_rate,
0.10, self.sample_rate/2, 10,
gr.firdes.WIN_HAMMING)
# Set the computed taps for the bandpass/decimate filter
self.folder_bandpass.set_taps (self.folding_taps)
#
# Record a spectral "hit" of a possible pulsar spectral profile
#
def record_hit(self,hits, hcavg, hcmax):
# Pick up current LMST
self.locality.date = ephem.now()
sidtime = self.locality.sidereal_time()
# Pick up localtime, for generating filenames
foo = time.localtime()
# Generate filenames for both data and header file
hitfilename = "%04d%02d%02d%02d.phit" % (foo.tm_year, foo.tm_mon,
foo.tm_mday, foo.tm_hour)
hitf = open(self.prefix+hitfilename, "a")
hitf.write("receiver center frequency: "+str(self.frequency)+"\n")
hitf.write("observing frequency: "+str(self.observing_freq)+"\n")
hitf.write("DM: "+str(self.dm)+"\n")
hitf.write("doppler: "+str(self.doppler)+"\n")
hitf.write("sidereal: "+str(ephem.hours(sidtime))+"\n")
hitf.write("bandwidth: "+str(self.u.adc_freq() / self.u.decim_rate())+"\n")
hitf.write("spectral peaks: "+str(hits)+"\n")
hitf.write("HCM: "+str(hcavg)+" "+str(hcmax)+"\n")
hitf.close()
# This is a callback used by ra_fftsink.py (passed on creation of
# ra_fftsink)
# Whenever the user moves the cursor within the FFT display, this
# shows the coordinate data
#
def xydfunc(self,xyv):
s = "%.6fHz\n%.3fdB" % (xyv[0], xyv[1])
if self.lowpass >= 500:
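            # For wider bandwidths the FFT display's x axis is in kHz, so
            # scale the cursor value back to Hz here (an assumption based
            # on the *1000 factor).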
s = "%.6fHz\n%.3fdB" % (xyv[0]*1000, xyv[1])
self.myform['spec_data'].set_value(s)
# This is another callback used by ra_fftsink.py (passed on creation
# of ra_fftsink). We pass this as our "calibrator" function, but
# we create interesting side-effects in the GUI.
#
# This function finds peaks in the FFT output data, and reports
# on them through the "Best" text object in the GUI
# It also computes the Harmonic Compliance Measure (HCM), and displays
# that also.
#
def pulsarfunc(self,d,l):
x = range(0,l)
incr = float(self.lowpass)/float(l)
incr = incr * 2.0
bestdb = -50.0
bestfreq = 0.0
avg = 0
dcnt = 0
#
# First, we need to find the average signal level
#
for i in x:
if (i * incr) > self.lowest_freq and (i*incr) < (self.lowpass-2):
avg += d[i]
dcnt += 1
# Set average signal level
avg /= dcnt
s2=" "
findcnt = 0
#
# Then we find candidates that are greater than the user-supplied
# threshold.
#
# We try to cluster "hits" whose whole-number frequency is the
# same, and compute an average "hit" frequency.
#
lastint = 0
hits=[]
intcnt = 0
freqavg = 0
for i in x:
freq = i*incr
# If frequency within bounds, and the (dB-avg) value is above our
# threshold
if freq > self.lowest_freq and freq < self.lowpass-2 and (d[i] - avg) > self.threshold:
# If we're finding a new whole-number frequency
if lastint != int(freq):
# Record "center" of this hit, if this is a new hit
if lastint != 0:
s2 += "%5.3fHz " % (freqavg/intcnt)
hits.append(freqavg/intcnt)
findcnt += 1
lastint = int(freq)
intcnt = 1
freqavg = freq
else:
intcnt += 1
freqavg += freq
if (findcnt >= 14):
break
if intcnt > 1:
s2 += "%5.3fHz " % (freqavg/intcnt)
hits.append(freqavg/intcnt)
#
# Compute the HCM, by dividing each of the "hits" by each of the
# other hits, and comparing the difference between a "perfect"
# harmonic, and the observed frequency ratio.
#
measure = 0
max_measure=0
mcnt = 0
avg_dist = 0
acnt = 0
for i in range(1,len(hits)):
meas = hits[i]/hits[0] - int(hits[i]/hits[0])
if abs((hits[i]-hits[i-1])-hits[0]) < 0.1:
avg_dist += hits[i]-hits[i-1]
acnt += 1
if meas > 0.98 and meas < 1.0:
meas = 1.0 - meas
meas *= hits[0]
if meas >= max_measure:
max_measure = meas
measure += meas
mcnt += 1
if mcnt > 0:
measure /= mcnt
if acnt > 0:
avg_dist /= acnt
if len(hits) > 1:
measure /= mcnt
s3="\nHCM: Avg %5.3fHz(%d) Max %5.3fHz Dist %5.3fHz(%d)" % (measure,mcnt,max_measure, avg_dist, acnt)
if max_measure < 0.5 and len(hits) >= 2:
self.record_hit(hits, measure, max_measure)
self.avg_dist = avg_dist
else:
s3="\nHCM: --"
s4="\nAvg dB: %4.2f" % avg
self.myform['best_pulse'].set_value("("+s2+")"+s3+s4)
# Since we are nominally a calibrator function for ra_fftsink, we
# simply return what they sent us, untouched. A "real" calibrator
# function could monkey with the data before returning it to the
# FFT display function.
return(d)
#
# Callback for the "DM" gui object
#
# We call compute_dispfilter() as appropriate to compute a new filter,
# and then set that new filter into self.dispfilt.
#
def set_dm(self,dm):
self.dm = dm
ntaps = self.compute_disp_ntaps (self.dm, self.bw, self.observing_freq)
self.disp_taps = Numeric.zeros(ntaps, Numeric.Complex64)
self.compute_dispfilter(self.dm,self.doppler,self.bw,self.observing_freq)
self.dispfilt.set_taps(self.disp_taps)
self.myform['DM'].set_value(dm)
return(dm)
#
# Callback for the "Doppler" gui object
#
# We call compute_dispfilter() as appropriate to compute a new filter,
# and then set that new filter into self.dispfilt.
#
def set_doppler(self,doppler):
self.doppler = doppler
ntaps = self.compute_disp_ntaps (self.dm, self.bw, self.observing_freq)
self.disp_taps = Numeric.zeros(ntaps, Numeric.Complex64)
self.compute_dispfilter(self.dm,self.doppler,self.bw,self.observing_freq)
self.dispfilt.set_taps(self.disp_taps)
self.myform['Doppler'].set_value(doppler)
return(doppler)
#
# Compute a de-dispersion filter
# From Hankins, et al, 1975
#
# This code translated from dedisp_filter.c from Swinburne
# pulsar software repository
#
def compute_dispfilter(self,dm,doppler,bw,centerfreq):
npts = len(self.disp_taps)
tmp = Numeric.zeros(npts, Numeric.Complex64)
        M_PI = math.pi  # full precision instead of a truncated literal
DM = dm/2.41e-10
#
        # Because astronomers are a crazy bunch, the "standard" calculation
        # is in MHz, rather than Hz
#
centerfreq = centerfreq / 1.0e6
bw = bw / 1.0e6
isign = int(bw / abs (bw))
# Center frequency may be doppler shifted
cfreq = centerfreq / doppler
# As well as the bandwidth..
bandwidth = bw / doppler
# Bandwidth divided among bins
binwidth = bandwidth / npts
# Delay is an "extra" parameter, in usecs, and largely
# untested in the Swinburne code.
delay = 0.0
# This determines the coefficient of the frequency response curve
# Linear in DM, but quadratic in center frequency
coeff = isign * 2.0*M_PI * DM / (cfreq*cfreq)
# DC to nyquist/2
n = 0
for i in range(0,int(npts/2)):
freq = (n + 0.5) * binwidth
phi = coeff*freq*freq/(cfreq+freq) + (2.0*M_PI*freq*delay)
tmp[i] = complex(math.cos(phi), math.sin(phi))
n += 1
# -nyquist/2 to DC
n = int(npts/2)
n *= -1
for i in range(int(npts/2),npts):
freq = (n + 0.5) * binwidth
phi = coeff*freq*freq/(cfreq+freq) + (2.0*M_PI*freq*delay)
tmp[i] = complex(math.cos(phi), math.sin(phi))
n += 1
self.disp_taps = numpy.fft.ifft(tmp)
return(self.disp_taps)
#
# Compute minimum number of taps required in de-dispersion FFT filter
#
def compute_disp_ntaps(self,dm,bw,freq):
#
        # Dt calculations are in MHz, rather than Hz
# crazy astronomers....
mbw = bw/1.0e6
mfreq = freq/1.0e6
f_lower = mfreq-(mbw/2)
f_upper = mfreq+(mbw/2)
# Compute smear time
Dt = dm/2.41e-4 * (1.0/(f_lower*f_lower)-1.0/(f_upper*f_upper))
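        # 1/2.41e-4 ~= 4.15e3 is the dispersion constant in s*MHz^2*cm^3/pc,
        # so Dt is the dispersion smearing time in seconds across the band.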
# ntaps is now bandwidth*smeartime
        # Should be bandwidth*smeartime*2, but the GNU Radio FFT filter
# already expands it by a factor of 2
ntaps = bw*Dt
if ntaps < 64:
ntaps = 64
return(int(ntaps))
def main ():
app = stdgui2.stdapp(app_flow_graph, "RADIO ASTRONOMY PULSAR RECEIVER: $Revision$", nstatus=1)
app.MainLoop()
if __name__ == '__main__':
main ()
| gpl-3.0 | 460,431,061,271,867,460 | 36.433394 | 136 | 0.572915 | false | 3.525262 | false | false | false | 0.010115 |
renhaocui/Social_Conversation_Connector | UIManager.py | 1 | 6937 | # -*- coding: utf-8 -*-
import requests
import json
import property
messengerThreadUrl = "https://graph.facebook.com/v2.6/me/messenger_profile?access_token="
def setMessengerGetStarted(token):
threadSettingURL = messengerThreadUrl + token
data = {
"get_started":{
"payload": "get_started"
}
}
response = requests.post(threadSettingURL, json=data, verify=False)
return response, json.loads(response.text)
def setMessengerGreeting(token):
threadSettingURL = messengerThreadUrl + token
data = {
"setting_type": "greeting",
"greeting": [{
"locale": "default",
"text": "Welcome to Astute Connect 2017!"
}]
}
response = requests.post(threadSettingURL, json=data, verify=False)
return response, json.loads(response.text)
'''
def setMessengerMenu(token):
threadSettingURL = messengerThreadUrl + token
data = {
"setting_type": "call_to_actions",
"thread_state": "existing_thread",
"call_to_actions": [
{
"type": "postback",
"title": "Home",
"payload": "home_page"
},
{
"type": "web_url",
"title": "Guides",
"url": "https://web.cse.ohio-state.edu/~cuir/site/GuidesHome.html"
},
{
"type": "web_url",
"title": "Roadside",
"url": "https://web.cse.ohio-state.edu/~cuir/site/test2.html"
}
]
}
response = requests.post(threadSettingURL, json=data, verify=False)
return response, json.loads(response.text)
'''
def setMessengerMenu(token):
threadSettingURL = messengerThreadUrl + token
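    # Messenger persistent menu: the Graph API allows at most three
    # top-level items; anything deeper must nest under type "nested".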
data = {
"persistent_menu": [
{
"locale": "default",
"composer_input_disabled": False,
"call_to_actions": [
{
"type": "postback",
"title": "Book",
"payload": "make a new flight reservation"
},
{
"type": "nested",
"title": "Manage",
"call_to_actions": [
{
"title": "Check in",
"type": "postback",
"payload": "flight check in EK201"
},
{
"title": "Flight Status",
"type": "postback",
"payload": "check flight status EK201"
},
{
"title": "Change Flight",
"type": "postback",
"payload": "change flight EK201"
},
{
"title": "Change Seat",
"type": "postback",
"payload": "change seat EK201"
}
]
},
{
"type": "nested",
"title": "Loyalty",
"call_to_actions": [
{
"title": "Join us",
"type": "web_url",
"url": "https://www.emirates.com/account/english/light-registration/"
},
{
"title": "Log in",
"type": "web_url",
"url": "https://www.emirates.com/account/english/login/login.aspx"
},
{
"title": "Membership",
"type": "web_url",
"url": "https://www.emirates.com/english/skywards/about/membership-tiers/membership-tiers.aspx"
},
{
"title": "Partners",
"type": "web_url",
"url": "https://www.emirates.com/english/skywards/about/partners/our-partners.aspx"
},
{
"title": "Contact us",
"type": "web_url",
"url": "https://www.emirates.com/english/help/contact-emirates/"
}
]
}
]
}
]
}
response = requests.post(threadSettingURL, json=data, verify=False)
return response, json.loads(response.text)
def getWeChatAccessToken(appid, appsecret):
url = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=' + appid + '&secret=' + appsecret
response = json.loads(requests.get(url, verify=False).content)
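    # The response also carries 'expires_in' (typically 7200 s); callers
    # should cache the token rather than fetching a fresh one per call.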
return response['access_token']
def setWeChatMenu(accessToken, lang):
url = 'https://api.weixin.qq.com/cgi-bin/menu/create?access_token=' + accessToken
if lang == 'en':
menu_data = {'button': [{'type': 'click', 'name': 'Home', 'key': 'home_box'},
{'type': 'view', 'name': 'Guides',
'url': 'http://web.cse.ohio-state.edu/~cuir/site/GuidesHome.html'},
{'type': 'view', 'name': 'Roadside',
'url': 'http://web.cse.ohio-state.edu/~cuir/site/test2.html'}]}
else:
menu_data = {'button': [{'type': 'click', 'name': "主页", 'key': 'home_box'},
{'type': 'view', 'name': "目录",
'url': 'http://web.cse.ohio-state.edu/~cuir/site copy/GuidesHome.html'},
{'type': 'view', 'name': "道路救援",
'url': 'http://web.cse.ohio-state.edu/~cuir/site copy/test2.html'}]}
response = requests.post(url, data=json.dumps(menu_data, ensure_ascii=False), verify=False).content
return response
if __name__ == "__main__":
token = 'EAAS7yNVP3rgBAElyTTOJBj4fZCD7iZA0HpR3TVudZBkbZBOWEAI03KUY5MbNAFhu2OGuBgZAAZCKMpulsg0iXUt6ybvcvZC6uaVPZAFjbZCHgsl4ZCZAxt9UB7jRCEuWP78rfcqkxZCZAhcrN6glZCWZAZAqOv9y0BN3GjE8H9lWZBsWvauSY9QZDZD'
#accessToken = getWeChatAccessToken(appid, appsecret)
#print setWeChatMenu(accessToken, 'zh')
print setMessengerGetStarted(token)
print setMessengerMenu(token)
#print setMessengerGreeting(property.facebookTokenList['AC2017'])
#print setMessengerGetStarted(messenger_token)
| mit | 8,846,200,259,499,579,000 | 38.323864 | 202 | 0.44011 | false | 4.139354 | false | false | false | 0.004046 |
campbe13/openhatch | vendor/packages/gdata/tests/atom_tests/auth_test.py | 128 | 1342 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.auth
import atom.http_core
class BasicAuthTest(unittest.TestCase):
def test_modify_request(self):
http_request = atom.http_core.HttpRequest()
credentials = atom.auth.BasicAuth('Aladdin', 'open sesame')
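    # 'QWxhZGRpbjpvcGVuIHNlc2FtZQ==' is base64('Aladdin:open sesame'),
    # the canonical HTTP Basic auth example from RFC 2617.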
self.assert_(credentials.basic_cookie == 'QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
credentials.modify_request(http_request)
self.assert_(http_request.headers[
'Authorization'] == 'Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==')
def suite():
return unittest.TestSuite((unittest.makeSuite(BasicAuthTest,'test'),))
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | 328,402,165,554,265,540 | 28.822222 | 76 | 0.717586 | false | 3.485714 | true | false | false | 0.003726 |
JaySon-Huang/WebModel | WebModel/utils/redis/sentinel.py | 1 | 11810 | import os
import random
import weakref
from WebModel.utils.redis.client import StrictRedis
from WebModel.utils.redis.connection import ConnectionPool, Connection
from WebModel.utils.redis.exceptions import ConnectionError, ResponseError, ReadOnlyError
from WebModel.utils.redis._compat import iteritems, nativestr, xrange
class MasterNotFoundError(ConnectionError):
pass
class SlaveNotFoundError(ConnectionError):
pass
class SentinelManagedConnection(Connection):
def __init__(self, **kwargs):
self.connection_pool = kwargs.pop('connection_pool')
super(SentinelManagedConnection, self).__init__(**kwargs)
def __repr__(self):
pool = self.connection_pool
s = '%s<service=%s%%s>' % (type(self).__name__, pool.service_name)
if self.host:
host_info = ',host=%s,port=%s' % (self.host, self.port)
s = s % host_info
return s
def connect_to(self, address):
self.host, self.port = address
super(SentinelManagedConnection, self).connect()
if self.connection_pool.check_connection:
self.send_command('PING')
if nativestr(self.read_response()) != 'PONG':
raise ConnectionError('PING failed')
def connect(self):
if self._sock:
return # already connected
if self.connection_pool.is_master:
self.connect_to(self.connection_pool.get_master_address())
else:
for slave in self.connection_pool.rotate_slaves():
try:
return self.connect_to(slave)
except ConnectionError:
continue
raise SlaveNotFoundError # Never be here
def read_response(self):
try:
return super(SentinelManagedConnection, self).read_response()
except ReadOnlyError:
if self.connection_pool.is_master:
                # When talking to a master, a ReadOnlyError likely
                # indicates that the previous master that we're still connected
# to has been demoted to a slave and there's a new master.
# calling disconnect will force the connection to re-query
# sentinel during the next connect() attempt.
self.disconnect()
raise ConnectionError('The previous master is now a slave')
raise
class SentinelConnectionPool(ConnectionPool):
"""
Sentinel backed connection pool.
If ``check_connection`` flag is set to True, SentinelManagedConnection
sends a PING command right after establishing the connection.
"""
def __init__(self, service_name, sentinel_manager, **kwargs):
kwargs['connection_class'] = kwargs.get(
'connection_class', SentinelManagedConnection)
self.is_master = kwargs.pop('is_master', True)
self.check_connection = kwargs.pop('check_connection', False)
super(SentinelConnectionPool, self).__init__(**kwargs)
self.connection_kwargs['connection_pool'] = weakref.proxy(self)
self.service_name = service_name
self.sentinel_manager = sentinel_manager
def __repr__(self):
return "%s<service=%s(%s)" % (
type(self).__name__,
self.service_name,
self.is_master and 'master' or 'slave',
)
def reset(self):
super(SentinelConnectionPool, self).reset()
self.master_address = None
self.slave_rr_counter = None
def get_master_address(self):
master_address = self.sentinel_manager.discover_master(
self.service_name)
if self.is_master:
if self.master_address is None:
self.master_address = master_address
elif master_address != self.master_address:
# Master address changed, disconnect all clients in this pool
self.disconnect()
return master_address
def rotate_slaves(self):
"Round-robin slave balancer"
slaves = self.sentinel_manager.discover_slaves(self.service_name)
if slaves:
if self.slave_rr_counter is None:
self.slave_rr_counter = random.randint(0, len(slaves) - 1)
for _ in xrange(len(slaves)):
self.slave_rr_counter = (
self.slave_rr_counter + 1) % len(slaves)
slave = slaves[self.slave_rr_counter]
yield slave
# Fallback to the master connection
try:
yield self.get_master_address()
except MasterNotFoundError:
pass
raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
def _checkpid(self):
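        # After a fork the child sees a different PID; rebuild the pool so
        # parent and child never share the same sockets.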
if self.pid != os.getpid():
self.disconnect()
self.reset()
self.__init__(self.service_name, self.sentinel_manager,
connection_class=self.connection_class,
max_connections=self.max_connections,
**self.connection_kwargs)
class Sentinel(object):
"""
Redis Sentinel cluster client
>>> from redis.sentinel import Sentinel
>>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
>>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
>>> master.set('foo', 'bar')
>>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
>>> slave.get('foo')
'bar'
``sentinels`` is a list of sentinel nodes. Each node is represented by
a pair (hostname, port).
    ``min_other_sentinels`` defines a minimum number of peers for a sentinel.
When querying a sentinel, if it doesn't meet this threshold, responses
from that sentinel won't be considered valid.
``sentinel_kwargs`` is a dictionary of connection arguments used when
connecting to sentinel instances. Any argument that can be passed to
a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
not specified, any socket_timeout and socket_keepalive options specified
in ``connection_kwargs`` will be used.
``connection_kwargs`` are keyword arguments that will be used when
establishing a connection to a Redis server.
"""
def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
**connection_kwargs):
# if sentinel_kwargs isn't defined, use the socket_* options from
# connection_kwargs
if sentinel_kwargs is None:
sentinel_kwargs = dict([(k, v)
for k, v in iteritems(connection_kwargs)
if k.startswith('socket_')
])
self.sentinel_kwargs = sentinel_kwargs
self.sentinels = [StrictRedis(hostname, port, **self.sentinel_kwargs)
for hostname, port in sentinels]
self.min_other_sentinels = min_other_sentinels
self.connection_kwargs = connection_kwargs
def __repr__(self):
sentinel_addresses = []
for sentinel in self.sentinels:
sentinel_addresses.append('%s:%s' % (
sentinel.connection_pool.connection_kwargs['host'],
sentinel.connection_pool.connection_kwargs['port'],
))
return '%s<sentinels=[%s]>' % (
type(self).__name__,
','.join(sentinel_addresses))
def check_master_state(self, state, service_name):
if not state['is_master'] or state['is_sdown'] or state['is_odown']:
return False
# Check if our sentinel doesn't see other nodes
if state['num-other-sentinels'] < self.min_other_sentinels:
return False
return True
def discover_master(self, service_name):
"""
Asks sentinel servers for the Redis master's address corresponding
to the service labeled ``service_name``.
Returns a pair (address, port) or raises MasterNotFoundError if no
master is found.
"""
for sentinel_no, sentinel in enumerate(self.sentinels):
try:
masters = sentinel.sentinel_masters()
except ConnectionError:
continue
state = masters.get(service_name)
if state and self.check_master_state(state, service_name):
# Put this sentinel at the top of the list
self.sentinels[0], self.sentinels[sentinel_no] = (
sentinel, self.sentinels[0])
return state['ip'], state['port']
raise MasterNotFoundError("No master found for %r" % (service_name,))
def filter_slaves(self, slaves):
"Remove slaves that are in an ODOWN or SDOWN state"
slaves_alive = []
for slave in slaves:
if slave['is_odown'] or slave['is_sdown']:
continue
slaves_alive.append((slave['ip'], slave['port']))
return slaves_alive
def discover_slaves(self, service_name):
"Returns a list of alive slaves for service ``service_name``"
for sentinel in self.sentinels:
try:
slaves = sentinel.sentinel_slaves(service_name)
except (ConnectionError, ResponseError):
continue
slaves = self.filter_slaves(slaves)
if slaves:
return slaves
return []
def master_for(self, service_name, redis_class=StrictRedis,
connection_pool_class=SentinelConnectionPool, **kwargs):
"""
Returns a redis client instance for the ``service_name`` master.
        A SentinelConnectionPool class is used to retrieve the master's
address before establishing a new connection.
NOTE: If the master's address has changed, any cached connections to
the old master are closed.
By default clients will be a redis.StrictRedis instance. Specify a
different class to the ``redis_class`` argument if you desire
something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs['is_master'] = True
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
return redis_class(connection_pool=connection_pool_class(
service_name, self, **connection_kwargs))
def slave_for(self, service_name, redis_class=StrictRedis,
connection_pool_class=SentinelConnectionPool, **kwargs):
"""
Returns redis client instance for the ``service_name`` slave(s).
        A SentinelConnectionPool class is used to retrieve the slave's
address before establishing a new connection.
By default clients will be a redis.StrictRedis instance. Specify a
different class to the ``redis_class`` argument if you desire
something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs['is_master'] = False
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
return redis_class(connection_pool=connection_pool_class(
service_name, self, **connection_kwargs))
| mit | 6,892,345,237,791,335,000 | 39.170068 | 89 | 0.617019 | false | 4.538816 | false | false | false | 0.000085 |
maysara/pandora_image | pandora/archive/migrations/0004_item_id_not_null.py | 1 | 13841 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, connection, transaction
class Migration(SchemaMigration):
def forwards(self, orm):
table_name = orm['archive.File']._meta.db_table
cursor = connection.cursor()
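        # Drop the NOT NULL constraint so File rows can exist without an
        # Item (matching null=True on the frozen 'item' field below).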
sql = 'ALTER TABLE "%s" ALTER COLUMN item_id DROP NOT NULL' % table_name
cursor.execute(sql)
transaction.commit_unless_managed()
def backwards(self, orm):
pass
models = {
'archive.file': {
'Meta': {'object_name': 'File'},
'audio_codec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bits_per_pixel': ('django.db.models.fields.FloatField', [], {'default': '-1'}),
'channels': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'display_aspect_ratio': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duration': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'extension': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True'}),
'framerate': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'height': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('ox.django.fields.DictField', [], {'default': '{}'}),
'is_audio': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_subtitle': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_video': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'null': 'True', 'to': "orm['item.Item']"}),
'language': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oshash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'part': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True'}),
'part_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'pixel_format': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pixels': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'samplerate': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'selected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'sort_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'uploading': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True'}),
'video_codec': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'wanted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'width': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'archive.frame': {
'Meta': {'unique_together': "(('file', 'position'),)", 'object_name': 'Frame'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'frames'", 'to': "orm['archive.File']"}),
'frame': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.FloatField', [], {})
},
'archive.instance': {
'Meta': {'unique_together': "(('path', 'volume'),)", 'object_name': 'Instance'},
'atime': ('django.db.models.fields.IntegerField', [], {'default': '1360404509'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctime': ('django.db.models.fields.IntegerField', [], {'default': '1360404509'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'to': "orm['archive.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignore': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mtime': ('django.db.models.fields.IntegerField', [], {'default': '1360404509'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'volume': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['archive.Volume']"})
},
'archive.stream': {
'Meta': {'unique_together': "(('file', 'resolution', 'format'),)", 'object_name': 'Stream'},
'aspect_ratio': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'color': ('ox.django.fields.TupleField', [], {'default': '[]'}),
'cuts': ('ox.django.fields.TupleField', [], {'default': '[]'}),
'duration': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'streams'", 'to': "orm['archive.File']"}),
'format': ('django.db.models.fields.CharField', [], {'default': "'webm'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('ox.django.fields.DictField', [], {'default': '{}'}),
'oshash': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'db_index': 'True'}),
'resolution': ('django.db.models.fields.IntegerField', [], {'default': '96'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'derivatives'", 'null': 'True', 'to': "orm['archive.Stream']"}),
'video': ('django.db.models.fields.files.FileField', [], {'default': 'None', 'max_length': '100', 'blank': 'True'}),
'volume': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'archive.volume': {
'Meta': {'unique_together': "(('user', 'name'),)", 'object_name': 'Volume'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'volumes'", 'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'item.item': {
'Meta': {'object_name': 'Item'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('ox.django.fields.DictField', [], {'default': '{}'}),
'external_data': ('ox.django.fields.DictField', [], {'default': '{}'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'items'", 'blank': 'True', 'to': "orm['auth.Group']"}),
'icon': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'itemId': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'blank': 'True'}),
'json': ('ox.django.fields.DictField', [], {'default': '{}'}),
'level': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''"}),
'oxdbId': ('django.db.models.fields.CharField', [], {'max_length': '42', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'poster': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'blank': 'True'}),
'poster_frame': ('django.db.models.fields.FloatField', [], {'default': '-1'}),
'poster_height': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'poster_source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'poster_width': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rendered': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'stream_aspect': ('django.db.models.fields.FloatField', [], {'default': '1.3333333333333333'}),
'stream_info': ('ox.django.fields.DictField', [], {'default': '{}'}),
'torrent': ('django.db.models.fields.files.FileField', [], {'default': 'None', 'max_length': '1000', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'null': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['archive']
| gpl-3.0 | 2,189,080,101,112,367,900 | 80.899408 | 182 | 0.542013 | false | 3.709729 | false | false | false | 0.008381 |
CognitionGuidedSurgery/restflow | restflow/server.py | 1 | 1729 | # -*- encoding: utf-8 -*-
# Copyright (C) 2013-2014 Alexander Weigl, Nicolai Schoch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Flask Restful Server for Hiflow3Session.
"""
__author__ = 'Alexander Weigl'
__date__ = '2014-07-11'
from flask import Flask
from flask.ext.restful import Api
from flask_restful_swagger import swagger
from . import config
from .services import *
app = Flask(__name__)
api = Api(app)
api = swagger.docs(api,
apiVersion='1.0', api_spec_url='/api/spec',
basePath=config.BASE_PATH)
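# swagger.docs wraps the Api so an auto-generated spec is served at
# api_spec_url ('/api/spec') under config.BASE_PATH.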
api.add_resource(TemplateList, '/template')
api.add_resource(Template, '/template/<string:type>')
api.add_resource(Assets, '/assets')
api.add_resource(Assets2, '/assets/<string:aid>')
api.add_resource(ResultFunctionsList, '/results')
# Session Management
api.add_resource(SimulationOpen, '/session')
api.add_resource(Simulation, '/session/<string:token>')
api.add_resource(RunSimulation, '/session/<string:token>/run')
# Simulation
api.add_resource(Result, '/session/<string:token>/result/<int:step>/<string:func>')
api.add_resource(ResultList, '/session/<string:token>/result/')
| gpl-3.0 | -2,525,602,553,456,813,600 | 31.018519 | 83 | 0.725275 | false | 3.514228 | false | false | false | 0.000578 |
vahtras/amy | extforms/urls.py | 1 | 1300 | from django.conf.urls import url, include
from extforms import views
urlpatterns = [
url(r'^swc/request/$', views.SWCEventRequest.as_view(), name='swc_workshop_request'),
url(r'^swc/request/confirm/$', views.SWCEventRequestConfirm.as_view(), name='swc_workshop_request_confirm'),
url(r'^dc/request/$', views.DCEventRequest.as_view(), name='dc_workshop_request'),
url(r'^dc/request/confirm/$', views.DCEventRequestConfirm.as_view(), name='dc_workshop_request_confirm'),
url(r'^dc/request_selforganized/$', views.DCSelfOrganizedEventRequest.as_view(), name='dc_workshop_selforganized_request'),
url(r'^dc/request_selforganized/confirm/$', views.DCSelfOrganizedEventRequestConfirm.as_view(), name='dc_workshop_selforganized_request_confirm'),
url(r'^submit/$', views.EventSubmission.as_view(), name='event_submit'),
# disabled as per @maneesha's request
# url(r'^submit/confirm/$', views.EventSubmissionConfirm.as_view(), name='event_submission_confirm'),
url(r'^update_profile/$', views.profileupdaterequest_create, name='profileupdate_request'),
url(r'^request_training/$', views.TrainingRequestCreate.as_view(), name='training_request'),
url(r'^request_training/confirm/$', views.TrainingRequestConfirm.as_view(), name='training_request_confirm'),
]
| mit | -2,958,334,986,162,801,000 | 71.222222 | 150 | 0.732308 | false | 3.412073 | false | true | false | 0.007692 |
rouxcode/django-admin-sort | admin_sort/tests/testapp/models.py | 1 | 5140 | # -*- coding: utf-8 -*-
from django.db import models
from admin_sort.models import SortableModelMixin
class Author(SortableModelMixin, models.Model):
"""
    SortableModelMixin: on save, first update any other instances whose
    positions are affected, then save this instance
"""
name = models.CharField('Name', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
position_field = 'my_order'
insert_position = 'last'
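    # SortableModelMixin reads these two class attributes: position_field
    # names the ordering column, insert_position controls where newly
    # created rows are slotted ('last' appends to the end).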
class Meta:
ordering = ('my_order', )
def __unicode__(self):
return self.name
class SortableBook(models.Model):
"""
    the classic sortable change list: drag-and-drop sorting, using SortableAdminMixin
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
class Meta(object):
ordering = ('my_order',)
def __unicode__(self):
return self.title
class AnotherSortableBook(models.Model):
"""
    the other sortable change list: dropdown sorting,
using DropdownSortableAdminMixin
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
class Meta(object):
ordering = ('my_order',)
def __unicode__(self):
return self.title
class Chapter(models.Model):
"""
    various SortableInlineMixin modes
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
another_book = models.ForeignKey(
AnotherSortableBook, null=True, on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
another_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', 'another_order', )
def __unicode__(self):
return 'Chapter: {0}'.format(self.title)
class Notes(models.Model):
"""
    various SortableInlineMixin modes
"""
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
another_book = models.ForeignKey(
AnotherSortableBook, null=True, on_delete=models.SET_NULL)
note = models.CharField('Note', null=True, blank=True, max_length=255)
another_field = models.CharField(
'Note2', null=True, blank=True, max_length=255)
one_more = models.CharField(
'Note3 (simulating tabular inlines)',
null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(blank=False, null=True)
another_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', 'another_order', )
def __unicode__(self):
return 'Note: {0}'.format(self.note)
class ChapterExtraZero(models.Model):
"""
    various SortableInlineMixin modes (testing "extra" on admin.Meta)
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', '-title')
def __unicode__(self):
return 'ChapterExtraZero: {0}'.format(self.title)
class NotesExtraZero(models.Model):
"""
    various SortableInlineMixin modes (testing "extra" on admin.Meta)
"""
another_field = models.CharField(
'Note2', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', 'another_field')
def __unicode__(self):
return 'NotesExtraZero: {0}'.format(self.another_field)
class Another(models.Model):
"""
    normal inline - should not be affected in any way
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', '-title')
def __unicode__(self):
return 'Another: {0}'.format(self.title)
class AnotherOne(models.Model):
"""
    normal inline - should not be affected in any way
"""
another_field = models.CharField(
'Note2', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
def __unicode__(self):
return 'AnotherOne: {0}'.format(self.another_field)
| mit | -3,920,569,112,322,875,400 | 31.531646 | 78 | 0.647665 | false | 3.79056 | false | false | false | 0 |
ademuk/django-oscar | src/oscar/apps/dashboard/catalogue/forms.py | 5 | 16311 | from django import forms
from django.core import exceptions
from django.forms.models import inlineformset_factory
from django.utils.translation import ugettext_lazy as _
from treebeard.forms import movenodeform_factory
from oscar.core.loading import get_class, get_model
from oscar.core.utils import slugify
from oscar.forms.widgets import ImageInput
Product = get_model('catalogue', 'Product')
ProductClass = get_model('catalogue', 'ProductClass')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
Category = get_model('catalogue', 'Category')
StockRecord = get_model('partner', 'StockRecord')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')
ProductSelect = get_class('dashboard.catalogue.widgets', 'ProductSelect')
CategoryForm = movenodeform_factory(
Category,
fields=['name', 'description', 'image'])
class ProductClassSelectForm(forms.Form):
"""
    Form which is used before creating a product to select its product class
"""
product_class = forms.ModelChoiceField(
label=_("Create a new product of type"),
empty_label=_("-- Choose type --"),
queryset=ProductClass.objects.all())
def __init__(self, *args, **kwargs):
"""
If there's only one product class, pre-select it
"""
super(ProductClassSelectForm, self).__init__(*args, **kwargs)
qs = self.fields['product_class'].queryset
if not kwargs.get('initial') and len(qs) == 1:
self.fields['product_class'].initial = qs[0]
class ProductSearchForm(forms.Form):
upc = forms.CharField(max_length=16, required=False, label=_('UPC'))
title = forms.CharField(
max_length=255, required=False, label=_('Product title'))
def clean(self):
cleaned_data = super(ProductSearchForm, self).clean()
cleaned_data['upc'] = cleaned_data['upc'].strip()
cleaned_data['title'] = cleaned_data['title'].strip()
return cleaned_data
class StockRecordForm(forms.ModelForm):
def __init__(self, product_class, user, *args, **kwargs):
# The user kwarg is not used by stock StockRecordForm. We pass it
# anyway in case one wishes to customise the partner queryset
self.user = user
super(StockRecordForm, self).__init__(*args, **kwargs)
# If not tracking stock, we hide the fields
if not product_class.track_stock:
del self.fields['num_in_stock']
del self.fields['low_stock_threshold']
else:
self.fields['price_excl_tax'].required = True
self.fields['num_in_stock'].required = True
class Meta:
model = StockRecord
fields = [
'partner', 'partner_sku',
'price_currency', 'price_excl_tax', 'price_retail', 'cost_price',
'num_in_stock', 'low_stock_threshold',
]
BaseStockRecordFormSet = inlineformset_factory(
Product, StockRecord, form=StockRecordForm, extra=1)
class StockRecordFormSet(BaseStockRecordFormSet):
def __init__(self, product_class, user, *args, **kwargs):
self.user = user
self.require_user_stockrecord = not user.is_staff
self.product_class = product_class
super(StockRecordFormSet, self).__init__(*args, **kwargs)
self.set_initial_data()
def set_initial_data(self):
"""
If user has only one partner associated, set the first
stock record's partner to it. Can't pre-select for staff users as
they're allowed to save a product without a stock record.
This is intentionally done after calling __init__ as passing initial
data to __init__ creates a form for each list item. So depending on
whether we can pre-select the partner or not, we'd end up with 1 or 2
forms for an unbound form.
"""
if self.require_user_stockrecord:
try:
user_partner = self.user.partners.get()
except (exceptions.ObjectDoesNotExist,
exceptions.MultipleObjectsReturned):
pass
else:
partner_field = self.forms[0].fields.get('partner', None)
if partner_field and partner_field.initial is None:
partner_field.initial = user_partner
def _construct_form(self, i, **kwargs):
kwargs['product_class'] = self.product_class
kwargs['user'] = self.user
return super(StockRecordFormSet, self)._construct_form(
i, **kwargs)
def clean(self):
"""
If the user isn't a staff user, this validation ensures that at least
        one stock record's partner is associated with the user's partners.
"""
if any(self.errors):
return
if self.require_user_stockrecord:
stockrecord_partners = set([form.cleaned_data.get('partner', None)
for form in self.forms])
user_partners = set(self.user.partners.all())
if not user_partners & stockrecord_partners:
raise exceptions.ValidationError(
_("At least one stock record must be set to a partner that"
" you're associated with."))
def _attr_text_field(attribute):
return forms.CharField(label=attribute.name,
required=attribute.required)
def _attr_textarea_field(attribute):
return forms.CharField(label=attribute.name,
widget=forms.Textarea(),
required=attribute.required)
def _attr_integer_field(attribute):
return forms.IntegerField(label=attribute.name,
required=attribute.required)
def _attr_boolean_field(attribute):
return forms.BooleanField(label=attribute.name,
required=attribute.required)
def _attr_float_field(attribute):
return forms.FloatField(label=attribute.name,
required=attribute.required)
def _attr_date_field(attribute):
return forms.DateField(label=attribute.name,
required=attribute.required,
widget=forms.widgets.DateInput)
def _attr_option_field(attribute):
return forms.ModelChoiceField(
label=attribute.name,
required=attribute.required,
queryset=attribute.option_group.options.all())
def _attr_multi_option_field(attribute):
return forms.ModelMultipleChoiceField(
label=attribute.name,
required=attribute.required,
queryset=attribute.option_group.options.all())
def _attr_entity_field(attribute):
    # Product entities don't have out-of-the-box support in the ProductForm.
# There is no ModelChoiceField for generic foreign keys, and there's no
# good default behaviour anyway; offering a choice of *all* model instances
# is hardly useful.
return None
def _attr_numeric_field(attribute):
return forms.FloatField(label=attribute.name,
required=attribute.required)
def _attr_file_field(attribute):
return forms.FileField(
label=attribute.name, required=attribute.required)
def _attr_image_field(attribute):
return forms.ImageField(
label=attribute.name, required=attribute.required)
class ProductForm(forms.ModelForm):
FIELD_FACTORIES = {
"text": _attr_text_field,
"richtext": _attr_textarea_field,
"integer": _attr_integer_field,
"boolean": _attr_boolean_field,
"float": _attr_float_field,
"date": _attr_date_field,
"option": _attr_option_field,
"multi_option": _attr_multi_option_field,
"entity": _attr_entity_field,
"numeric": _attr_numeric_field,
"file": _attr_file_field,
"image": _attr_image_field,
}
class Meta:
model = Product
fields = [
'title', 'upc', 'description', 'is_discountable', 'structure']
widgets = {
'structure': forms.HiddenInput()
}
def __init__(self, product_class, data=None, parent=None, *args, **kwargs):
self.set_initial(product_class, parent, kwargs)
super(ProductForm, self).__init__(data, *args, **kwargs)
if parent:
self.instance.parent = parent
# We need to set the correct product structures explicitly to pass
# attribute validation and child product validation. Note that
# those changes are not persisted.
self.instance.structure = Product.CHILD
self.instance.parent.structure = Product.PARENT
self.delete_non_child_fields()
else:
# Only set product class for non-child products
self.instance.product_class = product_class
self.add_attribute_fields(product_class, self.instance.is_parent)
if 'title' in self.fields:
self.fields['title'].widget = forms.TextInput(
attrs={'autocomplete': 'off'})
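        # After construction, the form carries one dynamic field per class
        # attribute; e.g. a product class with a hypothetical 'weight' float
        # attribute exposes self.fields['attr_weight'] as a FloatField.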
def set_initial(self, product_class, parent, kwargs):
"""
Set initial data for the form. Sets the correct product structure
and fetches initial values for the dynamically constructed attribute
fields.
"""
if 'initial' not in kwargs:
kwargs['initial'] = {}
self.set_initial_attribute_values(product_class, kwargs)
if parent:
kwargs['initial']['structure'] = Product.CHILD
def set_initial_attribute_values(self, product_class, kwargs):
"""
        Update kwargs['initial'] with initial values based on the product
        instance's attribute values.
"""
instance = kwargs.get('instance')
if instance is None:
return
for attribute in product_class.attributes.all():
try:
value = instance.attribute_values.get(
attribute=attribute).value
except exceptions.ObjectDoesNotExist:
pass
else:
kwargs['initial']['attr_%s' % attribute.code] = value
def add_attribute_fields(self, product_class, is_parent=False):
"""
For each attribute specified by the product class, this method
dynamically adds form fields to the product form.
"""
for attribute in product_class.attributes.all():
field = self.get_attribute_field(attribute)
if field:
self.fields['attr_%s' % attribute.code] = field
# Attributes are not required for a parent product
if is_parent:
self.fields['attr_%s' % attribute.code].required = False
def get_attribute_field(self, attribute):
"""
Gets the correct form field for a given attribute type.
"""
return self.FIELD_FACTORIES[attribute.type](attribute)
def delete_non_child_fields(self):
"""
Deletes any fields not needed for child products. Override this if
you want to e.g. keep the description field.
"""
for field_name in ['description', 'is_discountable']:
if field_name in self.fields:
del self.fields[field_name]
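    # A sketch of the override suggested in the docstring above, keeping the
    # description field on child products (the subclass is hypothetical):
    #
    #   class MyChildProductForm(ProductForm):
    #       def delete_non_child_fields(self):
    #           if 'is_discountable' in self.fields:
    #               del self.fields['is_discountable']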
def _post_clean(self):
"""
Set attributes before ModelForm calls the product's clean method
(which it does in _post_clean), which in turn validates attributes.
"""
product_class = self.instance.get_product_class()
for attribute in product_class.attributes.all():
field_name = 'attr_%s' % attribute.code
# An empty text field won't show up in cleaned_data.
if field_name in self.cleaned_data:
value = self.cleaned_data[field_name]
setattr(self.instance.attr, attribute.code, value)
super(ProductForm, self)._post_clean()
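# FIELD_FACTORIES doubles as an extension point: get_attribute_field simply
# looks the attribute type up in it (unknown types raise a KeyError). A
# sketch of a project-level subclass registering a custom type ('colour' and
# ColourField are hypothetical):
#
#   class MyProductForm(ProductForm):
#       FIELD_FACTORIES = dict(
#           ProductForm.FIELD_FACTORIES,
#           colour=lambda attribute: ColourField(
#               label=attribute.name, required=attribute.required))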
class StockAlertSearchForm(forms.Form):
status = forms.CharField(label=_('Status'))
class ProductCategoryForm(forms.ModelForm):
class Meta:
model = ProductCategory
fields = ('category', )
BaseProductCategoryFormSet = inlineformset_factory(
Product, ProductCategory, form=ProductCategoryForm, extra=1,
can_delete=True)
class ProductCategoryFormSet(BaseProductCategoryFormSet):
def __init__(self, product_class, user, *args, **kwargs):
        # This override only exists to drop the extra arguments
super(ProductCategoryFormSet, self).__init__(*args, **kwargs)
def clean(self):
if not self.instance.is_child and self.get_num_categories() == 0:
raise forms.ValidationError(
_("Stand-alone and parent products "
"must have at least one category"))
if self.instance.is_child and self.get_num_categories() > 0:
raise forms.ValidationError(
_("A child product should not have categories"))
def get_num_categories(self):
num_categories = 0
for i in range(0, self.total_form_count()):
form = self.forms[i]
if (hasattr(form, 'cleaned_data')
and form.cleaned_data.get('category', None)
and not form.cleaned_data.get('DELETE', False)):
num_categories += 1
return num_categories
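# ProductCategoryFormSet is assumed to be instantiated like the other
# dashboard formsets, e.g. ProductCategoryFormSet(product_class, request.user,
# request.POST or None, instance=product); product_class and user are accepted
# purely to keep the constructor signature uniform across these formsets.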
class ProductImageForm(forms.ModelForm):
class Meta:
model = ProductImage
fields = ['product', 'original', 'caption']
        # Use the ImageInput widget to render HTML that displays the
        # currently uploaded image and opens the upload dialog when the
        # image is clicked.
widgets = {
'original': ImageInput(),
}
def save(self, *args, **kwargs):
# We infer the display order of the image based on the order of the
# image fields within the formset.
kwargs['commit'] = False
obj = super(ProductImageForm, self).save(*args, **kwargs)
obj.display_order = self.get_display_order()
obj.save()
return obj
def get_display_order(self):
return self.prefix.split('-').pop()
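    # For example, assuming the default 'images' prefix, the third form in
    # the formset is prefixed 'images-2', so its image is saved with
    # display_order '2'; ordering thus follows the order of the forms.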
BaseProductImageFormSet = inlineformset_factory(
Product, ProductImage, form=ProductImageForm, extra=2)
class ProductImageFormSet(BaseProductImageFormSet):
def __init__(self, product_class, user, *args, **kwargs):
super(ProductImageFormSet, self).__init__(*args, **kwargs)
class ProductRecommendationForm(forms.ModelForm):
class Meta:
model = ProductRecommendation
fields = ['primary', 'recommendation', 'ranking']
widgets = {
'recommendation': ProductSelect,
}
BaseProductRecommendationFormSet = inlineformset_factory(
Product, ProductRecommendation, form=ProductRecommendationForm,
extra=5, fk_name="primary")
class ProductRecommendationFormSet(BaseProductRecommendationFormSet):
def __init__(self, product_class, user, *args, **kwargs):
super(ProductRecommendationFormSet, self).__init__(*args, **kwargs)
class ProductClassForm(forms.ModelForm):
class Meta:
model = ProductClass
fields = ['name', 'requires_shipping', 'track_stock', 'options']
class ProductAttributesForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProductAttributesForm, self).__init__(*args, **kwargs)
        # The code field isn't required here, because we allow submission of
        # the form with a blank code so that one can be generated from the
        # name in clean_code().
        self.fields["code"].required = False
self.fields["option_group"].help_text = _("Select an option group")
def clean_code(self):
code = self.cleaned_data.get("code")
title = self.cleaned_data.get("name")
if not code and title:
code = slugify(title)
return code
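        # For example, an attribute named "Screen Size" submitted with a
        # blank code ends up with code slugify("Screen Size"), i.e.
        # "screen-size" under Django's default slugify (an assumption if a
        # custom slug function is configured).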
class Meta:
model = ProductAttribute
fields = ["name", "code", "type", "option_group", "required"]
ProductAttributesFormSet = inlineformset_factory(ProductClass,
ProductAttribute,
form=ProductAttributesForm,
extra=3)
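# A hedged sketch of wiring ProductClassForm and ProductAttributesFormSet
# together in a dashboard view (names are illustrative, not this module's
# actual view code):
#
#   form = ProductClassForm(request.POST or None, instance=product_class)
#   formset = ProductAttributesFormSet(request.POST or None,
#                                      instance=product_class)
#   if form.is_valid() and formset.is_valid():
#       product_class = form.save()
#       formset.save()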