repo_name stringlengths 5–100 | ref stringlengths 12–67 | path stringlengths 4–244 | copies stringlengths 1–8 | content stringlengths 0–1.05M ⌀
---|---|---|---|---|
psibi/pyuClassify | refs/heads/master | uclassify/uclassify.py | 1 | #!/usr/bin/python
# Copyright (C) 2012 Sibi <sibi@psibi.in>
#
# This file is part of pyuClassify.
#
# pyuClassify program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyuClassify program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyuClassify. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Sibi <sibi@psibi.in>
from xml.dom.minidom import Document
from time import gmtime, strftime
from .uclassify_eh import uClassifyError
import xml.dom.minidom
import requests
import base64
class uclassify:
def __init__(self):
self.api_url = "http://api.uclassify.com"
self.writeApiKey=None
self.readApiKey=None
def setWriteApiKey(self,key):
self.writeApiKey = key
def setReadApiKey(self,key):
self.readApiKey = key
def _buildbasicXMLdoc(self):
doc = Document()
root_element = doc.createElementNS('http://api.uclassify.com/1/RequestSchema', 'uclassify')
root_element.setAttribute("version", "1.01")
root_element.setAttribute("xmlns", "http://api.uclassify.com/1/RequestSchema")
doc.appendChild(root_element)
#texts = doc.createElement("texts")
#root_element.appendChild(texts)
#print(doc.toprettyxml())
return doc,root_element
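# For reference, a request document built by this helper looks roughly
# like the sketch below (based on the schema URL and version set above,
# not on an official server-side sample); texts and read/write calls are
# appended later by the public methods:
#
#   <?xml version="1.0" ?>
#   <uclassify xmlns="http://api.uclassify.com/1/RequestSchema" version="1.01">
#     <texts>...</texts>
#     <writeCalls writeApiKey="..." classifierName="...">...</writeCalls>
#   </uclassify>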
def _getText(self,nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def _getResponseCode(self,content):
"""Returns the status code from the content.
:param content: (required) XML Response content
"""
doc = xml.dom.minidom.parseString(content)
node = doc.documentElement
status = node.getElementsByTagName("status")
success = status[0].getAttribute("success")
status_code = status[0].getAttribute("statusCode")
text = self._getText(status[0].childNodes)
return success, status_code, text
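# The response envelope is expected to carry a <status> element roughly
# like the sketch below (inferred from the attribute names parsed above,
# not copied from official documentation):
#
#   <uclassify version="1.01">
#     <status success="true" statusCode="2000">OK</status>
#   </uclassify>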
def create(self,classifierName):
"""Creates a new classifier.
:param classifierName: (required) Name of the classifier to create.
"""
doc,root_element = self._buildbasicXMLdoc()
writecalls = doc.createElement("writeCalls")
if self.writeApiKey is None:
raise uClassifyError("Write API Key not Initialized")
writecalls.setAttribute("writeApiKey",self.writeApiKey)
writecalls.setAttribute("classifierName",classifierName)
create = doc.createElement("create")
cur_time = strftime("%Y%m%d%H%M", gmtime())
create.setAttribute("id",cur_time + "create" + classifierName)
root_element.appendChild(writecalls)
writecalls.appendChild(create)
r = requests.post(self.api_url,doc.toxml())
if r.status_code == 200:
success, status_code, text = self._getResponseCode(r.content)
if success == "false":
raise uClassifyError(text,status_code)
else:
raise uClassifyError("Bad XML Request Sent")
def addClass(self,className,classifierName):
"""Adds class to an existing Classifier.
:param className: (required) A list of class names to be added to the given classifier.
:param classifierName: (required) Classifier where the classes will be added to.
"""
doc, root_element = self._buildbasicXMLdoc()
writecalls = doc.createElement("writeCalls")
if self.writeApiKey is None:
raise uClassifyError("Write API Key not Initialized")
writecalls.setAttribute("writeApiKey",self.writeApiKey)
writecalls.setAttribute("classifierName",classifierName)
root_element.appendChild(writecalls)
for clas in className:
addclass = doc.createElement("addClass")
addclass.setAttribute("id","AddClass" + clas)
addclass.setAttribute("className",clas)
writecalls.appendChild(addclass)
r = requests.post(self.api_url,doc.toxml())
if r.status_code == 200:
success, status_code, text = self._getResponseCode(r.content)
if success == "false":
raise uClassifyError(text,status_code)
else:
raise uClassifyError("Bad XML Request Sent")
def removeClass(self,className,classifierName):
"""Removes class from an existing Classifier.
:param className: (required) A list of class names to be removed from the given classifier.
:param classifierName: (required) Classifier from which the classes will be removed.
"""
doc, root_element = self._buildbasicXMLdoc()
writecalls = doc.createElement("writeCalls")
if self.writeApiKey is None:
raise uClassifyError("Write API Key not Initialized")
writecalls.setAttribute("writeApiKey",self.writeApiKey)
writecalls.setAttribute("classifierName",classifierName)
root_element.appendChild(writecalls)
for clas in className:
removeclass = doc.createElement("removeClass")
removeclass.setAttribute("id","removeClass" + clas)
removeclass.setAttribute("className",clas)
writecalls.appendChild(removeclass)
r = requests.post(self.api_url,doc.toxml())
if r.status_code == 200:
success, status_code, text = self._getResponseCode(r.content)
if success == "false":
raise uClassifyError(text,status_code)
else:
raise uClassifyError("Bad XML Request Sent")
def train(self,texts,className,classifierName):
"""Performs training on a single classs.
:param texts: (required) A List of text used up for training.
:param className: (required) Name of the class that needs to be trained.
:param classifierName: (required) Name of the Classifier
"""
base64texts = []
for text in texts:
base64_text = base64.b64encode(text.encode('utf-8'))
base64texts.append(base64_text)
doc,root_element = self._buildbasicXMLdoc()
textstag = doc.createElement("texts")
writecalls = doc.createElement("writeCalls")
if self.writeApiKey is None:
raise uClassifyError("Write API Key not Initialized")
writecalls.setAttribute("writeApiKey",self.writeApiKey)
writecalls.setAttribute("classifierName",classifierName)
root_element.appendChild(textstag)
root_element.appendChild(writecalls)
counter = 1
for text in base64texts:
textbase64 = doc.createElement("textBase64")
traintag = doc.createElement("train")
textbase64.setAttribute("id",className + "Text" + str(counter))
ptext = doc.createTextNode(text.decode('utf-8'))
textbase64.appendChild(ptext)
textstag.appendChild(textbase64)
traintag.setAttribute("id","Train"+className+ str(counter))
traintag.setAttribute("className",className)
traintag.setAttribute("textId",className + "Text" + str(counter))
counter = counter + 1
writecalls.appendChild(traintag)
r = requests.post(self.api_url,doc.toxml())
if r.status_code == 200:
success, status_code, text = self._getResponseCode(r.content)
if success == "false":
raise uClassifyError(text,status_code)
else:
raise uClassifyError("Bad XML Request Sent")
def untrain(self,texts,className,classifierName):
"""Performs untraining on text for a specific class.
:param texts: (required) A list of texts to untrain on.
:param className: (required) Name of the class.
:param classifierName: (required) Name of the Classifier
"""
base64texts = []
for text in texts:
base64_text = base64.b64encode(text.encode('utf-8'))
base64texts.append(base64_text)
doc,root_element = self._buildbasicXMLdoc()
textstag = doc.createElement("texts")
writecalls = doc.createElement("writeCalls")
if self.writeApiKey is None:
raise uClassifyError("Write API Key not Initialized")
writecalls.setAttribute("writeApiKey",self.writeApiKey)
writecalls.setAttribute("classifierName",classifierName)
root_element.appendChild(textstag)
root_element.appendChild(writecalls)
counter = 1
for text in base64texts:
textbase64 = doc.createElement("textBase64")
traintag = doc.createElement("untrain")
textbase64.setAttribute("id",className + "Text" + str(counter))
ptext = doc.createTextNode(text.decode('utf-8'))
textbase64.appendChild(ptext)
textstag.appendChild(textbase64)
traintag.setAttribute("id","Untrain"+className+ str(counter))
traintag.setAttribute("className",className)
traintag.setAttribute("textId",className + "Text" + str(counter))
counter = counter + 1
writecalls.appendChild(traintag)
r = requests.post(self.api_url,doc.toxml())
if r.status_code == 200:
success, status_code, text = self._getResponseCode(r.content)
if success == "false":
raise uClassifyError(text,status_code)
else:
raise uClassifyError("Bad XML Request Sent")
def classify(self,texts,classifierName,username = None):
"""Performs classification on texts.
:param texts: (required) A list of texts to classify.
:param classifierName: (required) Classifier Name
:param username: (optional) Name of the user who owns the classifier.
"""
doc,root_element = self._buildbasicXMLdoc()
textstag = doc.createElement("texts")
readcalls = doc.createElement("readCalls")
if self.readApiKey is None:
raise uClassifyError("Read API Key not Initialized")
readcalls.setAttribute("readApiKey",self.readApiKey)
root_element.appendChild(textstag)
root_element.appendChild(readcalls)
base64texts = []
for text in texts:
base64_text = base64.b64encode(text.encode('utf-8'))
base64texts.append(base64_text)
counter = 1
for text in base64texts:
textbase64 = doc.createElement("textBase64")
classifytag = doc.createElement("classify")
textbase64.setAttribute("id","Classifytext"+ str(counter))
ptext = doc.createTextNode(text.decode('utf-8'))
textbase64.appendChild(ptext)
classifytag.setAttribute("id","Classify"+ str(counter))
classifytag.setAttribute("classifierName",classifierName)
classifytag.setAttribute("textId","Classifytext"+str(counter))
if username is not None:
classifytag.setAttribute("username",username)
textstag.appendChild(textbase64)
readcalls.appendChild(classifytag)
counter = counter + 1
r = requests.post(self.api_url,doc.toxml())
if r.status_code == 200:
success, status_code, text = self._getResponseCode(r.content)
if success == "false":
raise uClassifyError(text,status_code)
else:
return self.parseClassifyResponse(r.content,texts)
else:
raise uClassifyError("Bad XML Request Sent")
def parseClassifyResponse(self,content,texts):
"""Parses the Classifier response from the server.
:param content: (required) XML Response from server.
"""
counter = 0
doc = xml.dom.minidom.parseString(content)
node = doc.documentElement
result = []
classifytags = node.getElementsByTagName("classification")
for classi in classifytags:
text_coverage = classi.getAttribute("textCoverage")
classtags = classi.getElementsByTagName("class")
cresult = []
for ctag in classtags:
classname = ctag.getAttribute("className")
cper = ctag.getAttribute("p")
tup = (classname,cper)
cresult.append(tup)
result.append((texts[counter],text_coverage,cresult))
counter = counter + 1
return result
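# The classify response parsed above is expected to look roughly like the
# sketch below; the class names and probabilities are hypothetical, and the
# structure is inferred from the tag/attribute names used in this parser:
#
#   <classification id="Classify1" textCoverage="1">
#     <class className="spam" p="0.97"/>
#     <class className="ham" p="0.03"/>
#   </classification>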
def classifyKeywords(self,texts,classifierName,username = None):
"""Performs classification on texts.
:param texts: (required) A List of texts that needs to be classified.
:param classifierName: (required) Classifier Name
:param username: (optional): Name of the user, under whom the classifier exists.
"""
doc,root_element = self._buildbasicXMLdoc()
textstag = doc.createElement("texts")
readcalls = doc.createElement("readCalls")
if self.readApiKey is None:
raise uClassifyError("Read API Key not Initialized")
readcalls.setAttribute("readApiKey",self.readApiKey)
root_element.appendChild(textstag)
root_element.appendChild(readcalls)
base64texts = []
for text in texts:
base64_text = base64.b64encode(text.encode('utf-8'))
base64texts.append(base64_text)
counter = 1
for text in base64texts:
textbase64 = doc.createElement("textBase64")
classifytag = doc.createElement("classifyKeywords")
textbase64.setAttribute("id","Classifytext"+ str(counter))
ptext = doc.createTextNode(text.decode('utf-8'))
textbase64.appendChild(ptext)
classifytag.setAttribute("id","Classify"+ str(counter))
classifytag.setAttribute("classifierName",classifierName)
classifytag.setAttribute("textId","Classifytext"+str(counter))
if username is not None:
classifytag.setAttribute("username",username)
textstag.appendChild(textbase64)
readcalls.appendChild(classifytag)
counter = counter + 1
r = requests.post(self.api_url,doc.toxml())
if r.status_code == 200:
success, status_code, text = self._getResponseCode(r.content)
if success == "false":
raise uClassifyError(text,status_code)
else:
return self.parseClassifyKeywordResponse(r.content,texts)
else:
raise uClassifyError("Bad XML Request Sent")
def parseClassifyKeywordResponse(self,content,texts):
"""Parses the Classifier response from the server.
:param content: (required) XML Response from server.
"""
counter = 0
doc = xml.dom.minidom.parseString(content)
node = doc.documentElement
result = []
keyw = []
classifytags = node.getElementsByTagName("classification")
keywordstags = node.getElementsByTagName("keywords")
for keyword in keywordstags:
classtags = keyword.getElementsByTagName("class")
for ctag in classtags:
kw = ctag.firstChild.data
if kw != "":
keyw.append(kw)
for classi in classifytags:
text_coverage = classi.getAttribute("textCoverage")
classtags = classi.getElementsByTagName("class")
cresult = []
for ctag in classtags:
classname = ctag.getAttribute("className")
cper = ctag.getAttribute("p")
tup = (classname,cper)
cresult.append(tup)
result.append((texts[counter],text_coverage,cresult,keyw))
counter = counter + 1
return result
def getInformation(self,classifierName):
"""Returns Information about the Classifier in a List.
:param classifierName: (required) Classifier Name
"""
doc,root_element = self._buildbasicXMLdoc()
readcalls = doc.createElement("readCalls")
if self.readApiKey is None:
raise uClassifyError("Read API Key not Initialized")
readcalls.setAttribute("readApiKey",self.readApiKey)
root_element.appendChild(readcalls)
getinfotag = doc.createElement("getInformation")
getinfotag.setAttribute("id","GetInformation")
getinfotag.setAttribute("classifierName",classifierName)
readcalls.appendChild(getinfotag)
r = requests.post(self.api_url,doc.toxml())
if r.status_code == 200:
success, status_code, text = self._getResponseCode(r.content)
if success == "false":
raise uClassifyError(text,status_code)
else:
return self._parseClassifierInformation(r.content)
else:
raise uClassifyError("Bad XML Request Sent")
def _parseClassifierInformation(self,content):
doc = xml.dom.minidom.parseString(content)
node = doc.documentElement
classinfo = node.getElementsByTagName("classInformation")
result = []
for classes in classinfo:
cname = classes.getAttribute("className")
uf = classes.getElementsByTagName("uniqueFeatures")
tc = classes.getElementsByTagName("totalCount")
for uniquef in uf:
uf_data = uniquef.firstChild.data
for totalc in tc:
tc_data = totalc.firstChild.data
result.append((cname,uf_data,tc_data))
return result
def removeClassifier(self,classifierName):
"""Removes Classifier.
:param classifierName(required): Classifier Name
"""
doc,root_element = self._buildbasicXMLdoc()
writecalls = doc.createElement("writeCalls")
if self.writeApiKey is None:
raise uClassifyError("Write API Key not Initialized")
writecalls.setAttribute("writeApiKey",self.writeApiKey)
writecalls.setAttribute("classifierName",classifierName)
removetag = doc.createElement("remove")
removetag.setAttribute("id","Remove")
root_element.appendChild(writecalls)
writecalls.appendChild(removetag)
r = requests.post(self.api_url,doc.toxml())
if r.status_code == 200:
success, status_code, text = self._getResponseCode(r.content)
if success == "false":
raise uClassifyError(text,status_code)
else:
raise uClassifyError("Bad XML Request Sent")
if __name__ == "__main__":
a = uclassify()
a.setWriteApiKey("fsqAft7Hs29BgAc1AWeCIWdGnY")
a.setReadApiKey("aD02ApbU29kNOG2xezDGXPEIck")
#a.create("ManorWoma")
#a.addClass(["man","woman"],"ManorWoma")
#a.train(["dffddddddteddddxt1","teddddxfddddddddt2","taaaaffaaaaaedddddddddddddxt3"],"woman","ManorWoma")
#d =a.classifyKeywords(["helloof the jungle","madam of the bses","bye jungli billi"],"ManorWoma")
#a.getInformation("ManorWoma")
#a.removeClassifier("Freak")
a.removeClass(["man"],"ManorWoma")
|
matrixise/odoo | refs/heads/8.0 | addons/survey/wizard/__init__.py | 385 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import survey_email_compose_message
|
koobonil/Boss2D | refs/heads/master | Boss2D/addon/opencv-3.1.0_for_boss/3rdparty/jinja2/filters.py | 598 | # -*- coding: utf-8 -*-
"""
jinja2.filters
~~~~~~~~~~~~~~
Bundled jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import math
from random import choice
from operator import itemgetter
from itertools import groupby
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
unicode_urlencode
from jinja2.runtime import Undefined
from jinja2.exceptions import FilterArgumentError
from jinja2._compat import next, imap, string_types, text_type, iteritems
_word_re = re.compile(r'\w+(?u)')
def contextfilter(f):
"""Decorator for marking context dependent filters. The current
:class:`Context` will be passed as first argument.
"""
f.contextfilter = True
return f
def evalcontextfilter(f):
"""Decorator for marking eval-context dependent filters. An eval
context object is passed as first argument. For more information
about the eval context, see :ref:`eval-context`.
.. versionadded:: 2.4
"""
f.evalcontextfilter = True
return f
def environmentfilter(f):
"""Decorator for marking evironment dependent filters. The current
:class:`Environment` is passed to the filter as first argument.
"""
f.environmentfilter = True
return f
def make_attrgetter(environment, attribute):
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
if not isinstance(attribute, string_types) \
or ('.' not in attribute and not attribute.isdigit()):
return lambda x: environment.getitem(x, attribute)
attribute = attribute.split('.')
def attrgetter(item):
for part in attribute:
if part.isdigit():
part = int(part)
item = environment.getitem(item, part)
return item
return attrgetter
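# Hypothetical usage sketch: with attribute='users.0.name' the returned
# callable resolves item -> item['users'][0]['name'] (or the attribute
# equivalents, per environment.getitem), because dotted parts are looked
# up one at a time and purely numeric parts become integer indices.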
def do_forceescape(value):
"""Enforce HTML escaping. This will probably double escape variables."""
if hasattr(value, '__html__'):
value = value.__html__()
return escape(text_type(value))
def do_urlencode(value):
"""Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
dictionaries and regular strings as well as pairwise iterables.
.. versionadded:: 2.7
"""
itemiter = None
if isinstance(value, dict):
itemiter = iteritems(value)
elif not isinstance(value, string_types):
try:
itemiter = iter(value)
except TypeError:
pass
if itemiter is None:
return unicode_urlencode(value)
return u'&'.join(unicode_urlencode(k) + '=' +
unicode_urlencode(v) for k, v in itemiter)
@evalcontextfilter
def do_replace(eval_ctx, s, old, new, count=None):
"""Return a copy of the value with all occurrences of a substring
replaced with a new one. The first argument is the substring
that should be replaced, the second is the replacement string.
If the optional third argument ``count`` is given, only the first
``count`` occurrences are replaced:
.. sourcecode:: jinja
{{ "Hello World"|replace("Hello", "Goodbye") }}
-> Goodbye World
{{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
-> d'oh, d'oh, aaargh
"""
if count is None:
count = -1
if not eval_ctx.autoescape:
return text_type(s).replace(text_type(old), text_type(new), count)
if hasattr(old, '__html__') or hasattr(new, '__html__') and \
not hasattr(s, '__html__'):
s = escape(s)
else:
s = soft_unicode(s)
return s.replace(soft_unicode(old), soft_unicode(new), count)
def do_upper(s):
"""Convert a value to uppercase."""
return soft_unicode(s).upper()
def do_lower(s):
"""Convert a value to lowercase."""
return soft_unicode(s).lower()
@evalcontextfilter
def do_xmlattr(_eval_ctx, d, autospace=True):
"""Create an SGML/XML attribute string based on the items in a dict.
All values that are neither `none` nor `undefined` are automatically
escaped:
.. sourcecode:: html+jinja
<ul{{ {'class': 'my_list', 'missing': none,
'id': 'list-%d'|format(variable)}|xmlattr }}>
...
</ul>
Results in something like this:
.. sourcecode:: html
<ul class="my_list" id="list-42">
...
</ul>
As you can see it automatically prepends a space in front of the item
if the filter returned something unless the second parameter is false.
"""
rv = u' '.join(
u'%s="%s"' % (escape(key), escape(value))
for key, value in iteritems(d)
if value is not None and not isinstance(value, Undefined)
)
if autospace and rv:
rv = u' ' + rv
if _eval_ctx.autoescape:
rv = Markup(rv)
return rv
def do_capitalize(s):
"""Capitalize a value. The first character will be uppercase, all others
lowercase.
"""
return soft_unicode(s).capitalize()
def do_title(s):
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
"""
rv = []
for item in re.compile(r'([-\s]+)(?u)').split(s):
if not item:
continue
rv.append(item[0].upper() + item[1:].lower())
return ''.join(rv)
def do_dictsort(value, case_sensitive=False, by='key'):
"""Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
{% for item in mydict|dictsort %}
sort the dict by key, case insensitive
{% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for item in mydict|dictsort(false, 'value') %}
sort the dict by key, case insensitive, sorted
normally and ordered by value.
"""
if by == 'key':
pos = 0
elif by == 'value':
pos = 1
else:
raise FilterArgumentError('You can only sort by either '
'"key" or "value"')
def sort_func(item):
value = item[pos]
if isinstance(value, string_types) and not case_sensitive:
value = value.lower()
return value
return sorted(value.items(), key=sort_func)
@environmentfilter
def do_sort(environment, value, reverse=False, case_sensitive=False,
attribute=None):
"""Sort an iterable. Per default it sorts ascending, if you pass it
true as first argument it will reverse the sorting.
If the iterable is made of strings the third parameter can be used to
control the case sensitiveness of the comparison which is disabled by
default.
.. sourcecode:: jinja
{% for item in iterable|sort %}
...
{% endfor %}
It is also possible to sort by an attribute (for example to sort
by the date of an object) by specifying the `attribute` parameter:
.. sourcecode:: jinja
{% for item in iterable|sort(attribute='date') %}
...
{% endfor %}
.. versionchanged:: 2.6
The `attribute` parameter was added.
"""
if not case_sensitive:
def sort_func(item):
if isinstance(item, string_types):
item = item.lower()
return item
else:
sort_func = None
if attribute is not None:
getter = make_attrgetter(environment, attribute)
def sort_func(item, processor=sort_func or (lambda x: x)):
return processor(getter(item))
return sorted(value, key=sort_func, reverse=reverse)
def do_default(value, default_value=u'', boolean=False):
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
.. sourcecode:: jinja
{{ my_variable|default('my_variable is not defined') }}
This will output the value of ``my_variable`` if the variable was
defined, otherwise ``'my_variable is not defined'``. If you want
to use default with variables that evaluate to false you have to
set the second parameter to `true`:
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
"""
if isinstance(value, Undefined) or (boolean and not value):
return default_value
return value
@evalcontextfilter
def do_join(eval_ctx, value, d=u'', attribute=None):
"""Return a string which is the concatenation of the strings in the
sequence. The separator between elements is an empty string per
default; you can define it with the optional parameter:
.. sourcecode:: jinja
{{ [1, 2, 3]|join('|') }}
-> 1|2|3
{{ [1, 2, 3]|join }}
-> 123
It is also possible to join certain attributes of an object:
.. sourcecode:: jinja
{{ users|join(', ', attribute='username') }}
.. versionadded:: 2.6
The `attribute` parameter was added.
"""
if attribute is not None:
value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
# no automatic escaping? joining is a lot easier then
if not eval_ctx.autoescape:
return text_type(d).join(imap(text_type, value))
# if the delimiter doesn't have an html representation we check
# if any of the items has. If yes we do a coercion to Markup
if not hasattr(d, '__html__'):
value = list(value)
do_escape = False
for idx, item in enumerate(value):
if hasattr(item, '__html__'):
do_escape = True
else:
value[idx] = text_type(item)
if do_escape:
d = escape(d)
else:
d = text_type(d)
return d.join(value)
# no html involved, so normal joining
return soft_unicode(d).join(imap(soft_unicode, value))
def do_center(value, width=80):
"""Centers the value in a field of a given width."""
return text_type(value).center(width)
@environmentfilter
def do_first(environment, seq):
"""Return the first item of a sequence."""
try:
return next(iter(seq))
except StopIteration:
return environment.undefined('No first item, sequence was empty.')
@environmentfilter
def do_last(environment, seq):
"""Return the last item of a sequence."""
try:
return next(iter(reversed(seq)))
except StopIteration:
return environment.undefined('No last item, sequence was empty.')
@environmentfilter
def do_random(environment, seq):
"""Return a random item from the sequence."""
try:
return choice(seq)
except IndexError:
return environment.undefined('No random item, sequence was empty.')
def do_filesizeformat(value, binary=False):
"""Format the value like a 'human-readable' file size (i.e. 13 kB,
4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega,
Giga, etc.); if the second parameter is set to `True` the binary
prefixes are used (Mebi, Gibi).
"""
bytes = float(value)
base = binary and 1024 or 1000
prefixes = [
(binary and 'KiB' or 'kB'),
(binary and 'MiB' or 'MB'),
(binary and 'GiB' or 'GB'),
(binary and 'TiB' or 'TB'),
(binary and 'PiB' or 'PB'),
(binary and 'EiB' or 'EB'),
(binary and 'ZiB' or 'ZB'),
(binary and 'YiB' or 'YB')
]
if bytes == 1:
return '1 Byte'
elif bytes < base:
return '%d Bytes' % bytes
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
if bytes < unit:
return '%.1f %s' % ((base * bytes / unit), prefix)
return '%.1f %s' % ((base * bytes / unit), prefix)
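# Worked example (hypothetical input): do_filesizeformat(1572864, binary=True)
# stops at the 'MiB' prefix, where unit = 1024 ** 3, and returns
# '%.1f MiB' % (1024 * 1572864 / 1024 ** 3), i.e. '1.5 MiB'.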
def do_pprint(value, verbose=False):
"""Pretty print a variable. Useful for debugging.
With Jinja 1.2 onwards you can pass it a parameter. If this parameter
is truthy the output will be more verbose (this requires `pretty`)
"""
return pformat(value, verbose=verbose)
@evalcontextfilter
def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False):
"""Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
to that number. Also a third argument exists that makes the urls
"nofollow":
.. sourcecode:: jinja
{{ mytext|urlize(40, true) }}
links are shortened to 40 chars and defined with rel="nofollow"
"""
rv = urlize(value, trim_url_limit, nofollow)
if eval_ctx.autoescape:
rv = Markup(rv)
return rv
def do_indent(s, width=4, indentfirst=False):
"""Return a copy of the passed string, each line indented by
4 spaces. The first line is not indented. If you want to
change the number of spaces or indent the first line too
you can pass additional parameters to the filter:
.. sourcecode:: jinja
{{ mytext|indent(2, true) }}
indent by two spaces and indent the first line too.
"""
indention = u' ' * width
rv = (u'\n' + indention).join(s.splitlines())
if indentfirst:
rv = indention + rv
return rv
def do_truncate(s, length=255, killwords=False, end='...'):
"""Return a truncated copy of the string. The length is specified
with the first parameter which defaults to ``255``. If the second
parameter is ``true`` the filter will cut the text at length. Otherwise
it will discard the last word. If the text was in fact
truncated it will append an ellipsis sign (``"..."``). If you want a
different ellipsis sign than ``"..."`` you can specify it using the
third parameter.
.. sourcecode:: jinja
{{ "foo bar"|truncate(5) }}
-> "foo ..."
{{ "foo bar"|truncate(5, True) }}
-> "foo b..."
"""
if len(s) <= length:
return s
elif killwords:
return s[:length] + end
words = s.split(' ')
result = []
m = 0
for word in words:
m += len(word) + 1
if m > length:
break
result.append(word)
result.append(end)
return u' '.join(result)
@environmentfilter
def do_wordwrap(environment, s, width=79, break_long_words=True,
wrapstring=None):
"""
Return a copy of the string passed to the filter wrapped after
``79`` characters. You can override this default using the first
parameter. If you set the second parameter to `false` Jinja will not
split words apart if they are longer than `width`. By default, the newlines
will be the default newlines for the environment, but this can be changed
using the wrapstring keyword argument.
.. versionadded:: 2.7
Added support for the `wrapstring` parameter.
"""
if not wrapstring:
wrapstring = environment.newline_sequence
import textwrap
return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
replace_whitespace=False,
break_long_words=break_long_words))
def do_wordcount(s):
"""Count the words in that string."""
return len(_word_re.findall(s))
def do_int(value, default=0):
"""Convert the value into an integer. If the
conversion doesn't work it will return ``0``. You can
override this default using the first parameter.
"""
try:
return int(value)
except (TypeError, ValueError):
# this quirk is necessary so that "42.23"|int gives 42.
try:
return int(float(value))
except (TypeError, ValueError):
return default
def do_float(value, default=0.0):
"""Convert the value into a floating point number. If the
conversion doesn't work it will return ``0.0``. You can
override this default using the first parameter.
"""
try:
return float(value)
except (TypeError, ValueError):
return default
def do_format(value, *args, **kwargs):
"""
Apply python string formatting on an object:
.. sourcecode:: jinja
{{ "%s - %s"|format("Hello?", "Foo!") }}
-> Hello? - Foo!
"""
if args and kwargs:
raise FilterArgumentError('can\'t handle positional and keyword '
'arguments at the same time')
return soft_unicode(value) % (kwargs or args)
def do_trim(value):
"""Strip leading and trailing whitespace."""
return soft_unicode(value).strip()
def do_striptags(value):
"""Strip SGML/XML tags and replace adjacent whitespace by one space.
"""
if hasattr(value, '__html__'):
value = value.__html__()
return Markup(text_type(value)).striptags()
def do_slice(value, slices, fill_with=None):
"""Slice an iterator and return a list of lists containing
those items. Useful if you want to create a div containing
three ul tags that represent columns:
.. sourcecode:: html+jinja
<div class="columwrapper">
{%- for column in items|slice(3) %}
<ul class="column-{{ loop.index }}">
{%- for item in column %}
<li>{{ item }}</li>
{%- endfor %}
</ul>
{%- endfor %}
</div>
If you pass it a second argument it's used to fill missing
values on the last iteration.
"""
seq = list(value)
length = len(seq)
items_per_slice = length // slices
slices_with_extra = length % slices
offset = 0
for slice_number in range(slices):
start = offset + slice_number * items_per_slice
if slice_number < slices_with_extra:
offset += 1
end = offset + (slice_number + 1) * items_per_slice
tmp = seq[start:end]
if fill_with is not None and slice_number >= slices_with_extra:
tmp.append(fill_with)
yield tmp
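# Worked example (hypothetical input): slicing ten items into three slices
# gives items_per_slice = 3 with one extra item, which the first slice
# absorbs:
#   list(do_slice(range(1, 11), 3))
#   -> [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]]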
def do_batch(value, linecount, fill_with=None):
"""
A filter that batches items. It works pretty much like `slice`
just the other way round. It returns a list of lists with the
given number of items. If you provide a second parameter this
is used to fill up missing items. See this example:
.. sourcecode:: html+jinja
<table>
{%- for row in items|batch(3, ' ') %}
<tr>
{%- for column in row %}
<td>{{ column }}</td>
{%- endfor %}
</tr>
{%- endfor %}
</table>
"""
result = []
tmp = []
for item in value:
if len(tmp) == linecount:
yield tmp
tmp = []
tmp.append(item)
if tmp:
if fill_with is not None and len(tmp) < linecount:
tmp += [fill_with] * (linecount - len(tmp))
yield tmp
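# Worked example (hypothetical input): batching ten items in rows of three
# with a fill value pads only the final, short row:
#   list(do_batch(range(1, 11), 3, 0))
#   -> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 0, 0]]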
def do_round(value, precision=0, method='common'):
"""Round the number to a given precision. The first
parameter specifies the precision (default is ``0``), the
second the rounding method:
- ``'common'`` rounds either up or down
- ``'ceil'`` always rounds up
- ``'floor'`` always rounds down
If you don't specify a method ``'common'`` is used.
.. sourcecode:: jinja
{{ 42.55|round }}
-> 43.0
{{ 42.55|round(1, 'floor') }}
-> 42.5
Note that even if rounded to 0 precision, a float is returned. If
you need a real integer, pipe it through `int`:
.. sourcecode:: jinja
{{ 42.55|round|int }}
-> 43
"""
if method not in ('common', 'ceil', 'floor'):
raise FilterArgumentError('method must be common, ceil or floor')
if method == 'common':
return round(value, precision)
func = getattr(math, method)
return func(value * (10 ** precision)) / (10 ** precision)
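# Worked example: do_round(42.55, 1, 'floor') evaluates
# math.floor(42.55 * 10 ** 1) / 10 ** 1, roughly floor(425.5) / 10,
# which yields 42.5 as in the docstring sample above.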
@environmentfilter
def do_groupby(environment, value, attribute):
"""Group a sequence of objects by a common attribute.
If you for example have a list of dicts or objects that represent persons
with `gender`, `first_name` and `last_name` attributes and you want to
group all users by genders you can do something like the following
snippet:
.. sourcecode:: html+jinja
<ul>
{% for group in persons|groupby('gender') %}
<li>{{ group.grouper }}<ul>
{% for person in group.list %}
<li>{{ person.first_name }} {{ person.last_name }}</li>
{% endfor %}</ul></li>
{% endfor %}
</ul>
Additionally it's possible to use tuple unpacking for the grouper and
list:
.. sourcecode:: html+jinja
<ul>
{% for grouper, list in persons|groupby('gender') %}
...
{% endfor %}
</ul>
As you can see the item we're grouping by is stored in the `grouper`
attribute and the `list` contains all the objects that have this grouper
in common.
.. versionchanged:: 2.6
It's now possible to use dotted notation to group by the child
attribute of another attribute.
"""
expr = make_attrgetter(environment, attribute)
return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
class _GroupTuple(tuple):
__slots__ = ()
grouper = property(itemgetter(0))
list = property(itemgetter(1))
def __new__(cls, key_value):
(key, value) = key_value
return tuple.__new__(cls, (key, list(value)))
@environmentfilter
def do_sum(environment, iterable, attribute=None, start=0):
"""Returns the sum of a sequence of numbers plus the value of parameter
'start' (which defaults to 0). When the sequence is empty it returns
start.
It is also possible to sum up only certain attributes:
.. sourcecode:: jinja
Total: {{ items|sum(attribute='price') }}
.. versionchanged:: 2.6
The `attribute` parameter was added to allow summing up over
attributes. Also the `start` parameter was moved on to the right.
"""
if attribute is not None:
iterable = imap(make_attrgetter(environment, attribute), iterable)
return sum(iterable, start)
def do_list(value):
"""Convert the value into a list. If it was a string the returned list
will be a list of characters.
"""
return list(value)
def do_mark_safe(value):
"""Mark the value as safe which means that in an environment with automatic
escaping enabled this variable will not be escaped.
"""
return Markup(value)
def do_mark_unsafe(value):
"""Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
return text_type(value)
def do_reverse(value):
"""Reverse the object or return an iterator the iterates over it the other
way round.
"""
if isinstance(value, string_types):
return value[::-1]
try:
return reversed(value)
except TypeError:
try:
rv = list(value)
rv.reverse()
return rv
except TypeError:
raise FilterArgumentError('argument must be iterable')
@environmentfilter
def do_attr(environment, obj, name):
"""Get an attribute of an object. ``foo|attr("bar")`` works like
``foo["bar"]`` just that always an attribute is returned and items are not
looked up.
See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
"""
try:
name = str(name)
except UnicodeError:
pass
else:
try:
value = getattr(obj, name)
except AttributeError:
pass
else:
if environment.sandboxed and not \
environment.is_safe_attribute(obj, name, value):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
@contextfilter
def do_map(*args, **kwargs):
"""Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of each.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
Users on this page: {{ titles|map('lower')|join(', ') }}
.. versionadded:: 2.7
"""
context = args[0]
seq = args[1]
if len(args) == 2 and 'attribute' in kwargs:
attribute = kwargs.pop('attribute')
if kwargs:
raise FilterArgumentError('Unexpected keyword argument %r' %
next(iter(kwargs)))
func = make_attrgetter(context.environment, attribute)
else:
try:
name = args[2]
args = args[3:]
except LookupError:
raise FilterArgumentError('map requires a filter argument')
func = lambda item: context.environment.call_filter(
name, item, args, kwargs, context=context)
if seq:
for item in seq:
yield func(item)
@contextfilter
def do_select(*args, **kwargs):
"""Filters a sequence of objects by appying a test to either the object
or the attribute and only selecting the ones with the test succeeding.
Example usage:
.. sourcecode:: jinja
{{ numbers|select("odd") }}
.. versionadded:: 2.7
"""
return _select_or_reject(args, kwargs, lambda x: x, False)
@contextfilter
def do_reject(*args, **kwargs):
"""Filters a sequence of objects by appying a test to either the object
or the attribute and rejecting the ones with the test succeeding.
Example usage:
.. sourcecode:: jinja
{{ numbers|reject("odd") }}
.. versionadded:: 2.7
"""
return _select_or_reject(args, kwargs, lambda x: not x, False)
@contextfilter
def do_selectattr(*args, **kwargs):
"""Filters a sequence of objects by appying a test to either the object
or the attribute and only selecting the ones with the test succeeding.
Example usage:
.. sourcecode:: jinja
{{ users|selectattr("is_active") }}
{{ users|selectattr("email", "none") }}
.. versionadded:: 2.7
"""
return _select_or_reject(args, kwargs, lambda x: x, True)
@contextfilter
def do_rejectattr(*args, **kwargs):
"""Filters a sequence of objects by appying a test to either the object
or the attribute and rejecting the ones with the test succeeding.
.. sourcecode:: jinja
{{ users|rejectattr("is_active") }}
{{ users|rejectattr("email", "none") }}
.. versionadded:: 2.7
"""
return _select_or_reject(args, kwargs, lambda x: not x, True)
def _select_or_reject(args, kwargs, modfunc, lookup_attr):
context = args[0]
seq = args[1]
if lookup_attr:
try:
attr = args[2]
except LookupError:
raise FilterArgumentError('Missing parameter for attribute name')
transfunc = make_attrgetter(context.environment, attr)
off = 1
else:
off = 0
transfunc = lambda x: x
try:
name = args[2 + off]
args = args[3 + off:]
func = lambda item: context.environment.call_test(
name, item, args, kwargs)
except LookupError:
func = bool
if seq:
for item in seq:
if modfunc(func(transfunc(item))):
yield item
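# Worked example (hypothetical input): {{ [1, 2, 3, 4]|select('odd') }}
# reaches this helper with lookup_attr=False, so transfunc is the identity;
# each item is run through the 'odd' test and modfunc keeps truthy results,
# yielding 1 and 3 (reject flips modfunc and would yield 2 and 4).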
FILTERS = {
'attr': do_attr,
'replace': do_replace,
'upper': do_upper,
'lower': do_lower,
'escape': escape,
'e': escape,
'forceescape': do_forceescape,
'capitalize': do_capitalize,
'title': do_title,
'default': do_default,
'd': do_default,
'join': do_join,
'count': len,
'dictsort': do_dictsort,
'sort': do_sort,
'length': len,
'reverse': do_reverse,
'center': do_center,
'indent': do_indent,
'first': do_first,
'last': do_last,
'map': do_map,
'random': do_random,
'reject': do_reject,
'rejectattr': do_rejectattr,
'filesizeformat': do_filesizeformat,
'pprint': do_pprint,
'truncate': do_truncate,
'wordwrap': do_wordwrap,
'wordcount': do_wordcount,
'int': do_int,
'float': do_float,
'string': soft_unicode,
'list': do_list,
'urlize': do_urlize,
'format': do_format,
'trim': do_trim,
'striptags': do_striptags,
'select': do_select,
'selectattr': do_selectattr,
'slice': do_slice,
'batch': do_batch,
'sum': do_sum,
'abs': abs,
'round': do_round,
'groupby': do_groupby,
'safe': do_mark_safe,
'xmlattr': do_xmlattr,
'urlencode': do_urlencode
}
|
agconti/njode | refs/heads/master | env/lib/python2.7/site-packages/django/core/checks/__init__.py | 58 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .messages import (CheckMessage,
Debug, Info, Warning, Error, Critical,
DEBUG, INFO, WARNING, ERROR, CRITICAL)
from .registry import register, run_checks, tag_exists, Tags
# Import these to force registration of checks
import django.core.checks.compatibility.django_1_6_0 # NOQA
import django.core.checks.compatibility.django_1_7_0 # NOQA
import django.core.checks.model_checks # NOQA
__all__ = [
'CheckMessage',
'Debug', 'Info', 'Warning', 'Error', 'Critical',
'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL',
'register', 'run_checks', 'tag_exists', 'Tags',
]
|
emaeliena/PerfKitBenchmarker | refs/heads/master | perfkitbenchmarker/deployment/shared/ini_constants.py | 10 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used to parse the deployment .ini file."""
# Network section, option names.
SECTION_NETWORK = 'network'
OPTION_NET_NAME = 'name'
OPTION_TCP = 'tcp'
OPTION_UDP = 'udp'
# Cluster section, option names.
SECTION_CLUSTER = 'cluster'
OPTION_CLUSTER_TYPE = 'type'
OPTION_PROJECT = 'project'
OPTION_SETUP_MODULES = 'setup_modules'
OPTION_ADMIN_USER = 'admin_user'
OPTION_ZONE = 'zone'
# Node section, option names.
SECTION_NODE_PREFIX = 'node:'
OPTION_PACKAGE_PREFIX = 'package.'
OPTION_PD_PREFIX = 'pd.'
OPTION_STATIC_REF_PREFIX = '@'
OPTION_RUNTIME_REF_PREFIX = '$'
OPTION_COUNT = 'count'
OPTION_ENTRYPOINT = 'entrypoint'
OPTION_IMAGE = 'image'
OPTION_VM_TYPE = 'vm_type'
# Helpful collections.
ALL_OPTIONS = [
OPTION_NET_NAME, OPTION_TCP, OPTION_UDP, OPTION_CLUSTER_TYPE,
OPTION_SETUP_MODULES, OPTION_PROJECT, OPTION_ZONE, OPTION_ADMIN_USER,
OPTION_COUNT, OPTION_IMAGE, OPTION_VM_TYPE, OPTION_ENTRYPOINT]
NODE_OPTIONS = [
OPTION_PD_PREFIX, OPTION_COUNT, OPTION_IMAGE, OPTION_VM_TYPE,
OPTION_ENTRYPOINT]
ALL_OPTION_PREFIXES = [
OPTION_PACKAGE_PREFIX, OPTION_PD_PREFIX, OPTION_STATIC_REF_PREFIX]
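# Hypothetical example of a deployment .ini file these constants describe;
# the section and option names come from the constants above, while all
# concrete values are made up for illustration:
#
#   [network]
#   name = my-net
#   tcp = 80,443
#
#   [cluster]
#   type = gce
#   project = my-project
#   zone = us-central1-a
#   admin_user = perfkit
#   setup_modules = base
#
#   [node:worker]
#   count = 2
#   image = debian-7
#   vm_type = n1-standard-1
#   entrypoint = ./run.sh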
|
peterlauri/django | refs/heads/master | django/contrib/staticfiles/management/commands/collectstatic.py | 15 | from __future__ import unicode_literals
import os
from collections import OrderedDict
from django.apps import apps
from django.contrib.staticfiles.finders import get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import FileSystemStorage
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.six.moves import input
class Command(BaseCommand):
"""
Copies or symlinks static files from different locations to
settings.STATIC_ROOT.
"""
help = "Collect static files in a single location."
requires_system_checks = False
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.copied_files = []
self.symlinked_files = []
self.unmodified_files = []
self.post_processed_files = []
self.storage = staticfiles_storage
self.style = no_style()
@cached_property
def local(self):
try:
self.storage.path('')
except NotImplementedError:
return False
return True
def add_arguments(self, parser):
parser.add_argument(
'--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help="Do NOT prompt the user for input of any kind.",
)
parser.add_argument(
'--no-post-process',
action='store_false', dest='post_process', default=True,
help="Do NOT post process collected files.",
)
parser.add_argument(
'-i', '--ignore', action='append', default=[],
dest='ignore_patterns', metavar='PATTERN',
help="Ignore files or directories matching this glob-style "
"pattern. Use multiple times to ignore more.",
)
parser.add_argument(
'-n', '--dry-run',
action='store_true', dest='dry_run', default=False,
help="Do everything except modify the filesystem.",
)
parser.add_argument(
'-c', '--clear',
action='store_true', dest='clear', default=False,
help="Clear the existing files using the storage "
"before trying to copy or link the original file.",
)
parser.add_argument(
'-l', '--link',
action='store_true', dest='link', default=False,
help="Create a symbolic link to each file instead of copying.",
)
parser.add_argument(
'--no-default-ignore', action='store_false',
dest='use_default_ignore_patterns', default=True,
help="Don't ignore the common private glob-style patterns (defaults to 'CVS', '.*' and '*~').",
)
def set_options(self, **options):
"""
Set instance variables based on an options dict
"""
self.interactive = options['interactive']
self.verbosity = options['verbosity']
self.symlink = options['link']
self.clear = options['clear']
self.dry_run = options['dry_run']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += apps.get_app_config('staticfiles').ignore_patterns
self.ignore_patterns = list(set(ignore_patterns))
self.post_process = options['post_process']
def collect(self):
"""
Perform the bulk of the work of collectstatic.
Split off from handle() to facilitate testing.
"""
if self.symlink and not self.local:
raise CommandError("Can't symlink to a remote destination.")
if self.clear:
self.clear_dir('')
if self.symlink:
handler = self.link_file
else:
handler = self.copy_file
found_files = OrderedDict()
for finder in get_finders():
for path, storage in finder.list(self.ignore_patterns):
# Prefix the relative path if the source storage contains it
if getattr(storage, 'prefix', None):
prefixed_path = os.path.join(storage.prefix, path)
else:
prefixed_path = path
if prefixed_path not in found_files:
found_files[prefixed_path] = (storage, path)
handler(path, prefixed_path, storage)
else:
self.log(
"Found another file with the destination path '%s'. It "
"will be ignored since only the first encountered file "
"is collected. If this is not what you want, make sure "
"every static file has a unique path." % prefixed_path,
level=1,
)
# Here we check if the storage backend has a post_process
# method and pass it the list of modified files.
if self.post_process and hasattr(self.storage, 'post_process'):
processor = self.storage.post_process(found_files,
dry_run=self.dry_run)
for original_path, processed_path, processed in processor:
if isinstance(processed, Exception):
self.stderr.write("Post-processing '%s' failed!" % original_path)
# Add a blank line before the traceback, otherwise it's
# too easy to miss the relevant part of the error message.
self.stderr.write("")
raise processed
if processed:
self.log("Post-processed '%s' as '%s'" %
(original_path, processed_path), level=1)
self.post_processed_files.append(original_path)
else:
self.log("Skipped post-processing '%s'" % original_path)
return {
'modified': self.copied_files + self.symlinked_files,
'unmodified': self.unmodified_files,
'post_processed': self.post_processed_files,
}
def handle(self, **options):
self.set_options(**options)
message = ['\n']
if self.dry_run:
message.append(
'You have activated the --dry-run option so no files will be modified.\n\n'
)
message.append(
'You have requested to collect static files at the destination\n'
'location as specified in your settings'
)
if self.is_local_storage() and self.storage.location:
destination_path = self.storage.location
message.append(':\n\n %s\n\n' % destination_path)
should_warn_user = (
self.storage.exists(destination_path) and
any(self.storage.listdir(destination_path))
)
else:
destination_path = None
message.append('.\n\n')
# Destination files existence not checked; play it safe and warn.
should_warn_user = True
if self.interactive and should_warn_user:
if self.clear:
message.append('This will DELETE ALL FILES in this location!\n')
else:
message.append('This will overwrite existing files!\n')
message.append(
'Are you sure you want to do this?\n\n'
"Type 'yes' to continue, or 'no' to cancel: "
)
if input(''.join(message)) != 'yes':
raise CommandError("Collecting static files cancelled.")
collected = self.collect()
modified_count = len(collected['modified'])
unmodified_count = len(collected['unmodified'])
post_processed_count = len(collected['post_processed'])
if self.verbosity >= 1:
template = ("\n%(modified_count)s %(identifier)s %(action)s"
"%(destination)s%(unmodified)s%(post_processed)s.\n")
summary = template % {
'modified_count': modified_count,
'identifier': 'static file' + ('' if modified_count == 1 else 's'),
'action': 'symlinked' if self.symlink else 'copied',
'destination': (" to '%s'" % destination_path if destination_path else ''),
'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''),
'post_processed': (collected['post_processed'] and
', %s post-processed'
% post_processed_count or ''),
}
return summary
def log(self, msg, level=2):
"""
Small log helper
"""
if self.verbosity >= level:
self.stdout.write(msg)
def is_local_storage(self):
return isinstance(self.storage, FileSystemStorage)
def clear_dir(self, path):
"""
Deletes the given relative path using the destination storage backend.
"""
if not self.storage.exists(path):
return
dirs, files = self.storage.listdir(path)
for f in files:
fpath = os.path.join(path, f)
if self.dry_run:
self.log("Pretending to delete '%s'" % force_text(fpath), level=1)
else:
self.log("Deleting '%s'" % force_text(fpath), level=1)
try:
full_path = self.storage.path(fpath)
except NotImplementedError:
self.storage.delete(fpath)
else:
if not os.path.exists(full_path) and os.path.lexists(full_path):
# Delete broken symlinks
os.unlink(full_path)
else:
self.storage.delete(fpath)
for d in dirs:
self.clear_dir(os.path.join(path, d))
def delete_file(self, path, prefixed_path, source_storage):
"""
Checks if the target file should be deleted if it already exists
"""
if self.storage.exists(prefixed_path):
try:
# When was the target file modified last time?
target_last_modified = self.storage.get_modified_time(prefixed_path)
except (OSError, NotImplementedError, AttributeError):
# The storage doesn't support get_modified_time() or failed
pass
else:
try:
# When was the source file modified last time?
source_last_modified = source_storage.get_modified_time(path)
except (OSError, NotImplementedError, AttributeError):
pass
else:
# The full path of the target file
if self.local:
full_path = self.storage.path(prefixed_path)
else:
full_path = None
# Skip the file if the source file is younger
# Avoid sub-second precision (see #14665, #19540)
if (target_last_modified.replace(microsecond=0) >= source_last_modified.replace(microsecond=0) and
full_path and not (self.symlink ^ os.path.islink(full_path))):
if prefixed_path not in self.unmodified_files:
self.unmodified_files.append(prefixed_path)
self.log("Skipping '%s' (not modified)" % path)
return False
# Then delete the existing file if really needed
if self.dry_run:
self.log("Pretending to delete '%s'" % path)
else:
self.log("Deleting '%s'" % path)
self.storage.delete(prefixed_path)
return True
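# Worked example of the skip rule above (hypothetical timestamps): a target
# modified at 12:00:30.900 and a source modified at 12:00:30.100 both
# truncate to 12:00:30, so the target still counts as up to date and the
# file is skipped rather than re-copied (see #14665, #19540).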
def link_file(self, path, prefixed_path, source_storage):
"""
Attempt to link ``path``
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.symlinked_files:
return self.log("Skipping '%s' (already linked earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally link the file
if self.dry_run:
self.log("Pretending to link '%s'" % source_path, level=1)
else:
self.log("Linking '%s'" % source_path, level=1)
full_path = self.storage.path(prefixed_path)
try:
os.makedirs(os.path.dirname(full_path))
except OSError:
pass
try:
if os.path.lexists(full_path):
os.unlink(full_path)
os.symlink(source_path, full_path)
except AttributeError:
import platform
raise CommandError("Symlinking is not supported by Python %s." %
platform.python_version())
except NotImplementedError:
import platform
raise CommandError("Symlinking is not supported in this "
"platform (%s)." % platform.platform())
except OSError as e:
raise CommandError(e)
if prefixed_path not in self.symlinked_files:
self.symlinked_files.append(prefixed_path)
def copy_file(self, path, prefixed_path, source_storage):
"""
Attempt to copy ``path`` with storage
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.copied_files:
return self.log("Skipping '%s' (already copied earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally start copying
if self.dry_run:
self.log("Pretending to copy '%s'" % source_path, level=1)
else:
self.log("Copying '%s'" % source_path, level=1)
with source_storage.open(path) as source_file:
self.storage.save(prefixed_path, source_file)
self.copied_files.append(prefixed_path)
|
sszlm/MissionPlanner | refs/heads/master | Lib/site-packages/scipy/ndimage/setupscons.py | 64 | from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
from numpy import get_include
def configuration(parent_package='', top_path=None):
config = Configuration('ndimage', parent_package, top_path)
config.add_sconscript("SConstruct")
config.add_data_dir('tests')
return config
if __name__ == '__main__':
setup(**configuration(top_path='').todict())
|
dongjoon-hyun/tensorflow | refs/heads/master | tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 16 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
4. train a model using an iterative gradient-based method.
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
from six.moves import urllib  # provides urllib.request under both Python 2 and 3
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
layers = tf.keras.layers
def parse(line):
"""Parse a line from the colors dataset."""
# Each line of the dataset is comma-separated and formatted as
# color_name, r, g, b
# so `items` is a list [color_name, r, g, b].
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
# Represent the color name as a one-hot encoded character sequence.
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
# The sequence length is needed by our RNN.
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
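# Editor's illustration (an assumption, not part of the original example):
# for the input line 'white,255,255,255', parse returns rgb == [1., 1., 1.],
# chars with shape [5, 256] (one one-hot row per character of "white"), and
# length == 5.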
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not tf.gfile.Exists(work_directory):
tf.gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
temp_file_name, _ = urllib.request.urlretrieve(source_url)
tf.gfile.Copy(temp_file_name, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
# Downloads data at url into data_dir/basename(url). The dataset has a header
# row (color_name, r, g, b) followed by comma-separated lines.
path = maybe_download(os.path.basename(url), data_dir, url)
# This chain of commands loads our data by:
# 1. skipping the header; (.skip(1))
# 2. parsing the subsequent lines; (.map(parse))
# 3. shuffling the data; (.shuffle(...))
  # 4. grouping the data into padded batches (.padded_batch(...)).
dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(
buffer_size=10000).padded_batch(
batch_size, padded_shapes=([None], [None, None], []))
return dataset
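# Usage sketch (an editorial addition; assumes eager execution is enabled and
# the URL is reachable):
#   train_data = load_dataset("/tmp/rnn_colorbot/data", SOURCE_TRAIN_URL, 64)
#   for labels, chars, lengths in tfe.Iterator(train_data):
#     pass  # labels: [batch, 3], chars: [batch, max_time, 256], lengths: [batch]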
# pylint: disable=not-callable
class RNNColorbot(tf.keras.Model):
"""Multi-layer (LSTM) RNN that regresses on real-valued vector labels.
"""
def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
"""Constructs an RNNColorbot.
Args:
rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
label_dimension: the length of the labels on which to regress
keep_prob: (1 - dropout probability); dropout is applied to the outputs of
each LSTM layer
"""
super(RNNColorbot, self).__init__(name="")
self.label_dimension = label_dimension
self.keep_prob = keep_prob
self.cells = tf.contrib.checkpoint.List(
[tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes])
self.relu = layers.Dense(
label_dimension, activation=tf.nn.relu, name="relu")
def call(self, inputs, training=False):
"""Implements the RNN logic and prediction generation.
Args:
inputs: A tuple (chars, sequence_length), where chars is a batch of
one-hot encoded color names represented as a Tensor with dimensions
[batch_size, time_steps, 256] and sequence_length holds the length
of each character sequence (color name) as a Tensor with dimension
[batch_size].
training: whether the invocation is happening during training
Returns:
A tensor of dimension [batch_size, label_dimension] that is produced by
passing chars through a multi-layer RNN and applying a ReLU to the final
hidden state.
"""
(chars, sequence_length) = inputs
# Transpose the first and second dimensions so that chars is of shape
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])
# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
batch_size = int(chars.shape[1])
for l in range(len(self.cells)):
cell = self.cells[l]
outputs = []
state = cell.zero_state(batch_size, tf.float32)
# Unstack the inputs to obtain a list of batches, one for each time step.
chars = tf.unstack(chars, axis=0)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
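# Note on the gather_nd above: after the layer loop, `chars` has shape
# [time_steps, batch_size, hidden_size], so pairing (sequence_length - 1) with
# the batch index selects chars[sequence_length[i] - 1, i, :] for each example
# i -- the hidden state produced by its last non-padding character.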
def loss(labels, predictions):
"""Computes mean squared loss."""
return tf.reduce_mean(tf.square(predictions - labels))
def test(model, eval_data):
"""Computes the average loss on eval_data, which should be a Dataset."""
avg_loss = tfe.metrics.Mean("loss")
for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
predictions = model((chars, sequence_length), training=False)
avg_loss(loss(labels, predictions))
print("eval/loss: %.6f\n" % avg_loss.result())
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", avg_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
"""Trains model on train_data using optimizer."""
tf.train.get_or_create_global_step()
def model_loss(labels, chars, sequence_length):
predictions = model((chars, sequence_length), training=True)
loss_value = loss(labels, predictions)
tf.contrib.summary.scalar("loss", loss_value)
return loss_value
for (batch, (labels, chars, sequence_length)) in enumerate(
tfe.Iterator(train_data)):
with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
batch_model_loss = functools.partial(model_loss, labels, chars,
sequence_length)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv"
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print(tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
start = time.time()
with train_summary_writer.as_default():
train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
end = time.time()
print("train/time for epoch #%d: %.2f" % (epoch, end - start))
with test_summary_writer.as_default():
test(model, eval_data)
print("Colorbot is ready to generate colors!")
while True:
try:
color_name = six.moves.input(
"Give me a color name (or press enter to exit): ")
except EOFError:
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model((chars, length), training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
print("rgb:", rgb)
data = [[clipped_preds]]
if HAS_MATPLOTLIB:
plt.imshow(data)
plt.title(color_name)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="/tmp/rnn_colorbot/",
help="Directory to download data files and save logs.")
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="Log training loss every log_interval batches.")
parser.add_argument(
"--num_epochs", type=int, default=20, help="Number of epochs to train.")
parser.add_argument(
"--rnn_cell_sizes",
type=int,
nargs="+",
default=[256, 128],
help="List of sizes for each layer of the RNN.")
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Batch size for training and eval.")
parser.add_argument(
"--keep_probability",
type=float,
default=0.5,
help="Keep probability for dropout between layers.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.01,
help="Learning rate to be used during training.")
parser.add_argument(
"--no_gpu",
action="store_true",
default=False,
help="Disables GPU usage even if a GPU is available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
|
nrz/ylikuutio | refs/heads/master | external/bullet3/examples/pybullet/gym/pybullet_envs/prediction/pybullet_sim_gym_env.py | 2 | """This file implements the gym environment of example PyBullet simulation.
"""
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import math
import time
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import pybullet
from pybullet_utils import bullet_client as bc
from pybullet_envs.prediction import boxstack_pybullet_sim
import os
import pybullet_data
from pkg_resources import parse_version
class PyBulletSimGymEnv(gym.Env):
"""The gym environment to run pybullet simulations.
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 50}
def __init__(self,
pybullet_sim_factory=boxstack_pybullet_sim,
render=True,
render_sleep=False,
debug_visualization=True,
hard_reset=False,
render_width=240,
render_height=240,
action_repeat=1,
time_step=1. / 240.,
num_bullet_solver_iterations=50,
urdf_root=pybullet_data.getDataPath()):
"""Initialize the gym environment.
Args:
urdf_root: The path to the urdf data folder.
"""
self._pybullet_sim_factory = pybullet_sim_factory
self._time_step = time_step
self._urdf_root = urdf_root
self._observation = []
self._action_repeat = action_repeat
self._num_bullet_solver_iterations = num_bullet_solver_iterations
self._env_step_counter = 0
self._is_render = render
self._debug_visualization = debug_visualization
self._render_sleep = render_sleep
self._render_width = render_width
self._render_height = render_height
self._cam_dist = .3
self._cam_yaw = 50
self._cam_pitch = -35
self._hard_reset = True
self._last_frame_time = 0.0
optionstring = '--width={} --height={}'.format(render_width, render_height)
print("urdf_root=" + self._urdf_root)
if self._is_render:
self._pybullet_client = bc.BulletClient(connection_mode=pybullet.GUI,
options=optionstring)
else:
self._pybullet_client = bc.BulletClient()
    if not debug_visualization:
self._pybullet_client.configureDebugVisualizer(flag=self._pybullet_client.COV_ENABLE_GUI,
enable=0)
self._pybullet_client.configureDebugVisualizer(
flag=self._pybullet_client.COV_ENABLE_RGB_BUFFER_PREVIEW, enable=0)
self._pybullet_client.configureDebugVisualizer(
flag=self._pybullet_client.COV_ENABLE_DEPTH_BUFFER_PREVIEW, enable=0)
self._pybullet_client.configureDebugVisualizer(
flag=self._pybullet_client.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, enable=0)
self._pybullet_client.setAdditionalSearchPath(urdf_root)
self.seed()
self.reset()
observation_high = (self._example_sim.GetObservationUpperBound())
observation_low = (self._example_sim.GetObservationLowerBound())
action_dim = self._example_sim.GetActionDimension()
self._action_bound = 1
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.observation_space = spaces.Box(observation_low, observation_high)
self.viewer = None
    self._hard_reset = hard_reset  # This assignment needs to happen after reset()
def configure(self, args):
self._args = args
def reset(self):
if self._hard_reset:
self._pybullet_client.resetSimulation()
self._pybullet_client.setPhysicsEngineParameter(
numSolverIterations=int(self._num_bullet_solver_iterations))
self._pybullet_client.setTimeStep(self._time_step)
self._example_sim = self._pybullet_sim_factory.CreateSim(
pybullet_client=self._pybullet_client,
urdf_root=self._urdf_root,
time_step=self._time_step)
else:
self._example_sim.Reset(reload_urdf=False)
self._env_step_counter = 0
#self._pybullet_client.resetDebugVisualizerCamera(
# self._cam_dist, self._cam_yaw, self._cam_pitch, [0, 0, 0])
return self._get_observation()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""Step forward the simulation, given the action.
Args:
action: the predicted state
Returns:
observations: The actual state.
reward: The reward for how well the prediction matches the actual state.
done: Whether the episode has ended.
info: A dictionary that stores diagnostic information.
Raises:
ValueError: The action dimension is not the same as the number of motors.
ValueError: The magnitude of actions is out of bounds.
"""
if self._render_sleep:
# Sleep, otherwise the computation takes less time than real time,
# which will make the visualization like a fast-forward video.
time_spent = time.time() - self._last_frame_time
self._last_frame_time = time.time()
time_to_sleep = self._action_repeat * self._time_step - time_spent
if time_to_sleep > 0:
time.sleep(time_to_sleep)
#base_pos = self.minitaur.GetBasePosition()
#self._pybullet_client.resetDebugVisualizerCamera(
# self._cam_dist, self._cam_yaw, self._cam_pitch, base_pos)
for _ in range(self._action_repeat):
self._example_sim.ApplyAction(action)
self._pybullet_client.stepSimulation()
self._env_step_counter += 1
reward = self._reward()
done = self._termination()
return np.array(self._get_observation()), reward, done, {}
def render(self, mode="rgb_array", close=False):
if mode != "rgb_array":
return np.array([])
base_pos = [0, 0, 0]
view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
    proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(
        fov=60, aspect=float(self._render_width) / self._render_height, nearVal=0.01, farVal=100.0)
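    # Note: the hardcoded matrix below immediately overwrites the FOV-based
    # projection computed above, so that computeProjectionMatrixFOV call is
    # effectively dead code here.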
proj_matrix = [
1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, -1.0000200271606445, -1.0, 0.0, 0.0,
-0.02000020071864128, 0.0
]
(_, _, px, _, _) = self._pybullet_client.getCameraImage(
width=self._render_width,
height=self._render_height,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=pybullet.ER_BULLET_HARDWARE_OPENGL) #ER_TINY_RENDERER)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (self._render_height, self._render_width, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _termination(self):
terminate = self._example_sim.Termination()
return terminate
def _reward(self):
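    # Placeholder: the base environment always returns zero reward; a
    # task-specific reward would presumably be defined by a subclass or the
    # simulation factory (an assumption -- no such hook exists in this class).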
reward = 0
return reward
def _get_observation(self):
self._observation = self._example_sim.GetObservation()
return self._observation
if parse_version(gym.__version__) < parse_version('0.9.6'):
_render = render
_reset = reset
_seed = seed
_step = step
|
sergei-maertens/django | refs/heads/master | tests/check_framework/urls/include_with_dollar.py | 109 | from django.conf.urls import include, url
urlpatterns = [
url(r'^include-with-dollar$', include([])),
]
|
alexschiller/osf.io | refs/heads/develop | scripts/osfstorage/repopulate_sha.py | 42 | import sys
import logging
from modularodm import Q
from website.app import init_app
from website.files.models import FileVersion
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
def do_migration():
logger.info('Starting sha256 recovery migration')
for version in FileVersion.find(Q('metadata.sha256', 'eq', None)):
if not version.location:
continue
logger.debug('Adding sha {} to version {}'.format(version.location['object'], version._id))
version.metadata['sha256'] = version.location['object']
version.save()
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
main(dry=dry)
|
hortonworks/hortonworks-sandbox | refs/heads/master | desktop/core/ext-py/Django-1.2.3/django/conf/locale/pt/formats.py | 80 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = r'j \de F \de Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'j \de F \de Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \de Y'
MONTH_DAY_FORMAT = r'j \de F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
epssy/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/django/conf/locale/es_MX/formats.py | 118 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'j \d\e F \d\e Y \a \l\a\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday: ISO 8601
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%Y%m%d', # '20061025'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S',
'%d/%m/%Y %H:%M:%S.%f',
'%d/%m/%Y %H:%M',
'%d/%m/%y %H:%M:%S',
'%d/%m/%y %H:%M:%S.%f',
'%d/%m/%y %H:%M',
)
DECIMAL_SEPARATOR = '.' # ',' is also official (less common): NOM-008-SCFI-2002
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
|
josesanch/django-oscar | refs/heads/master | src/oscar/apps/wishlists/admin.py | 36 | from django.contrib import admin
from oscar.core.loading import get_model
WishList = get_model('wishlists', 'WishList')
Line = get_model('wishlists', 'Line')
admin.site.register(WishList)
admin.site.register(Line)
|
xica/hieratic-dynamodb | refs/heads/master | tests/test_flat.py | 1 | from pytest import fixture, raises
from datetime import datetime
from hieratic.collection import CollectionResource
from hieratic.index import SimpleIndex
@fixture
def UsersResource(UserResource, ddb_region, ddb_host, ddb_port):
@CollectionResource.define(
item_class=UserResource,
primary_index=SimpleIndex(('organization_id', int), ('id', int)),
)
class UsersRes(CollectionResource):
def __init__(self):
CollectionResource.__init__(self, None, 'users', 'dynamodb', 'HieraticDynamoDBTestUser', ddb_region, ddb_host, False, ddb_port)
return UsersRes
class TestFlat(object):
def test_flat(self, user_table, UsersResource, User):
users_resource = UsersResource()
now = datetime.now()
user_resource = users_resource.create(User(organization_id=0, id=0, created_at=now))
user = user_resource.data
assert user.organization_id == 0
assert user.id == 0
assert user.created_at == now
user_resource = users_resource['0_0']
user = user_resource.data
assert user.organization_id == 0
assert user.id == 0
assert user.created_at == now
        user_resource = users_resource.retrieve(0, 0)
user = user_resource.data
assert user.organization_id == 0
assert user.id == 0
assert user.created_at == now
user_resource.update(name='updated')
user = user_resource.data
assert user.name == 'updated'
user_resource.delete()
user = user_resource.data
assert user is None
with raises(KeyError):
users_resource['0_0']
with CollectionResource.get_context('dynamodb') as context:
users_resource.create(User(organization_id=0, id=1), context)
users_resource.create(User(organization_id=0, id=2), context)
users_resource.create(User(organization_id=0, id=3), context)
assert len(list(users_resource.query(organization_id__eq=0))) == 0
assert [1, 2, 3] == [u_res.data.id for u_res in users_resource.query(organization_id__eq=0, reverse=True)]
assert [1, 3] == [
u_res.data.id for u_res in
users_resource.bulk_get(keys=[{'organization_id': 0, 'id': 1},
{'organization_id': 0, 'id': 3}])
]
|
fitermay/intellij-community | refs/heads/master | python/testData/refactoring/extractmethod/MethodContext.after.py | 71 | class C:
def foo(self):
self.bar()
def bar(self):
for x in [1, 2]:
print x |
aroraenterprise/projecteos | refs/heads/master | backend/pylibs/werkzeug/local.py | 159 | # -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import copy
from functools import update_wrapper
from werkzeug.wsgi import ClosingIterator
from werkzeug._compat import PY2, implements_bool
# since each thread has its own greenlet we can just use those as identifiers
# for the context. If greenlets are not available we fall back to the
# current thread ident depending on where it is.
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`LocalStack` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__ident_func__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__ident_func__', get_ident)
def __iter__(self):
return iter(self.__storage__.items())
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
try:
return self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
ident = self.__ident_func__()
storage = self.__storage__
try:
storage[ident][name] = value
except KeyError:
storage[ident] = {name: value}
def __delattr__(self, name):
try:
del self.__storage__[self.__ident_func__()][name]
except KeyError:
raise AttributeError(name)
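# Minimal usage sketch for Local (an editorial addition; assumes a single
# thread of execution):
#
#   loc = Local()
#   loc.user = 'alice'  # stored under the current thread/greenlet ident
#   release_local(loc)  # hasattr(loc, 'user') is now False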
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
def __release_local__(self):
self._local.__release_local__()
def _get__ident_func__(self):
return self._local.__ident_func__
def _set__ident_func__(self, value):
object.__setattr__(self._local, '__ident_func__', value)
__ident_func__ = property(_get__ident_func__, _set__ident_func__)
del _get__ident_func__, _set__ident_func__
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
by appending them to `manager.locals`. Every time the manager cleans up,
it will clean up all the data left in the locals for this context.
The `ident_func` parameter can be added to override the default ident
function for the wrapped locals.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
.. versionchanged:: 0.7
`ident_func` was added.
"""
def __init__(self, locals=None, ident_func=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
if ident_func is not None:
self.ident_func = ident_func
for local in self.locals:
object.__setattr__(local, '__ident_func__', ident_func)
else:
self.ident_func = get_ident
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
.. versionchanged:: 0.7
You can pass a different ident function to the local manager that
will then be propagated to all the locals passed to the
constructor.
"""
return self.ident_func()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func)
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
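# Usage sketch (an editorial addition; `wsgi_app` is a hypothetical WSGI
# callable): attach locals to a manager so they are released after each
# request.
#
#   local = Local()
#   local_manager = LocalManager([local])
#   application = local_manager.make_middleware(wsgi_app)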
@implements_bool
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug.local import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug.local import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __bool__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object()) # noqa
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
if PY2:
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object()) # noqa
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
__radd__ = lambda x, o: o + x._get_current_object()
__rsub__ = lambda x, o: o - x._get_current_object()
__rmul__ = lambda x, o: o * x._get_current_object()
__rdiv__ = lambda x, o: o / x._get_current_object()
if PY2:
__rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
else:
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object()
__rmod__ = lambda x, o: o % x._get_current_object()
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
__copy__ = lambda x: copy.copy(x._get_current_object())
__deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
|
jruiperezv/ANALYSE | refs/heads/master | lms/lib/comment_client/__init__.py | 259 | from .comment_client import *
from .utils import (
CommentClientError, CommentClientRequestError,
CommentClient500Error, CommentClientMaintenanceError
)
|
Strassengezwitscher/Strassengezwitscher | refs/heads/develop | crowdgezwitscher/facebook/migrations/0001_initial.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-24 13:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('events', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='FacebookLikeStatistic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('like_count', models.PositiveIntegerField()),
],
),
migrations.CreateModel(
name='FacebookPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='unbenannt', max_length=100)),
('active', models.BooleanField(default=False)),
('location', models.CharField(max_length=100)),
('location_lat', models.DecimalField(decimal_places=6, max_digits=9)),
('location_long', models.DecimalField(decimal_places=6, max_digits=9)),
('notes', models.TextField(blank=True)),
('facebook_id', models.CharField(max_length=50)),
('events', models.ManyToManyField(blank=True, to='events.Event')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='facebooklikestatistic',
name='page',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='facebook.FacebookPage'),
),
]
|
Shiroy/servo | refs/heads/master | tests/wpt/css-tests/tools/html5lib/html5lib/treewalkers/pulldom.py | 1729 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
|
ZazieTheBeast/oscar | refs/heads/master | oscar/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/pulldom.py | 1729 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
|
phildini/petroglyphs | refs/heads/master | petroglyphs/models.py | 1 | from django.db import models
from django.contrib.sites.models import Site
class Setting(models.Model):
key = models.CharField(max_length=50, unique=True)
value = models.TextField()
export_to_template = models.BooleanField(default=False)
export_to_context = models.BooleanField(default=False)
show_in_admin = models.BooleanField(default=True)
sites = models.ManyToManyField(Site, blank=True, null=True)
def __str__(self):
return self.key
def __unicode__(self):
return self.key
|
gogobook/wagtail | refs/heads/master | wagtail/wagtailusers/tests.py | 2 | from __future__ import unicode_literals
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.utils import six
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore import hooks
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import (
Page, GroupPagePermission, Collection, GroupCollectionPermission
)
class TestUserIndexView(TestCase, WagtailTestUtils):
def setUp(self):
# create a user that should be visible in the listing
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/index.html')
self.assertContains(response, 'testuser')
def test_allows_negative_ids(self):
# see https://github.com/torchbox/wagtail/issues/565
get_user_model().objects.create_user('guardian', 'guardian@example.com', 'gu@rd14n', id=-1)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'testuser')
self.assertContains(response, 'guardian')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestUserCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_users:add'), params)
def post(self, post_data={}):
return self.client.post(reverse('wagtailusers_users:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
def test_create(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 1)
self.assertEqual(users.first().email, 'test@user.com')
def test_create_with_password_mismatch(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Test",
'last_name': "User",
'password1': "password1",
'password2': "password2",
})
# Should remain on page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/create.html')
self.assertTrue(response.context['form'].errors['password2'])
# Check that the user was not created
users = get_user_model().objects.filter(username='testuser')
self.assertEqual(users.count(), 0)
class TestUserEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user to edit
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
# Login
self.login()
def get(self, params={}, user_id=None):
return self.client.get(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), params)
def post(self, post_data={}, user_id=None):
return self.client.post(reverse('wagtailusers_users:edit', args=(user_id or self.test_user.id, )), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/users/edit.html')
    def test_nonexistent_redirect(self):
self.assertEqual(self.get(user_id=100000).status_code, 404)
def test_edit(self):
response = self.post({
'username': "testuser",
'email': "test@user.com",
'first_name': "Edited",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_users:index'))
# Check that the user was edited
user = get_user_model().objects.get(id=self.test_user.id)
self.assertEqual(user.first_name, 'Edited')
def test_edit_validation_error(self):
# Leave "username" field blank. This should give a validation error
response = self.post({
'username': "",
'email': "test@user.com",
'first_name': "Teset",
'last_name': "User",
'password1': "password",
'password2': "password",
})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
class TestUserProfileCreation(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
self.test_user = get_user_model().objects.create_user(
username='testuser',
email='testuser@email.com',
password='password'
)
def test_user_created_without_profile(self):
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 0)
with self.assertRaises(UserProfile.DoesNotExist):
self.test_user.userprofile
def test_user_profile_created_when_method_called(self):
self.assertIsInstance(UserProfile.get_for_user(self.test_user), UserProfile)
# and get it from the db too
self.assertEqual(UserProfile.objects.filter(user=self.test_user).count(), 1)
class TestGroupIndexView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:index'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/index.html')
def test_search(self):
response = self.get({'q': "Hello"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['query_string'], "Hello")
def test_pagination(self):
pages = ['0', '1', '-1', '9999', 'Not a page']
for page in pages:
response = self.get({'p': page})
self.assertEqual(response.status_code, 200)
class TestGroupCreateView(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.add_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='add_document'
)
self.change_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='change_document'
)
def get(self, params={}):
return self.client.get(reverse('wagtailusers_groups:add'), params)
def post(self, post_data={}):
post_defaults = {
'page_permissions-TOTAL_FORMS': ['0'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['0'],
'document_permissions-TOTAL_FORMS': ['0'],
'document_permissions-MAX_NUM_FORMS': ['1000'],
'document_permissions-INITIAL_FORMS': ['0'],
'image_permissions-TOTAL_FORMS': ['0'],
'image_permissions-MAX_NUM_FORMS': ['1000'],
'image_permissions-INITIAL_FORMS': ['0'],
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse('wagtailusers_groups:add'), post_data)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/create.html')
def test_create_group(self):
response = self.post({'name': "test group"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the user was created
groups = Group.objects.filter(name='test group')
self.assertEqual(groups.count(), 1)
def test_group_create_adding_permissions(self):
response = self.post({
'name': "test group",
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_types': ['edit', 'publish'],
'page_permissions-TOTAL_FORMS': ['1'],
'document_permissions-0-collection': [Collection.get_first_root_node().id],
'document_permissions-0-permissions': [self.add_doc_permission.id],
'document_permissions-TOTAL_FORMS': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now exists, with two page permissions
# and one 'add document' collection permission
new_group = Group.objects.get(name='test group')
self.assertEqual(new_group.page_permissions.all().count(), 2)
self.assertEqual(
new_group.collection_permissions.filter(permission=self.add_doc_permission).count(),
1
)
def test_duplicate_page_permissions_error(self):
# Try to submit multiple page permission entries for the same page
response = self.post({
'name': "test group",
'page_permissions-0-page': ['1'],
'page_permissions-0-permission_types': ['publish'],
'page_permissions-1-page': ['1'],
'page_permissions-1-permission_types': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# formset should have a non-form error about the duplication
        self.assertTrue(response.context['permission_panels'][0].non_form_errors())
def test_duplicate_document_permissions_error(self):
# Try to submit multiple document permission entries for the same collection
root_collection = Collection.get_first_root_node()
response = self.post({
'name': "test group",
'document_permissions-0-collection': [root_collection.id],
'document_permissions-0-permissions': [self.add_doc_permission.id],
'document_permissions-1-collection': [root_collection.id],
'document_permissions-1-permissions': [self.change_doc_permission.id],
'document_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# formset should have a non-form error about the duplication
# (we don't know what index in permission_panels the formset will be,
# so just assert that it happens on at least one permission_panel)
self.assertTrue(
any(
                hasattr(panel, 'non_form_errors') and panel.non_form_errors()
for panel in response.context['permission_panels']
)
)
class TestGroupEditView(TestCase, WagtailTestUtils):
def setUp(self):
# Create a group to edit
self.test_group = Group.objects.create(name='test group')
self.root_page = Page.objects.get(id=1)
self.root_add_permission = GroupPagePermission.objects.create(page=self.root_page,
permission_type='add',
group=self.test_group)
self.home_page = Page.objects.get(id=2)
# Get the hook-registered permissions, and add one to this group
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.existing_permission = self.registered_permissions.order_by('pk')[0]
self.another_permission = self.registered_permissions.order_by('pk')[1]
self.test_group.permissions.add(self.existing_permission)
# set up collections to test document permissions
self.root_collection = Collection.get_first_root_node()
self.evil_plans_collection = self.root_collection.add_child(name="Evil plans")
self.add_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='add_document'
)
self.change_doc_permission = Permission.objects.get(
content_type__app_label='wagtaildocs', codename='change_document'
)
GroupCollectionPermission.objects.create(
group=self.test_group,
collection=self.evil_plans_collection,
permission=self.add_doc_permission,
)
# Login
self.login()
def get(self, params={}, group_id=None):
return self.client.get(reverse('wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), params)
def post(self, post_data={}, group_id=None):
post_defaults = {
'name': 'test group',
'permissions': [self.existing_permission.id],
'page_permissions-TOTAL_FORMS': ['1'],
'page_permissions-MAX_NUM_FORMS': ['1000'],
'page_permissions-INITIAL_FORMS': ['1'],
'page_permissions-0-page': [self.root_page.id],
'page_permissions-0-permission_types': ['add'],
'document_permissions-TOTAL_FORMS': ['1'],
'document_permissions-MAX_NUM_FORMS': ['1000'],
'document_permissions-INITIAL_FORMS': ['1'],
'document_permissions-0-collection': [self.evil_plans_collection.id],
'document_permissions-0-permissions': [self.add_doc_permission.id],
'image_permissions-TOTAL_FORMS': ['0'],
'image_permissions-MAX_NUM_FORMS': ['1000'],
'image_permissions-INITIAL_FORMS': ['0'],
}
for k, v in six.iteritems(post_defaults):
post_data[k] = post_data.get(k, v)
return self.client.post(reverse(
'wagtailusers_groups:edit', args=(group_id or self.test_group.id, )), post_data)
def add_non_registered_perm(self):
# Some groups may have django permissions assigned that are not
# hook-registered as part of the wagtail interface. We need to ensure
# that these permissions are not overwritten by our views.
# Tests that use this method are testing the aforementioned
# functionality.
self.non_registered_perms = Permission.objects.exclude(id__in=self.registered_permissions)
self.non_registered_perm = self.non_registered_perms[0]
self.test_group.permissions.add(self.non_registered_perm)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailusers/groups/edit.html')
    def test_nonexistent_group_redirect(self):
self.assertEqual(self.get(group_id=100000).status_code, 404)
def test_group_edit(self):
response = self.post({'name': "test group edited"})
# Should redirect back to index
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# Check that the group was edited
group = Group.objects.get(id=self.test_group.id)
self.assertEqual(group.name, 'test group edited')
def test_group_edit_validation_error(self):
# Leave "name" field blank. This should give a validation error
response = self.post({'name': ""})
# Should not redirect to index
self.assertEqual(response.status_code, 200)
def test_group_edit_adding_page_permissions_same_page(self):
# The test group has one page permission to begin with - 'add' permission on root.
# Add two additional permission types on the root page
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-permission_types': ['add', 'publish', 'edit'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three page permissions
self.assertEqual(self.test_group.page_permissions.count(), 3)
def test_group_edit_adding_document_permissions_same_collection(self):
# The test group has one document permission to begin with -
# 'add' permission on evil_plans.
# Add 'change' permission on evil_plans
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
1
)
response = self.post({
'document_permissions-0-permissions': [
self.add_doc_permission.id, self.change_doc_permission.id
],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has two document permissions
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
2
)
def test_group_edit_adding_document_permissions_different_collection(self):
# The test group has one document permission to begin with -
# 'add' permission on evil_plans.
# Add 'add' and 'change' permission on the root collection
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
1
)
response = self.post({
'document_permissions-TOTAL_FORMS': ['2'],
'document_permissions-1-collection': [self.root_collection.id],
'document_permissions-1-permissions': [
self.add_doc_permission.id, self.change_doc_permission.id
],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has three document permissions
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
3
)
def test_group_edit_deleting_page_permissions(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.post({
'page_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero page permissions
self.assertEqual(self.test_group.page_permissions.count(), 0)
def test_group_edit_deleting_document_permissions(self):
# The test group has one document permission to begin with
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
1
)
response = self.post({
'document_permissions-0-DELETE': ['1'],
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
# The test group now has zero document permissions
self.assertEqual(
self.test_group.collection_permissions.filter(
permission__content_type__app_label='wagtaildocs'
).count(),
0
)
def test_group_edit_loads_with_page_permissions_shown(self):
# The test group has one page permission to begin with
self.assertEqual(self.test_group.page_permissions.count(), 1)
response = self.get()
page_permissions_formset = response.context['permission_panels'][0]
self.assertEqual(
page_permissions_formset.management_form['INITIAL_FORMS'].value(),
1
)
self.assertEqual(
page_permissions_formset.forms[0]['page'].value(),
self.root_page.id
)
self.assertEqual(
page_permissions_formset.forms[0]['permission_types'].value(),
['add']
)
# add edit permission on root
GroupPagePermission.objects.create(
page=self.root_page, permission_type='edit', group=self.test_group
)
# The test group now has two page permissions on root (but only one form covering both)
self.assertEqual(self.test_group.page_permissions.count(), 2)
# Reload the page and check the form instances
response = self.get()
page_permissions_formset = response.context['permission_panels'][0]
self.assertEqual(page_permissions_formset.management_form['INITIAL_FORMS'].value(), 1)
self.assertEqual(len(page_permissions_formset.forms), 1)
self.assertEqual(
page_permissions_formset.forms[0]['page'].value(),
self.root_page.id
)
self.assertEqual(
page_permissions_formset.forms[0]['permission_types'].value(),
['add', 'edit']
)
# add edit permission on home
GroupPagePermission.objects.create(
page=self.home_page, permission_type='edit', group=self.test_group
)
# The test group now has three page permissions, over two forms
self.assertEqual(self.test_group.page_permissions.count(), 3)
# Reload the page and check the form instances
response = self.get()
page_permissions_formset = response.context['permission_panels'][0]
self.assertEqual(page_permissions_formset.management_form['INITIAL_FORMS'].value(), 2)
self.assertEqual(
page_permissions_formset.forms[0]['page'].value(),
self.root_page.id
)
self.assertEqual(
page_permissions_formset.forms[0]['permission_types'].value(),
['add', 'edit']
)
self.assertEqual(
page_permissions_formset.forms[1]['page'].value(),
self.home_page.id
)
self.assertEqual(
page_permissions_formset.forms[1]['permission_types'].value(),
['edit']
)
def test_duplicate_page_permissions_error(self):
# Try to submit multiple page permission entries for the same page
response = self.post({
'page_permissions-1-page': [self.root_page.id],
'page_permissions-1-permission_types': ['edit'],
'page_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the formset should have a non-form error
        self.assertTrue(response.context['permission_panels'][0].non_form_errors())
def test_duplicate_document_permissions_error(self):
# Try to submit multiple document permission entries for the same collection
response = self.post({
            'document_permissions-1-collection': [self.evil_plans_collection.id],
            'document_permissions-1-permissions': [self.change_doc_permission.id],
'document_permissions-TOTAL_FORMS': ['2'],
})
self.assertEqual(response.status_code, 200)
# the formset should have a non-form error
self.assertTrue(
any(
                hasattr(panel, 'non_form_errors') and panel.non_form_errors()
for panel in response.context['permission_panels']
)
)
def test_group_add_registered_django_permissions(self):
# The test group has one django permission to begin with
self.assertEqual(self.test_group.permissions.count(), 1)
response = self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
self.assertRedirects(response, reverse('wagtailusers_groups:index'))
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_form_includes_non_registered_permissions_in_initial_data(self):
self.add_non_registered_perm()
original_permissions = self.test_group.permissions.all()
self.assertEqual(original_permissions.count(), 2)
response = self.get()
# See that the form is set up with the correct initial data
self.assertEqual(
response.context['form'].initial.get('permissions'),
list(original_permissions.values_list('id', flat=True))
)
def test_group_retains_non_registered_permissions_when_editing(self):
self.add_non_registered_perm()
original_permissions = list(self.test_group.permissions.all()) # list() to force evaluation
        # submit the form with no changes (only submitting the existing
        # permission, as in the self.post function definition)
self.post()
# See that the group has the same permissions as before
self.assertEqual(list(self.test_group.permissions.all()), original_permissions)
self.assertEqual(self.test_group.permissions.count(), 2)
def test_group_retains_non_registered_permissions_when_adding(self):
self.add_non_registered_perm()
# Add a second registered permission
self.post({
'permissions': [self.existing_permission.id, self.another_permission.id]
})
# See that there are now three permissions in total
self.assertEqual(self.test_group.permissions.count(), 3)
# ...including the non-registered one
self.assertIn(self.non_registered_perm, self.test_group.permissions.all())
def test_group_retains_non_registered_permissions_when_deleting(self):
self.add_non_registered_perm()
# Delete all registered permissions
self.post({'permissions': []})
# See that the non-registered permission is still there
self.assertEqual(self.test_group.permissions.count(), 1)
self.assertEqual(self.test_group.permissions.all()[0], self.non_registered_perm)
|
adrianmugnoz/Documentacion-Divulgame | refs/heads/master | readthedocs/core/management/commands/reindex_elasticsearch.py | 7 | import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
from builds.models import Version
from search import parse_json
from restapi.utils import index_search_request
log = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-p',
dest='project',
default='',
help='Project to index'),
)
def handle(self, *args, **options):
'''
Build/index all versions or a single project's version
'''
project = options['project']
if project:
queryset = Version.objects.public(project__slug=project)
log.info("Building all versions for %s" % project)
elif getattr(settings, 'INDEX_ONLY_LATEST', True):
queryset = Version.objects.public().filter(slug='latest')
else:
queryset = Version.objects.public()
for version in queryset:
log.info("Reindexing %s" % version)
try:
commit = version.project.vcs_repo(version.slug).commit
            except Exception:
                # The project checkout may not exist on this host (e.g. in
                # production), so fall back to indexing without a commit id.
                commit = None
try:
page_list = parse_json.process_all_json_files(version, build_dir=False)
index_search_request(version=version, page_list=page_list, commit=commit)
except Exception:
log.error('Build failed for %s' % version, exc_info=True)
|
GbalsaC/bitnamiP | refs/heads/master | venv/lib/python2.7/site-packages/flask/ctx.py | 776 | # -*- coding: utf-8 -*-
"""
flask.ctx
~~~~~~~~~
Implements the objects required to keep the context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
return object.__repr__(self)
def after_this_request(f):
"""Executes a function after this request. This is useful to modify
response objects. The function is passed the response object and has
to return the same or a new one.
Example::
@app.route('/')
def index():
@after_this_request
def add_header(response):
response.headers['X-Foo'] = 'Parachute'
return response
return 'Hello World!'
This is more useful if a function other than the view function wants to
modify a response. For instance think of a decorator that wants to add
some headers without converting the return value into a response object.
.. versionadded:: 0.9
"""
_request_ctx_stack.top._after_request_functions.append(f)
return f
def copy_current_request_context(f):
"""A helper function that decorates a function to retain the current
request context. This is useful when working with greenlets. The moment
the function is decorated a copy of the request context is created and
then pushed when the function is called.
Example::
import gevent
from flask import copy_current_request_context
@app.route('/')
def index():
@copy_current_request_context
def do_some_work():
# do some work here, it can access flask.request like you
# would otherwise in the view function.
...
gevent.spawn(do_some_work)
return 'Regular response'
.. versionadded:: 0.10
"""
top = _request_ctx_stack.top
if top is None:
raise RuntimeError('This decorator can only be used at local scopes '
'when a request context is on the stack. For instance within '
'view functions.')
reqctx = top.copy()
def wrapper(*args, **kwargs):
with reqctx:
return f(*args, **kwargs)
return update_wrapper(wrapper, f)
def has_request_context():
"""If you have code that wants to test if a request context is there or
not this function can be used. For instance, you may want to take advantage
of request information if the request object is available, but fail
silently if it is unavailable.
::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and has_request_context():
remote_addr = request.remote_addr
self.remote_addr = remote_addr
Alternatively you can also just test any of the context bound objects
    (such as :class:`request` or :class:`g` for truthiness)::
class User(db.Model):
def __init__(self, username, remote_addr=None):
self.username = username
if remote_addr is None and request:
remote_addr = request.remote_addr
self.remote_addr = remote_addr
.. versionadded:: 0.7
"""
return _request_ctx_stack.top is not None
def has_app_context():
"""Works like :func:`has_request_context` but for the application
context. You can also just do a boolean check on the
:data:`current_app` object instead.
.. versionadded:: 0.9
"""
return _app_ctx_stack.top is not None
class AppContext(object):
"""The application context binds an application object implicitly
to the current thread or greenlet, similar to how the
:class:`RequestContext` binds request information. The application
context is also implicitly created if a request context is created
but the application is not on top of the individual application
context.
"""
def __init__(self, app):
self.app = app
self.url_adapter = app.create_url_adapter(None)
self.g = app.app_ctx_globals_class()
# Like request context, app contexts can be pushed multiple times
# but there a basic "refcount" is enough to track them.
self._refcnt = 0
def push(self):
"""Binds the app context to the current context."""
self._refcnt += 1
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
def pop(self, exc=None):
"""Pops the app context."""
self._refcnt -= 1
if self._refcnt <= 0:
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_appcontext(exc)
rv = _app_ctx_stack.pop()
assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
% (rv, self)
appcontext_popped.send(self.app)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop(exc_value)
class RequestContext(object):
"""The request context contains all request relevant information. It is
created at the beginning of the request and pushed to the
`_request_ctx_stack` and removed at the end of it. It will create the
URL adapter and request object for the WSGI environment provided.
Do not attempt to use this class directly, instead use
:meth:`~flask.Flask.test_request_context` and
:meth:`~flask.Flask.request_context` to create this object.
When the request context is popped, it will evaluate all the
functions registered on the application for teardown execution
(:meth:`~flask.Flask.teardown_request`).
The request context is automatically popped at the end of the request
for you. In debug mode the request context is kept around if
exceptions happen so that interactive debuggers have a chance to
introspect the data. With 0.4 this can also be forced for requests
that did not fail and outside of `DEBUG` mode. By setting
``'flask._preserve_context'`` to `True` on the WSGI environment the
context will not pop itself at the end of the request. This is used by
the :meth:`~flask.Flask.test_client` for example to implement the
deferred cleanup functionality.
You might find this helpful for unittests where you need the
information from the context local around for a little longer. Make
sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
that situation, otherwise your unittests will leak memory.
"""
def __init__(self, app, environ, request=None):
self.app = app
if request is None:
request = app.request_class(environ)
self.request = request
self.url_adapter = app.create_url_adapter(self.request)
self.flashes = None
self.session = None
# Request contexts can be pushed multiple times and interleaved with
        # other request contexts.  We only get rid of them when the last
        # level is popped.  Additionally, if an application context is missing,
        # one is created implicitly, so for each level we add this information.
self._implicit_app_ctx_stack = []
# indicator if the context was preserved. Next time another context
# is pushed the preserved context is popped.
self.preserved = False
# remembers the exception for pop if there is one in case the context
# preservation kicks in.
self._preserved_exc = None
# Functions that should be executed after the request on the response
# object. These will be called before the regular "after_request"
# functions.
self._after_request_functions = []
self.match_request()
# XXX: Support for deprecated functionality. This is going away with
# Flask 1.0
blueprint = self.request.blueprint
if blueprint is not None:
# better safe than sorry, we don't want to break code that
# already worked
bp = app.blueprints.get(blueprint)
if bp is not None and blueprint_is_module(bp):
self.request._is_old_module = True
def _get_g(self):
return _app_ctx_stack.top.g
def _set_g(self, value):
_app_ctx_stack.top.g = value
g = property(_get_g, _set_g)
del _get_g, _set_g
def copy(self):
"""Creates a copy of this request context with the same request object.
This can be used to move a request context to a different greenlet.
Because the actual request object is the same this cannot be used to
move a request context to a different thread unless access to the
request object is locked.
.. versionadded:: 0.10
"""
return self.__class__(self.app,
environ=self.request.environ,
request=self.request
)
def match_request(self):
"""Can be overridden by a subclass to hook into the matching
of the request.
"""
try:
url_rule, self.request.view_args = \
self.url_adapter.match(return_rule=True)
self.request.url_rule = url_rule
except HTTPException as e:
self.request.routing_exception = e
def push(self):
"""Binds the request context to the current context."""
# If an exception occurs in debug mode or if context preservation is
# activated under exception situations exactly one context stays
# on the stack. The rationale is that you want to access that
# information under debug situations. However if someone forgets to
# pop that context again we want to make sure that on the next push
# it's invalidated, otherwise we run at risk that something leaks
# memory. This is usually only a problem in testsuite since this
# functionality is not active in production environments.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop(top._preserved_exc)
# Before we push the request context we have to ensure that there
# is an application context.
app_ctx = _app_ctx_stack.top
if app_ctx is None or app_ctx.app != self.app:
app_ctx = self.app.app_context()
app_ctx.push()
self._implicit_app_ctx_stack.append(app_ctx)
else:
self._implicit_app_ctx_stack.append(None)
_request_ctx_stack.push(self)
# Open the session at the moment that the request context is
# available. This allows a custom open_session method to use the
# request context (e.g. code that access database information
# stored on `g` instead of the appcontext).
self.session = self.app.open_session(self.request)
if self.session is None:
self.session = self.app.make_null_session()
def pop(self, exc=None):
"""Pops the request context and unbinds it by doing that. This will
also trigger the execution of functions registered by the
:meth:`~flask.Flask.teardown_request` decorator.
.. versionchanged:: 0.9
Added the `exc` argument.
"""
app_ctx = self._implicit_app_ctx_stack.pop()
clear_request = False
if not self._implicit_app_ctx_stack:
self.preserved = False
self._preserved_exc = None
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_request(exc)
# If this interpreter supports clearing the exception information
# we do that now. This will only go into effect on Python 2.x,
# on 3.x it disappears automatically at the end of the exception
# stack.
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
request_close = getattr(self.request, 'close', None)
if request_close is not None:
request_close()
clear_request = True
rv = _request_ctx_stack.pop()
assert rv is self, 'Popped wrong request context. (%r instead of %r)' \
% (rv, self)
# get rid of circular dependencies at the end of the request
# so that we don't require the GC to be active.
if clear_request:
rv.request.environ['werkzeug.request'] = None
# Get rid of the app as well if necessary.
if app_ctx is not None:
app_ctx.pop(exc)
def auto_pop(self, exc):
if self.request.environ.get('flask._preserve_context') or \
(exc is not None and self.app.preserve_context_on_exception):
self.preserved = True
self._preserved_exc = exc
else:
self.pop(exc)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
# do not pop the request stack if we are in debug mode and an
# exception happened. This will allow the debugger to still
# access the request object in the interactive shell. Furthermore
# the context can be force kept alive for the test client.
# See flask.testing for how this works.
self.auto_pop(exc_value)
def __repr__(self):
return '<%s \'%s\' [%s] of %s>' % (
self.__class__.__name__,
self.request.url,
self.request.method,
self.app.name,
)
|
newyork167/volatility | refs/heads/master | volatility/plugins/linux/check_syscall_arm.py | 45 | # Volatility
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Joe Sylve
@license: GNU General Public License 2.0
@contact: joe.sylve@gmail.com
@organization: 504ENSICS Labs
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
class linux_check_syscall_arm(linux_common.AbstractLinuxARMCommand):
""" Checks if the system call table has been altered """
def _get_syscall_table_size(self):
""" Get size of syscall table from the vector_swi function """
vector_swi_addr = self.addr_space.profile.get_symbol("vector_swi")
max_opcodes_to_check = 1024
while (max_opcodes_to_check):
opcode = obj.Object("unsigned int", offset = vector_swi_addr, vm = self.addr_space)
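            # 0xe357xxxx is the ARM 'cmp r7, #imm' encoding (r7 holds the
            # syscall number); vector_swi compares it against NR_syscalls, so
            # decoding the rotated 8-bit immediate below recovers the table size.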
if ((opcode & 0xffff0000) == 0xe3570000):
shift = 0x10 - ((opcode & 0xff00) >> 8)
size = (opcode & 0xff) << (2 * shift)
return size
vector_swi_addr += 4
max_opcodes_to_check -= 1
debug.error("Syscall table size could not be determined.")
def _get_syscall_table_address(self):
""" returns the address of the syscall table """
syscall_table_address = self.addr_space.profile.get_symbol("sys_call_table")
if syscall_table_address:
return syscall_table_address
#TODO: Handle event where this isn't exported (if needed)
debug.error("Symbol sys_call_table not export. Please file a bug report.")
def calculate(self):
"""
This works by walking the system call table
and verifies that each is a symbol in the kernel
"""
linux_common.set_plugin_members(self)
num_syscalls = self._get_syscall_table_size()
syscall_addr = self._get_syscall_table_address()
sym_addrs = self.profile.get_all_addresses()
table = obj.Object("Array", offset = syscall_addr, vm = self.addr_space, targetType = "unsigned int", count = num_syscalls)
for (i, call_addr) in enumerate(table):
if not call_addr:
continue
# have to treat them as 'long' so need to mask
call_addr = call_addr & 0xffffffff
if not call_addr in sym_addrs:
yield(i, call_addr, 1)
else:
yield(i, call_addr, 0)
def render_text(self, outfd, data):
self.table_header(outfd, [("Index", "[addr]"), ("Address", "[addrpad]"), ("Symbol", "<30")])
for (i, call_addr, hooked) in data:
if hooked == 0:
sym_name = self.profile.get_symbol_by_address("kernel", call_addr)
else:
sym_name = "HOOKED"
self.table_row(outfd, i, call_addr, sym_name)
|
Alberto-Beralix/Beralix | refs/heads/master | i386-squashfs-root/usr/lib/python2.7/dist-packages/ibus/lookuptable.py | 2 | ../../../../share/pyshared/ibus/lookuptable.py |
Lawrence-Liu/scikit-learn | refs/heads/master | sklearn/feature_selection/variance_threshold.py | 238 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
|
AlessandroZ/LaZagne | refs/heads/master | Windows/lazagne/softwares/windows/creddump7/addrspace.py | 1 | # Volatility
# Copyright (C) 2007 Volatile Systems
#
# Original Source:
# Copyright (C) 2004,2005,2006 4tphi Research
# Author: {npetroni,awalters}@4tphi.net (Nick Petroni and AAron Walters)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
@author: AAron Walters
@license: GNU General Public License 2.0 or later
@contact: awalters@volatilesystems.com
@organization: Volatile Systems
"""
""" Alias for all address spaces """
import os
import struct
class FileAddressSpace:
def __init__(self, fname, mode='rb', fast=False):
self.fname = fname
self.name = fname
self.fhandle = open(fname, mode)
self.fsize = os.path.getsize(fname)
if fast:
self.fast_fhandle = open(fname, mode)
def fread(self, len):
return self.fast_fhandle.read(len)
def read(self, addr, len):
self.fhandle.seek(addr)
return self.fhandle.read(len)
def read_long(self, addr):
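        # Unpack the 4 bytes at addr as an unsigned long in native byte order
        # (assumes the platform's struct 'L' format is 4 bytes wide).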
string = self.read(addr, 4)
(longval,) = struct.unpack('L', string)
return longval
def get_address_range(self):
return [0, self.fsize - 1]
def get_available_addresses(self):
return [self.get_address_range()]
def is_valid_address(self, addr):
return addr < self.fsize - 1
def close(self):
self.fhandle.close()
# Code below written by Brendan Dolan-Gavitt
BLOCK_SIZE = 0x1000
class HiveFileAddressSpace:
def __init__(self, fname):
self.fname = fname
self.base = FileAddressSpace(fname)
def vtop(self, vaddr):
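        # Hive cell addresses are relative to the hbin data, which on disk
        # starts after the 0x1000-byte base block; each cell also begins with
        # a 4-byte size field, hence the fixed offset.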
return vaddr + BLOCK_SIZE + 4
def read(self, vaddr, length, zero=False):
first_block = BLOCK_SIZE - vaddr % BLOCK_SIZE
full_blocks = int((length + (vaddr % BLOCK_SIZE)) / BLOCK_SIZE) - 1
left_over = (length + vaddr) % BLOCK_SIZE
paddr = self.vtop(vaddr)
if not paddr and zero:
if length < first_block:
return "\0" * length
else:
stuff_read = "\0" * first_block
elif not paddr:
return None
else:
if length < first_block:
stuff_read = self.base.read(paddr, length)
if not stuff_read and zero:
return "\0" * length
else:
return stuff_read
stuff_read = self.base.read(paddr, first_block)
if not stuff_read and zero:
stuff_read = "\0" * first_block
new_vaddr = vaddr + first_block
for i in range(0, full_blocks):
paddr = self.vtop(new_vaddr)
if not paddr and zero:
stuff_read = stuff_read + "\0" * BLOCK_SIZE
elif not paddr:
return None
else:
new_stuff = self.base.read(paddr, BLOCK_SIZE)
if not new_stuff and zero:
new_stuff = "\0" * BLOCK_SIZE
elif not new_stuff:
return None
else:
stuff_read = stuff_read + new_stuff
new_vaddr = new_vaddr + BLOCK_SIZE
if left_over > 0:
paddr = self.vtop(new_vaddr)
if not paddr and zero:
stuff_read = stuff_read + "\0" * left_over
elif not paddr:
return None
else:
stuff_read = stuff_read + self.base.read(paddr, left_over)
return stuff_read
def read_long_phys(self, addr):
string = self.base.read(addr, 4)
(longval,) = struct.unpack('L', string)
return longval
def is_valid_address(self, vaddr):
paddr = self.vtop(vaddr)
if not paddr: return False
return self.base.is_valid_address(paddr)
|
blindFS/powerline | refs/heads/develop | powerline/lib/watcher/stat.py | 38 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from threading import RLock
from powerline.lib.path import realpath
class StatFileWatcher(object):
def __init__(self):
self.watches = {}
self.lock = RLock()
def watch(self, path):
path = realpath(path)
with self.lock:
self.watches[path] = os.path.getmtime(path)
def unwatch(self, path):
path = realpath(path)
with self.lock:
self.watches.pop(path, None)
def is_watching(self, path):
with self.lock:
return realpath(path) in self.watches
def __call__(self, path):
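        # Report whether the file changed: True on first sighting or whenever
        # the recorded mtime differs from the current one.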
path = realpath(path)
with self.lock:
if path not in self.watches:
self.watches[path] = os.path.getmtime(path)
return True
mtime = os.path.getmtime(path)
if mtime != self.watches[path]:
self.watches[path] = mtime
return True
return False
def close(self):
with self.lock:
self.watches.clear()
|
jonathonwalz/ansible | refs/heads/devel | lib/ansible/modules/network/avi/avi_pool.py | 19 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_pool
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Pool Avi RESTful Object
description:
- This module is used to configure Pool object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
a_pool:
description:
            - Name of container cloud application that constitutes a pool in an a-b pool configuration, if different from vs app.
ab_pool:
description:
- A/b pool configuration.
ab_priority:
description:
            - Priority of this pool in an a-b pool pair.
- Internally used.
apic_epg_name:
description:
- Synchronize cisco apic epg members with pool servers.
application_persistence_profile_ref:
description:
- Persistence will ensure the same user sticks to the same server for a desired duration of time.
- It is a reference to an object of type applicationpersistenceprofile.
autoscale_launch_config_ref:
description:
- If configured then avi will trigger orchestration of pool server creation and deletion.
            - It is only supported for container clouds like mesos, openshift, kubernetes, docker etc.
- It is a reference to an object of type autoscalelaunchconfig.
autoscale_networks:
description:
- Network ids for the launch configuration.
autoscale_policy_ref:
description:
- Reference to server autoscale policy.
- It is a reference to an object of type serverautoscalepolicy.
capacity_estimation:
description:
- Inline estimation of capacity of servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
capacity_estimation_ttfb_thresh:
description:
- The maximum time-to-first-byte of a server.
- Allowed values are 1-5000.
- Special values are 0 - 'automatic'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
cloud_config_cksum:
description:
- Checksum of cloud configuration for pool.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
connection_ramp_duration:
description:
- Duration for which new connections will be gradually ramped up to a server recently brought online.
- Useful for lb algorithms that are least connection based.
- Allowed values are 1-300.
- Special values are 0 - 'immediate'.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
created_by:
description:
- Creator name.
default_server_port:
description:
- Traffic sent to servers will use this destination server port unless overridden by the server's specific port attribute.
- The ssl checkbox enables avi to server encryption.
- Allowed values are 1-65535.
- Default value when not specified in API or module is interpreted by Avi Controller as 80.
description:
description:
- A description of the pool.
domain_name:
description:
- Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates.
- It is performed only when common name check host_check_enabled is enabled.
east_west:
description:
- Inherited config from virtualservice.
enabled:
description:
- Enable or disable the pool.
- Disabling will terminate all open connections and pause health monitors.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
external_autoscale_groups:
description:
- Names of external auto-scale groups for pool servers.
- Currently available only for aws.
- Field introduced in 17.1.2.
version_added: "2.4"
fail_action:
description:
- Enable an action - close connection, http redirect, local http response, or backup pool - when a pool failure happens.
- By default, a connection will be closed, in case the pool experiences a failure.
fewest_tasks_feedback_delay:
description:
- Periodicity of feedback for fewest tasks server selection algorithm.
- Allowed values are 1-300.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
graceful_disable_timeout:
description:
- Used to gracefully disable a server.
- Virtual service waits for the specified time before terminating the existing connections to the servers that are disabled.
- Allowed values are 1-60.
- Special values are 0 - 'immediate', -1 - 'infinite'.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
health_monitor_refs:
description:
- Verify server health by applying one or more health monitors.
- Active monitors generate synthetic traffic from each service engine and mark a server up or down based on the response.
- The passive monitor listens only to client to server communication.
- It raises or lowers the ratio of traffic destined to a server based on successful responses.
- It is a reference to an object of type healthmonitor.
host_check_enabled:
description:
- Enable common name check for server certificate.
- If enabled and no explicit domain name is specified, avi will use the incoming host header to do the match.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
inline_health_monitor:
description:
- The passive monitor will monitor client to server connections and requests and adjust traffic load to servers based on successful responses.
- This may alter the expected behavior of the lb method, such as round robin.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ipaddrgroup_ref:
description:
- Use list of servers from ip address group.
- It is a reference to an object of type ipaddrgroup.
lb_algorithm:
description:
- The load balancing algorithm will pick a server within the pool's list of available servers.
- Enum options - LB_ALGORITHM_LEAST_CONNECTIONS, LB_ALGORITHM_ROUND_ROBIN, LB_ALGORITHM_FASTEST_RESPONSE, LB_ALGORITHM_CONSISTENT_HASH,
- LB_ALGORITHM_LEAST_LOAD, LB_ALGORITHM_FEWEST_SERVERS, LB_ALGORITHM_RANDOM, LB_ALGORITHM_FEWEST_TASKS, LB_ALGORITHM_NEAREST_SERVER.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_LEAST_CONNECTIONS.
lb_algorithm_consistent_hash_hdr:
description:
- Http header name to be used for the hash key.
lb_algorithm_hash:
description:
- Criteria used as a key for determining the hash between the client and server.
- Enum options - LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS, LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT,
- LB_ALGORITHM_CONSISTENT_HASH_URI, LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_HEADER.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS.
max_concurrent_connections_per_server:
description:
- The maximum number of concurrent connections allowed to each server within the pool.
- Note applied value will be no less than the number of service engines that the pool is placed on.
- If set to 0, no limit is applied.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
max_conn_rate_per_server:
description:
- Rate limit connections to each server.
name:
description:
- The name of the pool.
required: true
networks:
description:
- (internal-use) networks designated as containing servers for this pool.
- The servers may be further narrowed down by a filter.
- This field is used internally by avi, not editable by the user.
nsx_securitygroup:
description:
- A list of nsx service groups where the servers for the pool are created.
- Field introduced in 17.1.1.
pki_profile_ref:
description:
- Avi will validate the ssl certificate present by a server against the selected pki profile.
- It is a reference to an object of type pkiprofile.
placement_networks:
description:
- Manually select the networks and subnets used to provide reachability to the pool's servers.
- Specify the subnet using the following syntax 10-1-1-0/24.
            - Use static routes in vrf configuration when pool servers are not directly connected but routable from the service engine.
prst_hdr_name:
description:
- Header name for custom header persistence.
request_queue_depth:
description:
- Minimum number of requests to be queued when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as 128.
request_queue_enabled:
description:
- Enable request queue when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_server_name:
description:
- Rewrite incoming host header to server name of the server to which the request is proxied.
- Enabling this feature rewrites host header for requests to all servers in the pool.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_sni:
description:
- If sni server name is specified, rewrite incoming host header to the sni server name.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_auto_scale:
description:
- Server autoscale.
- Not used anymore.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_count:
description:
- Number of server_count.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
server_name:
description:
- Fully qualified dns hostname which will be used in the tls sni extension in server connections if sni is enabled.
- If no value is specified, avi will use the incoming host header instead.
server_reselect:
description:
- Server reselect configuration for http requests.
servers:
description:
- The pool directs load balanced traffic to this list of destination servers.
- The servers can be configured by ip address, name, network or via ip address group.
sni_enabled:
description:
- Enable tls sni for server connections.
- If disabled, avi will not send the sni extension as part of the handshake.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_key_and_certificate_ref:
description:
- Service engines will present a client ssl certificate to the server.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- When enabled, avi re-encrypts traffic to the backend servers.
- The specific ssl profile defines which ciphers and ssl versions will be supported.
- It is a reference to an object of type sslprofile.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
use_service_port:
description:
- Do not translate the client's destination port when sending the connection to the server.
- The pool or servers specified service port will still be used for health monitoring.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
uuid:
description:
- Uuid of the pool.
vrf_ref:
description:
- Virtual routing context that the pool is bound to.
- This is used to provide the isolation of the set of networks the pool is attached to.
            - The pool inherits the virtual routing context of the virtual service, and this field is used only internally, and is set by pb-transform.
- It is a reference to an object of type vrfcontext.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a Pool with two servers and HTTP monitor
avi_pool:
controller: 10.10.1.20
username: avi_user
password: avi_password
name: testpool1
description: testpool1
state: present
health_monitor_refs:
- '/api/healthmonitor?name=System-HTTP'
servers:
- ip:
addr: 10.10.2.20
type: V4
- ip:
addr: 10.10.2.21
type: V4
'''
RETURN = '''
obj:
description: Pool (api/pool) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
a_pool=dict(type='str',),
ab_pool=dict(type='dict',),
ab_priority=dict(type='int',),
apic_epg_name=dict(type='str',),
application_persistence_profile_ref=dict(type='str',),
autoscale_launch_config_ref=dict(type='str',),
autoscale_networks=dict(type='list',),
autoscale_policy_ref=dict(type='str',),
capacity_estimation=dict(type='bool',),
capacity_estimation_ttfb_thresh=dict(type='int',),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
connection_ramp_duration=dict(type='int',),
created_by=dict(type='str',),
default_server_port=dict(type='int',),
description=dict(type='str',),
domain_name=dict(type='list',),
east_west=dict(type='bool',),
enabled=dict(type='bool',),
external_autoscale_groups=dict(type='list',),
fail_action=dict(type='dict',),
fewest_tasks_feedback_delay=dict(type='int',),
graceful_disable_timeout=dict(type='int',),
health_monitor_refs=dict(type='list',),
host_check_enabled=dict(type='bool',),
inline_health_monitor=dict(type='bool',),
ipaddrgroup_ref=dict(type='str',),
lb_algorithm=dict(type='str',),
lb_algorithm_consistent_hash_hdr=dict(type='str',),
lb_algorithm_hash=dict(type='str',),
max_concurrent_connections_per_server=dict(type='int',),
max_conn_rate_per_server=dict(type='dict',),
name=dict(type='str', required=True),
networks=dict(type='list',),
nsx_securitygroup=dict(type='list',),
pki_profile_ref=dict(type='str',),
placement_networks=dict(type='list',),
prst_hdr_name=dict(type='str',),
request_queue_depth=dict(type='int',),
request_queue_enabled=dict(type='bool',),
rewrite_host_header_to_server_name=dict(type='bool',),
rewrite_host_header_to_sni=dict(type='bool',),
server_auto_scale=dict(type='bool',),
server_count=dict(type='int',),
server_name=dict(type='str',),
server_reselect=dict(type='dict',),
servers=dict(type='list',),
sni_enabled=dict(type='bool',),
ssl_key_and_certificate_ref=dict(type='str',),
ssl_profile_ref=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
use_service_port=dict(type='bool',),
uuid=dict(type='str',),
vrf_ref=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'pool',
set([]))
if __name__ == '__main__':
main()
|
zrhans/pythonanywhere | refs/heads/master | .virtualenvs/django19/lib/python3.4/site-packages/django/contrib/gis/geometry/backend/geos.py | 622 | from django.contrib.gis.geos import (
GEOSException as GeometryException, GEOSGeometry as Geometry,
)
__all__ = ['Geometry', 'GeometryException']
|
rossburton/yocto-autobuilder | refs/heads/ross | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/test/stdio_test_consumer.py | 40 | # -*- test-case-name: twisted.test.test_stdio.StandardInputOutputTestCase.test_consumer -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Main program for the child process run by
L{twisted.test.test_stdio.StandardInputOutputTestCase.test_consumer} to test
that process transports implement IConsumer properly.
"""
import sys, _preamble
from twisted.python import log, reflect
from twisted.internet import stdio, protocol
from twisted.protocols import basic
def failed(err):
log.startLogging(sys.stderr)
log.err(err)
class ConsumerChild(protocol.Protocol):
def __init__(self, junkPath):
self.junkPath = junkPath
def connectionMade(self):
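        # FileSender is a producer; beginFileTransfer registers it with the
        # transport (the IConsumer under test) and returns a Deferred that
        # fires once the whole file has been written.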
d = basic.FileSender().beginFileTransfer(file(self.junkPath), self.transport)
d.addErrback(failed)
d.addCallback(lambda ign: self.transport.loseConnection())
def connectionLost(self, reason):
reactor.stop()
if __name__ == '__main__':
reflect.namedAny(sys.argv[1]).install()
from twisted.internet import reactor
stdio.StandardIO(ConsumerChild(sys.argv[2]))
reactor.run()
|
lostemp/samsung-lt-lsk-v3.10.11 | refs/heads/master | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
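# Nested failure counters: syscalls[comm][pid][id][ret] -> count. autodict
# creates the intermediate dictionaries on first access.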
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
dpaiton/FeedbackLCA | refs/heads/main | utils/regress_data.py | 1 | ## Copyright 2015 Yahoo Inc.
## Licensed under the terms of the New-BSD license. Please see LICENSE file in the project root for terms.
import tensorflow as tf
from data.input_data import load_MNIST
"""
Regress on LCA activity
Outputs:
  Number of misclassified examples on the test data
Inputs:
train_data: data to fit
train_labels: training ground-truth (should be 1-hot)
test_data: data to test fit on
test_labels: testing ground-truth (should be 1-hot)
batch_size: size of batch for training
num_trials: number of batches to train on
"""
def do_regression(train_data, train_labels, test_data, test_labels, sched,
batch_size=100, num_trials=30000, rand_seed=None):
(num_neurons, num_trn_examples) = train_data.shape
num_classes = train_labels.shape[0]
num_tst_examples = test_data.shape[1]
if rand_seed:
tf.set_random_seed(rand_seed)
global_step = tf.Variable(0, trainable=False, name="global_step")
x = tf.placeholder(tf.float32,
shape=[num_neurons, None], name="input_data")
y = tf.placeholder(tf.float32,
shape=[num_classes, None], name="input_label")
w_init = tf.truncated_normal([num_classes, num_neurons], mean=0.0, stddev=1.0,
dtype=tf.float32, name="w_init")
w = tf.Variable(w_init, dtype=tf.float32, trainable=True, name="w")
b = tf.Variable(tf.zeros([num_classes], dtype=tf.float32), trainable=True,
name="bias")
y_ = tf.transpose(tf.nn.softmax(tf.transpose(
tf.matmul(w, x, name="classify")), name="softmax"))
cross_entropy = -tf.reduce_sum(tf.mul(y,
tf.log(tf.clip_by_value(y_, 1e-10, 1.0))))
learning_rates = tf.train.exponential_decay(
learning_rate=sched["lr"],
global_step=global_step,
decay_steps=sched["decay_steps"],
decay_rate=sched["decay_rate"],
staircase=sched["staircase"],
name="annealing_schedule")
grad_op = tf.train.GradientDescentOptimizer(learning_rates)
train_step = grad_op.minimize(cross_entropy, global_step=global_step,
var_list=[w])
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init_op = tf.initialize_all_variables()
with tf.Session() as sess:
with tf.device("/cpu:0"):
sess.run(init_op)
curr_batch_idx = 0
for i in range(num_trials):
data = train_data[:, curr_batch_idx:curr_batch_idx+batch_size]
labels = train_labels[:, curr_batch_idx:curr_batch_idx+batch_size]
curr_batch_idx += batch_size
if curr_batch_idx >= train_data.shape[1]:
curr_batch_idx = 0
sess.run(train_step, feed_dict={x:data, y:labels})
test_accuracy = sess.run(accuracy, feed_dict={x:test_data, y:test_labels})
return (num_tst_examples * (1.0 - test_accuracy))
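# Minimal usage sketch (hypothetical shapes and schedule, not part of the module):
#   sched = {"lr": 0.01, "decay_steps": 1000, "decay_rate": 0.9, "staircase": True}
#   errors = do_regression(a_train, y_train, a_test, y_test, sched)
# where a_* are (num_neurons, num_examples) activity matrices and y_* are 1-hot labels.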
|
molecular-workflow-repository/molflow | refs/heads/master | molflow/utils.py | 1 | # Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# deals with issues around pathlib.Path.open
if sys.version_info.major == 3:
from contextlib import redirect_stdout
RMODE = 'r'
WMODE = 'w'
strtypes = (str, bytes)
else:
RMODE = 'rb'
WMODE = 'wb'
strtypes = (unicode, str, basestring)
|
Dandandan/wikiprogramming | refs/heads/master | jsrepl/build/extern/python/closured/lib/python2.7/nturl2path.py | 228 | """Convert a NT pathname to a file URL and vice versa."""
def url2pathname(url):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
# e.g.
# ///C|/foo/bar/spam.foo
# becomes
# C:\foo\bar\spam.foo
import string, urllib
# Windows itself uses ":" even in URLs.
url = url.replace(':', '|')
if not '|' in url:
# No drive specifier, just convert slashes
if url[:4] == '////':
# path is something like ////host/path/on/remote/host
# convert this to \\host\path\on\remote\host
# (notice halving of slashes at the start of the path)
url = url[2:]
components = url.split('/')
# make sure not to convert quoted slashes :-)
return urllib.unquote('\\'.join(components))
comp = url.split('|')
if len(comp) != 2 or comp[0][-1] not in string.ascii_letters:
error = 'Bad URL: ' + url
raise IOError, error
drive = comp[0][-1].upper()
path = drive + ':'
components = comp[1].split('/')
for comp in components:
if comp:
path = path + '\\' + urllib.unquote(comp)
# Issue #11474: url like '/C|/' should convert into 'C:\\'
if path.endswith(':') and url.endswith('/'):
path += '\\'
return path
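# For example, following the doc comment above:
#   url2pathname('///C|/foo/bar/spam.foo') -> 'C:\\foo\\bar\\spam.foo'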
def pathname2url(p):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
# e.g.
# C:\foo\bar\spam.foo
# becomes
    # ///C:/foo/bar/spam.foo
import urllib
if not ':' in p:
# No drive specifier, just convert slashes and quote the name
if p[:2] == '\\\\':
# path is something like \\host\path\on\remote\host
# convert this to ////host/path/on/remote/host
# (notice doubling of slashes at the start of the path)
p = '\\\\' + p
components = p.split('\\')
return urllib.quote('/'.join(components))
comp = p.split(':')
if len(comp) != 2 or len(comp[0]) > 1:
error = 'Bad path: ' + p
raise IOError, error
drive = urllib.quote(comp[0].upper())
components = comp[1].split('\\')
path = '///' + drive + ':'
for comp in components:
if comp:
path = path + '/' + urllib.quote(comp)
return path
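# For example, following the doc comment above:
#   pathname2url('C:\\foo\\bar\\spam.foo') -> '///C:/foo/bar/spam.foo'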
|
shane-mason/essentialdb | refs/heads/master | essentialdb/query_filter.py | 1 | __author__ = 'scmason'
from essentialdb import Keys
class LogicalOperator:
"""
    LogicalOperator models a list of expressions that are bound by a logical operator, including::
    { $and: [ { <expression1> }, { <expression2> } , ... , { <expressionN> } ] }
    { $or: [ { <expression1> }, { <expression2> } , ... , { <expressionN> } ] }
    { $nor: [ { <expression1> }, { <expression2> } , ... , { <expressionN> } ] }
    { $not: [ { <expression1> }, { <expression2> } , ... , { <expressionN> } ] }
    The expressions are stored as an ordered array; each is executed on calls to 'test_document'.
"""
def __init__(self, type, expressions):
self.type = type
self.expressions = expressions
self.field = None
def test_document(self, document):
match = True
if self.type == Keys._and:
for expression in self.expressions:
match = expression.test_document(document)
if match is False:
return False
elif self.type == Keys._or:
for expression in self.expressions:
match = expression.test_document(document)
if match is True:
return True
elif self.type == Keys._nor:
for expression in self.expressions:
match = expression.test_document(document)
if match is True:
return False
else:
match = True
elif self.type == Keys._not:
for expression in self.expressions:
match = expression.test_document(document)
if match is False:
break
match = not match
return match
class ComparisonOperator:
"""
ComparisonOperator represents expressions with comparison operators of the form::
{ field: { operator: value } }
For example:
{ 'first_name' : { '$eq' : 'john' } }
{ 'year' : { '$gt' : 1900 } }
And so on.
"""
def __init__(self, field, expression):
self.field = None
self.comparator_function = None
self.match_value = None
self.comparator = None
self.parse_expression(field, expression)
def parse_expression(self, field, expression):
self.field = field
# expression is something like {'$eq': 'something'}
# get the comparator function
self.comparator = list(expression.keys())[0]
self.comparator_function = Keys.comparisons[self.comparator]
self.match_value = expression[self.comparator]
def test_document(self, document):
try:
return self.comparator_function(document[self.field], self.match_value)
except:
try:
# then attempt nested lookup
nested_val = QueryFilter._lookup_dot_path(document, self.field)
return self.comparator_function(nested_val, self.match_value)
except:
# then the dotted path was not found
pass
return False
class EqualityOperator:
"""
EqualityOperator checks for basic equality, in expressions of the form::
{field : value}
For example:
{'first_name' : 'John'}
{'subscriber' : True}
And so on.
"""
def __init__(self, field, value):
self.field = field
self.match_value = value
def test_document(self, document):
try:
return self.match_value == document[self.field]
except:
# two major cases get us here
# 1. The key doesn't exist (in which case, we return false)
# 2. It's a dot notation nested query (in which case we will try a lookup
if isinstance(self.field, str) and "." in self.field:
try:
# then attempt nested lookup
nested_val = QueryFilter._lookup_dot_path(document, self.field)
return self.match_value == nested_val
except:
# then the dotted path was not found
return False
# then it's case 1
return False
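# Illustrative sketch (not part of the original module): how the operator
# classes above evaluate a document.  The field names and values are
# hypothetical, and '$gt' is assumed to be defined in Keys.comparisons
# as a greater-than comparison.
#
#     >>> EqualityOperator('first_name', 'John').test_document({'first_name': 'John'})
#     True
#     >>> ComparisonOperator('year', {'$gt': 1900}).test_document({'year': 1995})
#     True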
class QueryFilter:
"""
Models a 'compiled' query document. The raw query document is sent in and 'parsed' or compiled into a list of
expressions. Later, the filter can be executed across a set of documents.
"""
def __init__(self, query_document):
# a single string should be an _id query
#if isinstance(query_document, str):
# self.expressions = [ComparisonOperator("_id", query_document)]
#else:
self.expressions = self.__parse_query(query_document, [])
def execute_filter(self, documents, filter_function=None, indexes={}):
"""
Execute the filter across a set of provided documents.
"""
# first, look for the most simple case, which is an id lookup
if len(self.expressions) == 1 and isinstance(self.expressions[0], EqualityOperator) and self.expressions[
0].field == Keys.id:
id = self.expressions[0].match_value
if id in documents:
return [documents[id]]
else:
return []
results = []
# do we only have one expression, and if so, do we have an index on it?
if len(self.expressions) == 1 and self.expressions[0].field in indexes:
# if it is equality, then let's find just the matches
if isinstance(self.expressions[0], EqualityOperator) or (isinstance(self.expressions[0], ComparisonOperator)
and self.expressions[0].comparator == '$eq'):
documents = indexes[self.expressions[0].field].find(documents, self.expressions[0].match_value)
for key in documents:
matches = True
for expression in self.expressions:
matches = expression.test_document(documents[key])
if matches is False:
break
if filter_function:
matches = filter_function(documents[key])
if matches is True:
results.append(documents[key])
return results
def __parse_query(self, query_document, expression_list):
expressions = expression_list
for key in query_document:
if key in [Keys._and, Keys._or, Keys._nor, Keys._not]:
log_expressions = []
for item in query_document[key]:
log_expressions = self.__parse_query(item, log_expressions)
logical_operator = LogicalOperator(key, log_expressions)
expressions.append(logical_operator)
# basic expression - something like {"field': {'$eq': 'something'}}
elif isinstance(query_document[key], dict):
expressions.append(ComparisonOperator(key, query_document[key]))
# then we are left with {"field 1", "value 1"}
else:
expressions.append(EqualityOperator(key, query_document[key]))
return expressions
@staticmethod
def _lookup_dot_path(document, field):
path = field.split('.')
current = document
for item in path:
current = current[item]
return current |
supersven/intellij-community | refs/heads/master | python/helpers/pydev/_pydev_imps/_pydev_pkgutil_old.py | 234 | """Utilities to support packages."""
# NOTE: This module must remain compatible with Python 2.3, as it is shared
# by setuptools for distribution with Python 2.3 and up.
import os
import sys
import imp
import os.path
from types import ModuleType
__all__ = [
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
'walk_packages', 'iter_modules', 'get_data',
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
]
def read_code(stream):
# This helper is needed in order for the PEP 302 emulation to
# correctly handle compiled files
import marshal
magic = stream.read(4)
if magic != imp.get_magic():
return None
stream.read(4) # Skip timestamp
return marshal.load(stream)
def simplegeneric(func):
"""Make a trivial single-dispatch generic function"""
registry = {}
def wrapper(*args, **kw):
ob = args[0]
try:
cls = ob.__class__
except AttributeError:
cls = type(ob)
try:
mro = cls.__mro__
except AttributeError:
try:
class cls(cls, object):
pass
mro = cls.__mro__[1:]
except TypeError:
mro = object, # must be an ExtensionClass or some such :(
for t in mro:
if t in registry:
return registry[t](*args, **kw)
else:
return func(*args, **kw)
try:
wrapper.__name__ = func.__name__
except (TypeError, AttributeError):
pass # Python 2.3 doesn't allow functions to be renamed
def register(typ, func=None):
if func is None:
return lambda f: register(typ, f)
registry[typ] = func
return func
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
wrapper.register = register
return wrapper
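# Illustrative usage of simplegeneric (hypothetical functions, examples only):
#
#     >>> @simplegeneric
#     ... def pretty(ob):
#     ...     return 'object: %r' % (ob,)
#     >>> @pretty.register(list)
#     ... def pretty_list(ob):
#     ...     return 'list of %d items' % len(ob)
#     >>> pretty(3)
#     'object: 3'
#     >>> pretty([1, 2])
#     'list of 2 items'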
def walk_packages(path=None, prefix='', onerror=None):
"""Yields (module_loader, name, ispkg) for all modules recursively
on path, or, if path is None, all accessible modules.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
Note that this function must import all *packages* (NOT all
modules!) on the given path, in order to access the __path__
attribute to find submodules.
'onerror' is a function which gets called with one argument (the
name of the package which was being imported) if any exception
occurs while trying to import a package. If no onerror function is
supplied, ImportErrors are caught and ignored, while all other
exceptions are propagated, terminating the search.
Examples:
# list all modules python can access
walk_packages()
# list all submodules of ctypes
walk_packages(ctypes.__path__, ctypes.__name__+'.')
"""
def seen(p, m={}):
if p in m:
return True
m[p] = True
for importer, name, ispkg in iter_modules(path, prefix):
yield importer, name, ispkg
if ispkg:
try:
__import__(name)
except ImportError:
if onerror is not None:
onerror(name)
except Exception:
if onerror is not None:
onerror(name)
else:
raise
else:
path = getattr(sys.modules[name], '__path__', None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
for item in walk_packages(path, name+'.', onerror):
yield item
def iter_modules(path=None, prefix=''):
"""Yields (module_loader, name, ispkg) for all submodules on path,
or, if path is None, all top-level modules on sys.path.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
"""
if path is None:
importers = iter_importers()
else:
importers = map(get_importer, path)
yielded = {}
for i in importers:
for name, ispkg in iter_importer_modules(i, prefix):
if name not in yielded:
yielded[name] = 1
yield i, name, ispkg
#@simplegeneric
def iter_importer_modules(importer, prefix=''):
if not hasattr(importer, 'iter_modules'):
return []
return importer.iter_modules(prefix)
iter_importer_modules = simplegeneric(iter_importer_modules)
class ImpImporter:
"""PEP 302 Importer that wraps Python's "classic" import algorithm
ImpImporter(dirname) produces a PEP 302 importer that searches that
directory. ImpImporter(None) produces a PEP 302 importer that searches
the current sys.path, plus any modules that are frozen or built-in.
Note that ImpImporter does not currently support being used by placement
on sys.meta_path.
"""
def __init__(self, path=None):
self.path = path
def find_module(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [os.path.realpath(self.path)]
try:
file, filename, etc = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(fullname, file, filename, etc)
def iter_modules(self, prefix=''):
if self.path is None or not os.path.isdir(self.path):
return
yielded = {}
import inspect
try:
filenames = os.listdir(self.path)
except OSError:
# ignore unreadable directories like import does
filenames = []
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname=='__init__' or modname in yielded:
continue
path = os.path.join(self.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
try:
dircontents = os.listdir(path)
except OSError:
# ignore unreadable directories like import does
dircontents = []
for fn in dircontents:
subname = inspect.getmodulename(fn)
if subname=='__init__':
ispkg = True
break
else:
continue # not a package
if modname and '.' not in modname:
yielded[modname] = 1
yield prefix + modname, ispkg
class ImpLoader:
"""PEP 302 Loader that wraps Python's "classic" import algorithm
"""
code = source = None
def __init__(self, fullname, file, filename, etc):
self.file = file
self.filename = filename
self.fullname = fullname
self.etc = etc
def load_module(self, fullname):
self._reopen()
try:
mod = imp.load_module(fullname, self.file, self.filename, self.etc)
finally:
if self.file:
self.file.close()
# Note: we don't set __loader__ because we want the module to look
# normal; i.e. this is just a wrapper for standard import machinery
return mod
def get_data(self, pathname):
return open(pathname, "rb").read()
def _reopen(self):
if self.file and self.file.closed:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self.file = open(self.filename, 'rU')
elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
self.file = open(self.filename, 'rb')
def _fix_name(self, fullname):
if fullname is None:
fullname = self.fullname
elif fullname != self.fullname:
raise ImportError("Loader for module %s cannot handle "
"module %s" % (self.fullname, fullname))
return fullname
def is_package(self, fullname):
fullname = self._fix_name(fullname)
return self.etc[2]==imp.PKG_DIRECTORY
def get_code(self, fullname=None):
fullname = self._fix_name(fullname)
if self.code is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
source = self.get_source(fullname)
self.code = compile(source, self.filename, 'exec')
elif mod_type==imp.PY_COMPILED:
self._reopen()
try:
self.code = read_code(self.file)
finally:
self.file.close()
elif mod_type==imp.PKG_DIRECTORY:
self.code = self._get_delegate().get_code()
return self.code
def get_source(self, fullname=None):
fullname = self._fix_name(fullname)
if self.source is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self._reopen()
try:
self.source = self.file.read()
finally:
self.file.close()
elif mod_type==imp.PY_COMPILED:
if os.path.exists(self.filename[:-1]):
f = open(self.filename[:-1], 'rU')
self.source = f.read()
f.close()
elif mod_type==imp.PKG_DIRECTORY:
self.source = self._get_delegate().get_source()
return self.source
def _get_delegate(self):
return ImpImporter(self.filename).find_module('__init__')
def get_filename(self, fullname=None):
fullname = self._fix_name(fullname)
mod_type = self.etc[2]
if self.etc[2]==imp.PKG_DIRECTORY:
return self._get_delegate().get_filename()
elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
return self.filename
return None
try:
import zipimport
from zipimport import zipimporter
def iter_zipimport_modules(importer, prefix=''):
dirlist = zipimport._zip_directory_cache[importer.archive].keys()
dirlist.sort()
_prefix = importer.prefix
plen = len(_prefix)
yielded = {}
import inspect
for fn in dirlist:
if not fn.startswith(_prefix):
continue
fn = fn[plen:].split(os.sep)
if len(fn)==2 and fn[1].startswith('__init__.py'):
if fn[0] not in yielded:
yielded[fn[0]] = 1
yield fn[0], True
if len(fn)!=1:
continue
modname = inspect.getmodulename(fn[0])
if modname=='__init__':
continue
if modname and '.' not in modname and modname not in yielded:
yielded[modname] = 1
yield prefix + modname, False
iter_importer_modules.register(zipimporter, iter_zipimport_modules)
except ImportError:
pass
def get_importer(path_item):
"""Retrieve a PEP 302 importer for the given path item
The returned importer is cached in sys.path_importer_cache
if it was newly created by a path hook.
If there is no importer, a wrapper around the basic import
machinery is returned. This wrapper is never inserted into
the importer cache (None is inserted instead).
The cache (or part of it) can be cleared manually if a
rescan of sys.path_hooks is necessary.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for path_hook in sys.path_hooks:
try:
importer = path_hook(path_item)
break
except ImportError:
pass
else:
importer = None
sys.path_importer_cache.setdefault(path_item, importer)
if importer is None:
try:
importer = ImpImporter(path_item)
except ImportError:
importer = None
return importer
def iter_importers(fullname=""):
"""Yield PEP 302 importers for the given module name
If fullname contains a '.', the importers will be for the package
containing fullname, otherwise they will be importers for sys.meta_path,
sys.path, and Python's "classic" import machinery, in that order. If
the named module is in a package, that package is imported as a side
effect of invoking this function.
Non PEP 302 mechanisms (e.g. the Windows registry) used by the
standard import machinery to find files in alternative locations
are partially supported, but are searched AFTER sys.path. Normally,
these locations are searched BEFORE sys.path, preventing sys.path
entries from shadowing them.
For this to cause a visible difference in behaviour, there must
be a module or package name that is accessible via both sys.path
and one of the non PEP 302 file system mechanisms. In this case,
the emulation will find the former version, while the builtin
import mechanism will find the latter.
Items of the following types can be affected by this discrepancy:
imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
"""
if fullname.startswith('.'):
raise ImportError("Relative module names not supported")
if '.' in fullname:
# Get the containing package's __path__
pkg = '.'.join(fullname.split('.')[:-1])
if pkg not in sys.modules:
__import__(pkg)
path = getattr(sys.modules[pkg], '__path__', None) or []
else:
for importer in sys.meta_path:
yield importer
path = sys.path
for item in path:
yield get_importer(item)
if '.' not in fullname:
yield ImpImporter()
def get_loader(module_or_name):
"""Get a PEP 302 "loader" object for module_or_name
If the module or package is accessible via the normal import
mechanism, a wrapper around the relevant part of that machinery
is returned. Returns None if the module cannot be found or imported.
If the named module is not already imported, its containing package
(if any) is imported, in order to establish the package __path__.
This function uses iter_importers(), and is thus subject to the same
limitations regarding platform-specific special import locations such
as the Windows registry.
"""
if module_or_name in sys.modules:
module_or_name = sys.modules[module_or_name]
if isinstance(module_or_name, ModuleType):
module = module_or_name
loader = getattr(module, '__loader__', None)
if loader is not None:
return loader
fullname = module.__name__
else:
fullname = module_or_name
return find_loader(fullname)
def find_loader(fullname):
"""Find a PEP 302 "loader" object for fullname
If fullname contains dots, path must be the containing package's __path__.
Returns None if the module cannot be found or imported. This function uses
iter_importers(), and is thus subject to the same limitations regarding
platform-specific special import locations such as the Windows registry.
"""
for importer in iter_importers(fullname):
loader = importer.find_module(fullname)
if loader is not None:
return loader
return None
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
path, regardless of whether they exist on the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
pname = os.path.join(*name.split('.')) # Reconstitute as relative path
# Just in case os.extsep != '.'
sname = os.extsep.join(name.split('.'))
sname_pkg = sname + os.extsep + "pkg"
init_py = "__init__" + os.extsep + "py"
path = path[:] # Start with a copy of the existing path
for dir in sys.path:
if not isinstance(dir, basestring) or not os.path.isdir(dir):
continue
subdir = os.path.join(dir, pname)
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
initfile = os.path.join(subdir, init_py)
if subdir not in path and os.path.isfile(initfile):
path.append(subdir)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except IOError, msg:
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg))
else:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
f.close()
return path
def get_data(package, resource):
"""Get a resource from a package.
This is a wrapper round the PEP 302 loader get_data API. The package
argument should be the name of a package, in standard module format
(foo.bar). The resource argument should be in the form of a relative
filename, using '/' as the path separator. The parent directory name '..'
is not allowed, and nor is a rooted name (starting with a '/').
The function returns a binary string, which is the contents of the
specified resource.
For packages located in the filesystem, which have already been imported,
this is the rough equivalent of
d = os.path.dirname(sys.modules[package].__file__)
data = open(os.path.join(d, resource), 'rb').read()
If the package cannot be located or loaded, or it uses a PEP 302 loader
which does not support get_data(), then None is returned.
"""
loader = get_loader(package)
if loader is None or not hasattr(loader, 'get_data'):
return None
mod = sys.modules.get(package) or loader.load_module(package)
if mod is None or not hasattr(mod, '__file__'):
return None
# Modify the resource name to be compatible with the loader.get_data
# signature - an os.path format "filename" starting with the dirname of
# the package's __file__
parts = resource.split('/')
parts.insert(0, os.path.dirname(mod.__file__))
resource_name = os.path.join(*parts)
return loader.get_data(resource_name)
|
apple/swift-lldb | refs/heads/stable | packages/Python/lldbsuite/test/commands/help/TestHelp.py | 1 | """
Test some lldb help commands.
See also CommandInterpreter::OutputFormattedHelpText().
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class HelpCommandTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_simplehelp(self):
"""A simple test of 'help' command and its output."""
self.expect("help",
startstr='Debugger commands:')
self.expect("help -a", matching=False,
substrs=['next'])
self.expect("help", matching=True,
substrs=['next'])
@no_debug_info_test
def test_help_on_help(self):
"""Testing the help on the help facility."""
self.expect("help help", matching=True,
substrs=['--hide-aliases',
'--hide-user-commands'])
@no_debug_info_test
def version_number_string(self):
"""Helper function to find the version number string of lldb."""
plist = os.path.join(
os.environ["LLDB_SRC"],
"resources",
"LLDB-Info.plist")
try:
CFBundleVersionSegFound = False
with open(plist, 'r') as f:
for line in f:
if CFBundleVersionSegFound:
version_line = line.strip()
import re
m = re.match("<string>(.*)</string>", version_line)
if m:
version = m.group(1)
return version
else:
# Unsuccessful, let's just break out of the for
# loop.
break
if line.find("<key>CFBundleVersion</key>") != -1:
# Found our match. The next line contains our version
# string, for example:
#
# <string>38</string>
CFBundleVersionSegFound = True
except:
# Just fallthrough...
import traceback
traceback.print_exc()
# Use None to signify that we are not able to grok the version number.
return None
@no_debug_info_test
def test_help_arch(self):
"""Test 'help arch' which should list of supported architectures."""
self.expect("help arch",
substrs=['arm', 'x86_64', 'i386'])
@no_debug_info_test
def test_help_version(self):
"""Test 'help version' and 'version' commands."""
self.expect("help version",
substrs=['Show the LLDB debugger version.'])
import re
version_str = self.version_number_string()
match = re.match('[0-9]+', version_str)
search_regexp = ['lldb( version|-' + (version_str if match else '[0-9]+') + '| \(swift-.*\)).*\n']
search_regexp[0] += 'Swift'
self.expect("version",
patterns=search_regexp)
@no_debug_info_test
def test_help_should_not_crash_lldb(self):
"""Command 'help disasm' should not crash lldb."""
self.runCmd("help disasm", check=False)
self.runCmd("help unsigned-integer")
@no_debug_info_test
def test_help_should_not_hang_emacsshell(self):
"""Command 'settings set term-width 0' should not hang the help command."""
self.expect(
"settings set term-width 0",
COMMAND_FAILED_AS_EXPECTED,
error=True,
substrs=['error: 0 is out of range, valid values must be between'])
# self.runCmd("settings set term-width 0")
self.expect("help",
startstr='Debugger commands:')
@no_debug_info_test
def test_help_breakpoint_set(self):
"""Test that 'help breakpoint set' does not print out redundant lines of:
'breakpoint set [-s <shlib-name>] ...'."""
self.expect("help breakpoint set", matching=False,
substrs=['breakpoint set [-s <shlib-name>]'])
@no_debug_info_test
def test_help_image_dump_symtab_should_not_crash(self):
"""Command 'help image dump symtab' should not crash lldb."""
# 'image' is an alias for 'target modules'.
self.expect("help image dump symtab",
substrs=['dump symtab',
'sort-order'])
@no_debug_info_test
def test_help_image_du_sym_is_ambiguous(self):
"""Command 'help image du sym' is ambiguous and spits out the list of candidates."""
self.expect("help image du sym",
COMMAND_FAILED_AS_EXPECTED, error=True,
substrs=['error: ambiguous command image du sym',
'symfile',
'symtab'])
@no_debug_info_test
def test_help_image_du_line_should_work(self):
"""Command 'help image du line-table' is not ambiguous and should work."""
# 'image' is an alias for 'target modules'.
self.expect("help image du line", substrs=[
'Dump the line table for one or more compilation units'])
@no_debug_info_test
def test_help_target_variable_syntax(self):
"""Command 'help target variable' should display <variable-name> ..."""
self.expect("help target variable",
substrs=['<variable-name> [<variable-name> [...]]'])
@no_debug_info_test
def test_help_watchpoint_and_its_args(self):
"""Command 'help watchpoint', 'help watchpt-id', and 'help watchpt-id-list' should work."""
self.expect("help watchpoint",
substrs=['delete', 'disable', 'enable', 'list'])
self.expect("help watchpt-id",
substrs=['<watchpt-id>'])
self.expect("help watchpt-id-list",
substrs=['<watchpt-id-list>'])
@no_debug_info_test
def test_help_watchpoint_set(self):
"""Test that 'help watchpoint set' prints out 'expression' and 'variable'
as the possible subcommands."""
self.expect("help watchpoint set",
substrs=['The following subcommands are supported:'],
patterns=['expression +--',
'variable +--'])
@no_debug_info_test
def test_help_po_hides_options(self):
"""Test that 'help po' does not show all the options for expression"""
self.expect(
"help po",
substrs=[
'--show-all-children',
'--object-description'],
matching=False)
@no_debug_info_test
def test_help_run_hides_options(self):
"""Test that 'help run' does not show all the options for process launch"""
self.expect("help run",
substrs=['--arch', '--environment'], matching=False)
@no_debug_info_test
def test_help_next_shows_options(self):
"""Test that 'help next' shows all the options for thread step-over"""
self.expect("help next",
substrs=['--python-class', '--run-mode'], matching=True)
@no_debug_info_test
def test_help_provides_alternatives(self):
"""Test that help on commands that don't exist provides information on additional help avenues"""
self.expect(
"help thisisnotadebuggercommand",
substrs=[
"'thisisnotadebuggercommand' is not a known command.",
"Try 'help' to see a current list of commands.",
"Try 'apropos thisisnotadebuggercommand' for a list of related commands.",
"Try 'type lookup thisisnotadebuggercommand' for information on types, methods, functions, modules, etc."],
error=True)
self.expect(
"help process thisisnotadebuggercommand",
substrs=[
"'process thisisnotadebuggercommand' is not a known command.",
"Try 'help' to see a current list of commands.",
"Try 'apropos thisisnotadebuggercommand' for a list of related commands.",
"Try 'type lookup thisisnotadebuggercommand' for information on types, methods, functions, modules, etc."])
@no_debug_info_test
def test_custom_help_alias(self):
"""Test that aliases pick up custom help text."""
def cleanup():
self.runCmd('command unalias afriendlyalias', check=False)
self.runCmd('command unalias averyfriendlyalias', check=False)
self.addTearDownHook(cleanup)
self.runCmd(
'command alias --help "I am a friendly alias" -- afriendlyalias help')
self.expect(
"help afriendlyalias",
matching=True,
substrs=['I am a friendly alias'])
self.runCmd(
'command alias --long-help "I am a very friendly alias" -- averyfriendlyalias help')
self.expect("help averyfriendlyalias", matching=True,
substrs=['I am a very friendly alias'])
@no_debug_info_test
def test_alias_prints_origin(self):
"""Test that 'help <unique_match_to_alias>' prints the alias origin."""
def cleanup():
self.runCmd('command unalias alongaliasname', check=False)
self.addTearDownHook(cleanup)
self.runCmd('command alias alongaliasname help')
self.expect("help alongaliasna", matching=True,
substrs=["'alongaliasna' is an abbreviation for 'help'"])
@no_debug_info_test
def test_hidden_help(self):
self.expect("help -h",
substrs=["_regexp-bt"])
@no_debug_info_test
def test_help_ambiguous(self):
self.expect("help g",
substrs=["Help requested with ambiguous command name, possible completions:",
"gdb-remote", "gui"])
@no_debug_info_test
def test_help_unknown_flag(self):
self.expect("help -z", error=True,
substrs=["unknown or ambiguous option"])
@no_debug_info_test
def test_help_format_output(self):
"""Test that help output reaches TerminalWidth."""
self.runCmd(
'settings set term-width 108')
self.expect(
"help format",
matching=True,
substrs=['<format> -- One of the format names'])
|
savoirfairelinux/django | refs/heads/master | tests/template_tests/templatetags/testtags.py | 145 | from django.template import Library, Node
register = Library()
class EchoNode(Node):
def __init__(self, contents):
self.contents = contents
def render(self, context):
return ' '.join(self.contents)
@register.tag
def echo(parser, token):
return EchoNode(token.contents.split()[1:])
register.tag('other_echo', echo)
@register.filter
def upper(value):
return value.upper()
|
supermihi/pytaglib | refs/heads/master | tests/test_io.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2019 Michael Helmling
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation
#
from __future__ import unicode_literals
import os, stat, sys
import taglib
import pytest
from . import copy_test_file
def test_not_existing_file_raises():
"""Ensure OSError is raised if a file does not exist, or is a directory."""
with pytest.raises(OSError):
taglib.File('/this/file/almost/certainly/does/not/exist.flac')
with pytest.raises(OSError):
taglib.File('/spæciäl/chàracterß.mp3')
with pytest.raises(OSError):
taglib.File('/usr') # directory
with pytest.raises(OSError):
taglib.File("/nonexistent.ogg")
@pytest.mark.skipif(sys.platform == 'win32', reason="getuid() not available on Windows")
def test_os_error_on_save_read_only_file(tmpdir):
"""Ensure OSError is raised when save() is called on read-only files."""
if os.getuid() == 0:
pytest.skip('taglib allows writing read-only files as root')
f = copy_test_file('rare_frames.mp3', tmpdir)
os.chmod(f, stat.S_IREAD)
tf = taglib.File(f)
assert tf.readOnly
with pytest.raises(OSError):
tf.save()
os.chmod(f, stat.S_IREAD & stat.S_IWRITE)
tf.close()
@pytest.mark.skipif(sys.platform == 'win32', reason="getuid() not available on Windows")
def test_file_with_non_ascii_name_throws_on_readonly_save(tmpdir):
"""Motivated by https://github.com/supermihi/pytaglib/issues/21.
"""
if os.getuid() == 0:
pytest.skip('taglib allows writing read-only files as root')
copy_file = copy_test_file('readönly.mp3', tmpdir)
os.chmod(copy_file, stat.S_IREAD)
tfile = taglib.File(copy_file.encode('utf8'))
tfile.tags['COMMENT'] = ['']
with pytest.raises(OSError):
tfile.save()
tfile.close()
def test_can_read_bytes_filename_non_ascii(tmpdir):
f = copy_test_file('testöü.flac', tmpdir)
tf = taglib.File(f.encode('utf8'))
tf.close()
def test_can_read_unicode_filename_non_ascii(tmpdir):
f = copy_test_file('testöü.flac', tmpdir)
if sys.version_info.major == 2:
f = unicode(f)
tf = taglib.File(f)
tf.close()
|
shubhamgupta123/erpnext | refs/heads/master | erpnext/accounts/doctype/account/chart_of_accounts/verified/standard_chart_of_accounts_with_account_number.py | 16 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from frappe import _
def get():
return {
_("Application of Funds (Assets)"): {
_("Current Assets"): {
_("Accounts Receivable"): {
_("Debtors"): {
"account_type": "Receivable",
"account_number": "1310"
},
"account_number": "1300"
},
_("Bank Accounts"): {
"account_type": "Bank",
"is_group": 1,
"account_number": "1200"
},
_("Cash In Hand"): {
_("Cash"): {
"account_type": "Cash",
"account_number": "1110"
},
"account_type": "Cash",
"account_number": "1100"
},
_("Loans and Advances (Assets)"): {
_("Employee Advances"): {
"account_number": "1610"
},
"account_number": "1600"
},
_("Securities and Deposits"): {
_("Earnest Money"): {
"account_number": "1651"
},
"account_number": "1650"
},
_("Stock Assets"): {
_("Stock In Hand"): {
"account_type": "Stock",
"account_number": "1410"
},
"account_type": "Stock",
"account_number": "1400"
},
_("Tax Assets"): {
"is_group": 1,
"account_number": "1500"
},
"account_number": "1100-1600"
},
_("Fixed Assets"): {
_("Capital Equipments"): {
"account_type": "Fixed Asset",
"account_number": "1710"
},
_("Electronic Equipments"): {
"account_type": "Fixed Asset",
"account_number": "1720"
},
_("Furnitures and Fixtures"): {
"account_type": "Fixed Asset",
"account_number": "1730"
},
_("Office Equipments"): {
"account_type": "Fixed Asset",
"account_number": "1740"
},
_("Plants and Machineries"): {
"account_type": "Fixed Asset",
"account_number": "1750"
},
_("Buildings"): {
"account_type": "Fixed Asset",
"account_number": "1760"
},
_("Softwares"): {
"account_type": "Fixed Asset",
"account_number": "1770"
},
_("Accumulated Depreciation"): {
"account_type": "Accumulated Depreciation",
"account_number": "1780"
},
_("CWIP Account"): {
"account_type": "Capital Work in Progress",
"account_number": "1790"
},
"account_number": "1700"
},
_("Investments"): {
"is_group": 1,
"account_number": "1800"
},
_("Temporary Accounts"): {
_("Temporary Opening"): {
"account_type": "Temporary",
"account_number": "1910"
},
"account_number": "1900"
},
"root_type": "Asset",
"account_number": "1000"
},
_("Expenses"): {
_("Direct Expenses"): {
_("Stock Expenses"): {
_("Cost of Goods Sold"): {
"account_type": "Cost of Goods Sold",
"account_number": "5111"
},
_("Expenses Included In Asset Valuation"): {
"account_type": "Expenses Included In Asset Valuation",
"account_number": "5112"
},
_("Expenses Included In Valuation"): {
"account_type": "Expenses Included In Valuation",
"account_number": "5118"
},
_("Stock Adjustment"): {
"account_type": "Stock Adjustment",
"account_number": "5119"
},
"account_number": "5110"
},
"account_number": "5100"
},
_("Indirect Expenses"): {
_("Administrative Expenses"): {
"account_number": "5201"
},
_("Commission on Sales"): {
"account_number": "5202"
},
_("Depreciation"): {
"account_type": "Depreciation",
"account_number": "5203"
},
_("Entertainment Expenses"): {
"account_number": "5204"
},
_("Freight and Forwarding Charges"): {
"account_type": "Chargeable",
"account_number": "5205"
},
_("Legal Expenses"): {
"account_number": "5206"
},
_("Marketing Expenses"): {
"account_type": "Chargeable",
"account_number": "5207"
},
_("Office Maintenance Expenses"): {
"account_number": "5208"
},
_("Office Rent"): {
"account_number": "5209"
},
_("Postal Expenses"): {
"account_number": "5210"
},
_("Print and Stationery"): {
"account_number": "5211"
},
_("Round Off"): {
"account_type": "Round Off",
"account_number": "5212"
},
_("Salary"): {
"account_number": "5213"
},
_("Sales Expenses"): {
"account_number": "5214"
},
_("Telephone Expenses"): {
"account_number": "5215"
},
_("Travel Expenses"): {
"account_number": "5216"
},
_("Utility Expenses"): {
"account_number": "5217"
},
_("Write Off"): {
"account_number": "5218"
},
_("Exchange Gain/Loss"): {
"account_number": "5219"
},
_("Gain/Loss on Asset Disposal"): {
"account_number": "5220"
},
_("Miscellaneous Expenses"): {
"account_type": "Chargeable",
"account_number": "5221"
},
"account_number": "5200"
},
"root_type": "Expense",
"account_number": "5000"
},
_("Income"): {
_("Direct Income"): {
_("Sales"): {
"account_number": "4110"
},
_("Service"): {
"account_number": "4120"
},
"account_number": "4100"
},
_("Indirect Income"): {
"is_group": 1,
"account_number": "4200"
},
"root_type": "Income",
"account_number": "4000"
},
_("Source of Funds (Liabilities)"): {
_("Current Liabilities"): {
_("Accounts Payable"): {
_("Creditors"): {
"account_type": "Payable",
"account_number": "2110"
},
_("Payroll Payable"): {
"account_number": "2120"
},
"account_number": "2100"
},
_("Stock Liabilities"): {
_("Stock Received But Not Billed"): {
"account_type": "Stock Received But Not Billed",
"account_number": "2210"
},
_("Asset Received But Not Billed"): {
"account_type": "Asset Received But Not Billed",
"account_number": "2211"
},
"account_number": "2200"
},
_("Duties and Taxes"): {
"account_type": "Tax",
"is_group": 1,
"account_number": "2300"
},
_("Loans (Liabilities)"): {
_("Secured Loans"): {
"account_number": "2410"
},
_("Unsecured Loans"): {
"account_number": "2420"
},
_("Bank Overdraft Account"): {
"account_number": "2430"
},
"account_number": "2400"
},
"account_number": "2100-2400"
},
"root_type": "Liability",
"account_number": "2000"
},
_("Equity"): {
_("Capital Stock"): {
"account_type": "Equity",
"account_number": "3100"
},
_("Dividends Paid"): {
"account_type": "Equity",
"account_number": "3200"
},
_("Opening Balance Equity"): {
"account_type": "Equity",
"account_number": "3300"
},
_("Retained Earnings"): {
"account_type": "Equity",
"account_number": "3400"
},
"root_type": "Equity",
"account_number": "3000"
}
}
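# Illustrative access into the returned chart (examples only; `_` is
# frappe's translation helper imported above):
#
#     >>> chart = get()
#     >>> chart[_("Equity")]["account_number"]
#     '3000'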
|
uwafsl/MissionPlanner | refs/heads/master | Lib/site-packages/numpy/core/tests/test_npy_arraytypes.py | 54 | import sys
import warnings
import numpy as np
from numpy.testing import *
warnings.filterwarnings('ignore',
'Casting complex values to real discards the imaginary part')
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
alltypes = list( types )
alltypes.append( np.datetime64 )
alltypes.append( np.timedelta64 )
class TestArrayTypes(TestCase):
def test_argmax( self ):
x = np.array( [False, False, True, False], dtype=np.bool )
assert x.argmax() == 2, "Broken array.argmax on np.bool"
a = np.array( [u'aaa', u'aa', u'bbb'] )
# u'aaa' > u'aa' and u'bbb' > u'aaa' Hence, argmax == 2.
assert a.argmax() == 2, "Broken array.argmax on unicode data."
a = np.array( [ 'aaa', 'aa', 'bbb'] )
# 'aaa' > 'aa' and 'bbb' > 'aaa' Hence, argmax == 2.
assert a.argmax() == 2, "Broken array.argmax on string data."
def test_argmax_numeric( self ):
# Skip the np.bool_ type as it lacks a fill function, hence can't use
# arange().
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 5, dtype=t )
assert a.argmax() == 4, "Broken array.argmax on type: " + str(t)
def test_nonzero_numeric_types( self ):
for k,t in enumerate(alltypes):
a = np.array( [ t(1) ] )
assert a, "Broken array.nonzero on type: " + str(t)
def test_nonzero_string_types( self ):
a = np.array( [ 'aaa' ] )
assert a, "Broken array.nonzero on string elements."
a = np.array( [ u'aaa' ] )
assert a, "Broken array.nonzero on Unicode elements."
def test_compare( self ):
# Light bulb! argmax doesn't call compare() for numeric/logical
# types. It only does that for string types. Duh.
pass
def test_copyswap( self ):
# Skip np.bool_.
for k,t in enumerate( types[1:] ):
x = np.arange( 10, dtype=t )
# This should exercise <type>_copyswap
x[::2].fill( t(2) )
assert_equal( x, [2,1,2,3,2,5,2,7,2,9] )
def test_copyswap_misc( self ):
x = np.array( [ u'a', u'b', u'c' ] )
x[::2].fill( u'd' )
assert_equal( x, [u'd', u'b', u'd'] )
def test_copyswapn( self ):
# bool lacks arange support.
for k,t in enumerate( alltypes[1:] ):
x = np.arange( 10, dtype=t )
y = x.byteswap()
z = y.byteswap()
assert_equal( z, x )
def test_copyswapn_misc( self ):
x = np.array( [ u'a', u'b', u'c' ] )
y = x.byteswap()
z = y.byteswap()
assert_equal( z, x )
def test_compare( self ):
for k,t in enumerate( alltypes[1:] ):
try:
a = np.arange( 10, dtype=t )
keys = a[::2]
b = a.searchsorted( keys )
c = a.copy()
np.insert( c, b, b.astype( t ) )
c.sort()
assert_equal( c, a )
except TypeError, e:
print "Trouble with type %d:" % k, e
def test_compare_bool( self ):
# bool can't handle numpy.arange(), so has to be coded separately.
a = np.array( [False, True], dtype=np.bool_ )
keys = a
b = a.searchsorted( keys )
c = a.copy()
np.insert( c, b, keys )
c.sort()
assert_equal( c, a )
def test_dot( self ):
# Do something to test dot on bool...
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 3, dtype=t ) + 1
assert a.dot(a) == t(14), \
"Problem with dot product with array of type %s" % k
def test_clip( self ):
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 5, dtype=t )
b = a.clip( 2, 3 )
x = np.array( [2,2,2,3,3], dtype=t )
assert_equal( b, x )
def test_clip_bool( self ):
a = np.array( [False, True], np.bool )
assert_equal( a.clip(False,False), [False, False] )
def test_array_casting( self ):
for k,t in enumerate( alltypes ):
a = np.array( [ t(1) ] )
for k2, t2 in enumerate( alltypes ):
b = a.astype( t2 )
if k2 < len(types):
assert b[0] == 1, \
"Busted array type casting: k=%d k2=%d" % (k,k2)
else:
# Casting to datetime64 yields a 1/1/1970+... result,
# which isn't so hot for checking against "1". So, in
# these cases, just cast back to the starting time, and
# make sure we got back what we started with.
c = b.astype( t )
assert_equal( c, a )
def test_take( self ):
# Test all types, but skip np.bool_ for now, as it lacks a fill
# function. Grrr.
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 10, dtype=t )
idx = np.arange(5) * 2
c = np.take( a, idx )
assert_equal( c, a[::2] )
def test_putmask( self ):
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 5, dtype=t )
mask = np.zeros( 5, dtype=np.bool )
mask[::2] = True
np.putmask( a, mask, t(8) )
x = np.array( [8,1,8,3,8], dtype=t )
assert_equal( a, x )
def test_fillwithscalar( self ):
a = np.empty( 2, dtype=np.datetime64 )
a.fill( np.datetime64( 3 ) )
x = np.zeros( 2, dtype=np.datetime64 ) + 3
assert_equal( a, x )
if __name__ == "__main__":
run_module_suite()
|
berinhard/newfies-dialer | refs/heads/master | newfies/custom_admin_tools/menu.py | 4 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2013 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
"""This file was generated with the custommenu management command, it contains
the classes for the admin menu, you can customize this class as you want.
To activate your custom menu add the following to your settings.py::
ADMIN_TOOLS_MENU = 'menu.CustomMenu'"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from admin_tools.menu import items, Menu
import newfies
class CustomMenu(Menu):
"""Custom Menu for admin site."""
def __init__(self, **kwargs):
Menu.__init__(self, **kwargs)
self.children += [
items.MenuItem(_('Newfies-Dialer' + ' V' + newfies.__version__), reverse('admin:index')),
items.Bookmarks(),
items.AppList(
_('applications').capitalize(),
exclude=('django.contrib.*', )
),
items.AppList(
_('administration').capitalize(),
models=('django.contrib.*', )
),
items.MenuItem(_('API Explorer'), reverse('admin:index') + '../api-explorer/'),
items.MenuItem(_('customer panel').title(), reverse('admin:index') + '../'),
]
def init_with_context(self, context):
"""Use this method if you need to access the request context."""
return super(CustomMenu, self).init_with_context(context)
|
rwatson/chromium-capsicum | refs/heads/chromium-capsicum | third_party/scons/scons-local/SCons/Tool/packaging/src_tarbz2.py | 3 | """SCons.Tool.Packaging.tarbz2
The tarbz2 SRC packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/src_tarbz2.py 3897 2009/01/13 06:45:54 scons"
from SCons.Tool.packaging import putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.bz2')
target, source = putintopackageroot(target, source, env, PACKAGEROOT, honor_install_location=0)
return bld(env, target, source, TARFLAGS='-jc')
|
fernandog/osmc | refs/heads/master | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x05d.py | 252 | data = (
'Lang ', # 0x00
'Kan ', # 0x01
'Lao ', # 0x02
'Lai ', # 0x03
'Xian ', # 0x04
'Que ', # 0x05
'Kong ', # 0x06
'Chong ', # 0x07
'Chong ', # 0x08
'Ta ', # 0x09
'Lin ', # 0x0a
'Hua ', # 0x0b
'Ju ', # 0x0c
'Lai ', # 0x0d
'Qi ', # 0x0e
'Min ', # 0x0f
'Kun ', # 0x10
'Kun ', # 0x11
'Zu ', # 0x12
'Gu ', # 0x13
'Cui ', # 0x14
'Ya ', # 0x15
'Ya ', # 0x16
'Gang ', # 0x17
'Lun ', # 0x18
'Lun ', # 0x19
'Leng ', # 0x1a
'Jue ', # 0x1b
'Duo ', # 0x1c
'Zheng ', # 0x1d
'Guo ', # 0x1e
'Yin ', # 0x1f
'Dong ', # 0x20
'Han ', # 0x21
'Zheng ', # 0x22
'Wei ', # 0x23
'Yao ', # 0x24
'Pi ', # 0x25
'Yan ', # 0x26
'Song ', # 0x27
'Jie ', # 0x28
'Beng ', # 0x29
'Zu ', # 0x2a
'Jue ', # 0x2b
'Dong ', # 0x2c
'Zhan ', # 0x2d
'Gu ', # 0x2e
'Yin ', # 0x2f
'[?] ', # 0x30
'Ze ', # 0x31
'Huang ', # 0x32
'Yu ', # 0x33
'Wei ', # 0x34
'Yang ', # 0x35
'Feng ', # 0x36
'Qiu ', # 0x37
'Dun ', # 0x38
'Ti ', # 0x39
'Yi ', # 0x3a
'Zhi ', # 0x3b
'Shi ', # 0x3c
'Zai ', # 0x3d
'Yao ', # 0x3e
'E ', # 0x3f
'Zhu ', # 0x40
'Kan ', # 0x41
'Lu ', # 0x42
'Yan ', # 0x43
'Mei ', # 0x44
'Gan ', # 0x45
'Ji ', # 0x46
'Ji ', # 0x47
'Huan ', # 0x48
'Ting ', # 0x49
'Sheng ', # 0x4a
'Mei ', # 0x4b
'Qian ', # 0x4c
'Wu ', # 0x4d
'Yu ', # 0x4e
'Zong ', # 0x4f
'Lan ', # 0x50
'Jue ', # 0x51
'Yan ', # 0x52
'Yan ', # 0x53
'Wei ', # 0x54
'Zong ', # 0x55
'Cha ', # 0x56
'Sui ', # 0x57
'Rong ', # 0x58
'Yamashina ', # 0x59
'Qin ', # 0x5a
'Yu ', # 0x5b
'Kewashii ', # 0x5c
'Lou ', # 0x5d
'Tu ', # 0x5e
'Dui ', # 0x5f
'Xi ', # 0x60
'Weng ', # 0x61
'Cang ', # 0x62
'Dang ', # 0x63
'Hong ', # 0x64
'Jie ', # 0x65
'Ai ', # 0x66
'Liu ', # 0x67
'Wu ', # 0x68
'Song ', # 0x69
'Qiao ', # 0x6a
'Zi ', # 0x6b
'Wei ', # 0x6c
'Beng ', # 0x6d
'Dian ', # 0x6e
'Cuo ', # 0x6f
'Qian ', # 0x70
'Yong ', # 0x71
'Nie ', # 0x72
'Cuo ', # 0x73
'Ji ', # 0x74
'[?] ', # 0x75
'Tao ', # 0x76
'Song ', # 0x77
'Zong ', # 0x78
'Jiang ', # 0x79
'Liao ', # 0x7a
'Kang ', # 0x7b
'Chan ', # 0x7c
'Die ', # 0x7d
'Cen ', # 0x7e
'Ding ', # 0x7f
'Tu ', # 0x80
'Lou ', # 0x81
'Zhang ', # 0x82
'Zhan ', # 0x83
'Zhan ', # 0x84
'Ao ', # 0x85
'Cao ', # 0x86
'Qu ', # 0x87
'Qiang ', # 0x88
'Zui ', # 0x89
'Zui ', # 0x8a
'Dao ', # 0x8b
'Dao ', # 0x8c
'Xi ', # 0x8d
'Yu ', # 0x8e
'Bo ', # 0x8f
'Long ', # 0x90
'Xiang ', # 0x91
'Ceng ', # 0x92
'Bo ', # 0x93
'Qin ', # 0x94
'Jiao ', # 0x95
'Yan ', # 0x96
'Lao ', # 0x97
'Zhan ', # 0x98
'Lin ', # 0x99
'Liao ', # 0x9a
'Liao ', # 0x9b
'Jin ', # 0x9c
'Deng ', # 0x9d
'Duo ', # 0x9e
'Zun ', # 0x9f
'Jiao ', # 0xa0
'Gui ', # 0xa1
'Yao ', # 0xa2
'Qiao ', # 0xa3
'Yao ', # 0xa4
'Jue ', # 0xa5
'Zhan ', # 0xa6
'Yi ', # 0xa7
'Xue ', # 0xa8
'Nao ', # 0xa9
'Ye ', # 0xaa
'Ye ', # 0xab
'Yi ', # 0xac
'E ', # 0xad
'Xian ', # 0xae
'Ji ', # 0xaf
'Xie ', # 0xb0
'Ke ', # 0xb1
'Xi ', # 0xb2
'Di ', # 0xb3
'Ao ', # 0xb4
'Zui ', # 0xb5
'[?] ', # 0xb6
'Ni ', # 0xb7
'Rong ', # 0xb8
'Dao ', # 0xb9
'Ling ', # 0xba
'Za ', # 0xbb
'Yu ', # 0xbc
'Yue ', # 0xbd
'Yin ', # 0xbe
'[?] ', # 0xbf
'Jie ', # 0xc0
'Li ', # 0xc1
'Sui ', # 0xc2
'Long ', # 0xc3
'Long ', # 0xc4
'Dian ', # 0xc5
'Ying ', # 0xc6
'Xi ', # 0xc7
'Ju ', # 0xc8
'Chan ', # 0xc9
'Ying ', # 0xca
'Kui ', # 0xcb
'Yan ', # 0xcc
'Wei ', # 0xcd
'Nao ', # 0xce
'Quan ', # 0xcf
'Chao ', # 0xd0
'Cuan ', # 0xd1
'Luan ', # 0xd2
'Dian ', # 0xd3
'Dian ', # 0xd4
'[?] ', # 0xd5
'Yan ', # 0xd6
'Yan ', # 0xd7
'Yan ', # 0xd8
'Nao ', # 0xd9
'Yan ', # 0xda
'Chuan ', # 0xdb
'Gui ', # 0xdc
'Chuan ', # 0xdd
'Zhou ', # 0xde
'Huang ', # 0xdf
'Jing ', # 0xe0
'Xun ', # 0xe1
'Chao ', # 0xe2
'Chao ', # 0xe3
'Lie ', # 0xe4
'Gong ', # 0xe5
'Zuo ', # 0xe6
'Qiao ', # 0xe7
'Ju ', # 0xe8
'Gong ', # 0xe9
'Kek ', # 0xea
'Wu ', # 0xeb
'Pwu ', # 0xec
'Pwu ', # 0xed
'Chai ', # 0xee
'Qiu ', # 0xef
'Qiu ', # 0xf0
'Ji ', # 0xf1
'Yi ', # 0xf2
'Si ', # 0xf3
'Ba ', # 0xf4
'Zhi ', # 0xf5
'Zhao ', # 0xf6
'Xiang ', # 0xf7
'Yi ', # 0xf8
'Jin ', # 0xf9
'Xun ', # 0xfa
'Juan ', # 0xfb
'Phas ', # 0xfc
'Xun ', # 0xfd
'Jin ', # 0xfe
'Fu ', # 0xff
)
|
tgquintela/NetSymulationTools | refs/heads/master | NetTools/NetSimulation/dynamic_operations.py | 2 |
"""
Module which groups the different possible dynamics to evolve a system.
TODO
----
- More options.
"""
import numpy as np
def next_step_node(method, variables):
"""This function acts as a switcher between the available methods on this
module, or applies the one given by method.
Parameters
----------
method: str or function
method used to obtain the next state of the node from its neighbours.
variables: dict
the information needed to compute the next state of the node. It is
usually composed of:
- pre_states_node: the previous states of the node to evolve.
- pre_states_neig: the previous states of the neighbours.
and may additionally include:
- the weights of nodes.
- the weights of edges.
Returns
-------
next_state_node: int or float
the next state of the node to be updated.
"""
if type(method).__name__ == 'function':
next_state_node = method(**variables)
elif method == 'conways':
# Filter the input variables to only the ones needed.
needed = ['pre_states_node', 'pre_states_neig']
variables = dict([(k, v) for k, v in variables.items()
if k in needed])
# Computation of the next state.
next_state_node = game_life_evolution_f(**variables)
else:
next_state_node = game_life_evolution_f(variables['pre_states_node'],
variables['pre_states_neig'])
return next_state_node
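# Minimal usage sketch for next_step_node (illustrative, not part of the
# original module; assumes numpy arrays shaped as documented above):
#
#     >>> import numpy as np
#     >>> variables = {'pre_states_node': np.array([[0]]),
#     ...              'pre_states_neig': np.array([[1, 1, 1, 0]])}
#     >>> bool(next_step_node('conways', variables))   # dead node, 3 live neighbours
#     True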
def game_life_evolution_f(pre_states_node, pre_states_neig):
This function recreates the evolution step of the original Conway's
Game of Life.
Parameters
----------
pre_states_node: array_like, shape (Ntmem, 1)
the previous states of the node considered to be updated.
pre_states_neig: array_like, shape (Ntmem, M)
the previous states of the M neighbours.
Returns
-------
next_state: int or float
the next state of the node to be updated.
"""
assert(len(pre_states_node.shape) == 2)
# From life state
if pre_states_node[-1][0]:
life_neig = np.sum(pre_states_neig[-1, :])
next_state = life_neig == 2 or life_neig == 3
# From dead state
else:
next_state = np.sum(pre_states_neig[-1, :]) == 3
return next_state
|
RosiePy/pychess | refs/heads/master | testing/polyglot.py | 21 | import unittest
from pychess.Utils.Board import Board
from pychess.Utils.lutils.leval import LBoard
# Examples taken from http://alpha.uhasselt.be/Research/Algebra/Toga/book_format.html
testcases = [
[ "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1" , 0x463b96181691fc9c ],
[ "rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1" , 0x823c9b50fd114196 ],
[ "rnbqkbnr/ppp1pppp/8/3p4/4P3/8/PPPP1PPP/RNBQKBNR w KQkq d6 0 2", 0x0756b94461c50fb0 ],
[ "rnbqkbnr/ppp1pppp/8/3pP3/8/8/PPPP1PPP/RNBQKBNR b KQkq - 0 2" , 0x662fafb965db29d4 ],
[ "rnbqkbnr/ppp1p1pp/8/3pPp2/8/8/PPPP1PPP/RNBQKBNR w KQkq f6 0 3", 0x22a48b5a8e47ff78 ],
[ "rnbqkbnr/ppp1p1pp/8/3pPp2/8/8/PPPPKPPP/RNBQ1BNR b kq - 0 3" , 0x652a607ca3f242c1 ],
[ "rnbq1bnr/ppp1pkpp/8/3pPp2/8/8/PPPPKPPP/RNBQ1BNR w - - 0 4" , 0x00fdd303c946bdd9 ],
[ "rnbqkbnr/p1pppppp/8/8/PpP4P/8/1P1PPPP1/RNBQKBNR b KQkq c3 0 3", 0x3c8123ea7b067637 ],
[ "rnbqkbnr/p1pppppp/8/8/P6P/R1p5/1P1PPPP1/1NBQKBNR b Kkq - 0 4" , 0x5c3f9b829b279560 ],
]
class PolyglotTestCase(unittest.TestCase):
def testPolyglot_1(self):
"""Testing hash keys agree with Polyglot's"""
for testcase in testcases:
board = LBoard(Board)
board.applyFen(testcase[0])
self.assertEqual(board.hash, testcase[1])
if __name__ == '__main__':
unittest.main()
|
genti/gnt_questionnaire | refs/heads/master | views.py | 1 | # -*- coding: utf-8 -*-
from urllib import *
from django.http import HttpResponseRedirect, Http404,HttpResponse
from django.shortcuts import render_to_response,redirect
from django.core.context_processors import csrf
from django.template import RequestContext
from django.shortcuts import get_object_or_404
from gnt_questionnaire.models import *
from localsite.models import *
import settings
from django import forms
import csv
from django.template.defaultfilters import slugify
from django.contrib.sites.models import RequestSite,Site
from django.core.mail import send_mail, BadHeaderError
from django.core.mail import EmailMultiAlternatives
from datetime import datetime, date
from django.template import (Node, Variable, TemplateSyntaxError,TokenParser, Library, TOKEN_TEXT, TOKEN_VAR)
from django.views.decorators.csrf import csrf_exempt, csrf_protect
def salva(request):
azienda = get_object_or_404(Azienda,pk__exact=request.session.get('azienda', False))
domandePostate=0;
questionario=Questionario.objects.get(pk__exact=request.POST.get('questionario_obj_id',False))
tmp=Domanda.objects.filter(questionario=questionario).filter(tipo=0).filter(multiple=True)
Risultati.objects.filter(questionario=questionario,azienda=azienda,domanda__in=tmp).delete()
for (counter,item) in enumerate(request.POST):
ids=item.split('#')
if(len(ids)==2):
id_questionario=ids[0]
id_domanda=ids[1]
if(len(ids)==3):
id_questionario=ids[0]
id_domanda=ids[1]
id_risposta=ids[2]
if(len(ids)>=2):
# if counter == 0:
#
# tmp=Domanda.objects.filter(questionario=questionario).filter(tipo=0).filter(multiple=True)
# Risultati.objects.filter(questionario=questionario,azienda=azienda,domanda__in=tmp).delete()
domanda=get_object_or_404(Domanda,pk__exact=id_domanda)
if int(domanda.tipo) != 1:
domandePostate = domandePostate+1
'''
closed answer (risposta chiusa)
'''
if int(domanda.tipo) == 0: #domanda chiusa
if domanda.multiple:
risposte=request.POST.getlist(item)
for risposta in risposte:
try:
risposta_chiusa=Risposta_chiusa.objects.get(id__exact=risposta)
except:
assert False, risposta
risultato, created = Risultati.objects.get_or_create(azienda=azienda,domanda=domanda,risposta_chiusa=risposta_chiusa,questionario=questionario)
risultato.testo=str(risposta)
risultato.save()
else:
risposta=request.POST.get(item,False)
try:
risposta_chiusa=Risposta_chiusa.objects.get(id__exact=risposta)
except: #boolean
try:
risposta_splitted=risposta.split('#')
risposta_chiusa=Risposta_chiusa.objects.get(id__exact=risposta_splitted[0])
risposta=risposta_splitted[1]
except:
raise TemplateSyntaxError("Errore nel salvataggio risposta chiusa. Name: %s Value: %s " % (item, risposta))
risultato, created = Risultati.objects.get_or_create(azienda=azienda,domanda=domanda,risposta_chiusa=risposta_chiusa,questionario=questionario)
risultato.testo=str(risposta)
risultato.save()
'''
range-type answer (risposta di tipo range)
field name = questionnaire#question#answer
value = the value within the range
'''
if int(domanda.tipo) == 2: #range
risposta=request.POST.get(item,False)
try:
risposta_chiusa=Risposta_chiusa.objects.get(id__exact=ids[2])
except:
assert False, risposta
risultato, created = Risultati.objects.get_or_create(azienda=azienda,domanda=domanda,risposta_chiusa=risposta_chiusa,questionario=questionario)
risultato.testo=str(risposta)
risultato.save()
'''
open answer (risposta aperta)
'''
if int(domanda.tipo) == 1:
risposta_aperta=get_object_or_404(Risposta_aperta,id__exact=int(id_risposta))
risultato, created = Risultati.objects.get_or_create(azienda=azienda,domanda=domanda,risposta_aperta=risposta_aperta,questionario=questionario)
risultato.testo=str(request.POST.get(item,''))
if risultato.testo != '':
domandePostate = domandePostate+1
risultato.save()
'''
modular answer (risposta modulare)
'''
if int(domanda.tipo) == 3:
checkedFirst=False
risposta_aperta=get_object_or_404(Risposta_aperta,id__exact=int(id_risposta))
Risultati.objects.filter(questionario=questionario,azienda=azienda,domanda=domanda,risposta_aperta=risposta_aperta).delete()
for clustered in request.POST.getlist(item):
if str(clustered) != '':
risultato = Risultati(azienda=azienda,domanda=domanda,risposta_aperta=risposta_aperta,questionario=questionario,risposta_aperta_cluster=risposta_aperta)
risultato.testo=str(clustered)
if not checkedFirst:
domandePostate = domandePostate+1
checkedFirst=True
risultato.save()
# check the total number of results
# totDomande=0;
if questionario:
totDomande = len(questionario.domanda_set.all())
obj,created= GestoreQuestionari.objects.get_or_create(azienda=azienda,questionario=questionario)
if domandePostate == totDomande:
            # questionnaire fully answered
obj.status=2
else:
            # questionnaire partially answered
obj.status=1
obj.save()
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
#return render_to_response('questionnaire/questionario_results.html', {'post':request.POST}, context_instance=RequestContext(request))
|
michelts/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/django/http/utils.py | 200 | """
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
"""
Ensures that we always use an absolute URI in any location header in the
response. This is required by RFC 2616, section 14.30.
Code constructing response objects is free to insert relative paths, as
this function converts them to absolute paths.
"""
if 'Location' in response and request.get_host():
response['Location'] = request.build_absolute_uri(response['Location'])
return response
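# Illustrative example (hypothetical host): a response constructed as
#   HttpResponseRedirect('/accounts/login/')
# for a request served at http://example.com/ leaves this function with
#   response['Location'] == 'http://example.com/accounts/login/'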
def conditional_content_removal(request, response):
"""
Removes the content of responses for HEAD requests, 1xx, 204 and 304
responses. Ensures compliance with RFC 2616, section 4.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
response.content = ''
response['Content-Length'] = 0
if request.method == 'HEAD':
response.content = ''
return response
def fix_IE_for_attach(request, response):
"""
This function will prevent Django from serving a Content-Disposition header
while expecting the browser to cache it (only when the browser is IE). This
leads to IE not allowing the client to download.
"""
useragent = request.META.get('HTTP_USER_AGENT', '').upper()
if 'MSIE' not in useragent and 'CHROMEFRAME' not in useragent:
return response
offending_headers = ('no-cache', 'no-store')
if response.has_header('Content-Disposition'):
try:
del response['Pragma']
except KeyError:
pass
if response.has_header('Cache-Control'):
cache_control_values = [value.strip() for value in
response['Cache-Control'].split(',')
if value.strip().lower() not in offending_headers]
if not len(cache_control_values):
del response['Cache-Control']
else:
response['Cache-Control'] = ', '.join(cache_control_values)
return response
def fix_IE_for_vary(request, response):
"""
This function will fix the bug reported at
http://support.microsoft.com/kb/824847/en-us?spid=8722&sid=global
by clearing the Vary header whenever the mime-type is not safe
enough for Internet Explorer to handle. Poor thing.
"""
useragent = request.META.get('HTTP_USER_AGENT', '').upper()
if 'MSIE' not in useragent and 'CHROMEFRAME' not in useragent:
return response
# These mime-types that are decreed "Vary-safe" for IE:
safe_mime_types = ('text/html', 'text/plain', 'text/sgml')
# The first part of the Content-Type field will be the MIME type,
# everything after ';', such as character-set, can be ignored.
if response['Content-Type'].split(';')[0] not in safe_mime_types:
try:
del response['Vary']
except KeyError:
pass
return response
|
unifycore/ryu | refs/heads/master | ryu/ofproto/nx_match.py | 18 | # Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp>
# Copyright (C) 2012 Simon Horman <horms ad verge net au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import itertools
from ryu import exception
from ryu.lib import mac
from . import ofproto_parser
from . import ofproto_v1_0
from . import inet
import logging
LOG = logging.getLogger('ryu.ofproto.nx_match')
UINT64_MAX = (1 << 64) - 1
UINT32_MAX = (1 << 32) - 1
UINT16_MAX = (1 << 16) - 1
FWW_IN_PORT = 1 << 0
FWW_DL_TYPE = 1 << 4
FWW_NW_PROTO = 1 << 5
# No corresponding OFPFW_* bits
FWW_NW_DSCP = 1 << 1
FWW_NW_ECN = 1 << 2
FWW_ARP_SHA = 1 << 3
FWW_ARP_THA = 1 << 6
FWW_IPV6_LABEL = 1 << 7
FWW_NW_TTL = 1 << 8
FWW_ALL = (1 << 13) - 1
FLOW_NW_FRAG_ANY = 1 << 0
FLOW_NW_FRAG_LATER = 1 << 1
FLOW_NW_FRAG_MASK = FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER
IP_ECN_MASK = 0x03
IP_DSCP_MASK = 0xfc
MF_PACK_STRING_BE64 = '!Q'
MF_PACK_STRING_BE32 = '!I'
MF_PACK_STRING_BE16 = '!H'
MF_PACK_STRING_8 = '!B'
MF_PACK_STRING_MAC = '!6s'
MF_PACK_STRING_IPV6 = '!8H'
_MF_FIELDS = {}
FLOW_N_REGS = 8 # ovs 1.5
class Flow(object):
def __init__(self):
self.in_port = 0
self.dl_vlan = 0
self.dl_vlan_pcp = 0
self.dl_src = mac.DONTCARE
self.dl_dst = mac.DONTCARE
self.dl_type = 0
self.tp_dst = 0
self.tp_src = 0
self.nw_tos = 0
self.vlan_tci = 0
self.nw_ttl = 0
self.nw_proto = 0
self.arp_sha = 0
self.arp_tha = 0
self.nw_src = 0
self.nw_dst = 0
self.tun_id = 0
self.arp_spa = 0
self.arp_tpa = 0
self.ipv6_src = []
self.ipv6_dst = []
self.nd_target = []
self.nw_frag = 0
self.regs = [0] * FLOW_N_REGS
self.ipv6_label = 0
class FlowWildcards(object):
def __init__(self):
self.dl_src_mask = 0
self.dl_dst_mask = 0
self.tp_src_mask = 0
self.tp_dst_mask = 0
self.nw_src_mask = 0
self.nw_dst_mask = 0
self.tun_id_mask = 0
self.arp_spa_mask = 0
self.arp_tpa_mask = 0
self.vlan_tci_mask = 0
self.ipv6_src_mask = []
self.ipv6_dst_mask = []
self.nd_target_mask = []
self.nw_frag_mask = 0
self.regs_bits = 0
self.regs_mask = [0] * FLOW_N_REGS
self.wildcards = ofproto_v1_0.OFPFW_ALL
class ClsRule(object):
"""describe a matching rule for OF 1.0 OFPMatch (and NX).
"""
def __init__(self):
self.wc = FlowWildcards()
self.flow = Flow()
def set_in_port(self, port):
self.wc.wildcards &= ~FWW_IN_PORT
self.flow.in_port = port
def set_dl_vlan(self, dl_vlan):
self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
self.flow.dl_vlan = dl_vlan
def set_dl_vlan_pcp(self, dl_vlan_pcp):
self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
self.flow.dl_vlan_pcp = dl_vlan_pcp
def set_dl_dst(self, dl_dst):
self.flow.dl_dst = dl_dst
def set_dl_dst_masked(self, dl_dst, mask):
self.wc.dl_dst_mask = mask
# bit-wise and of the corresponding elements of dl_dst and mask
self.flow.dl_dst = mac.haddr_bitand(dl_dst, mask)
def set_dl_src(self, dl_src):
self.flow.dl_src = dl_src
def set_dl_src_masked(self, dl_src, mask):
self.wc.dl_src_mask = mask
self.flow.dl_src = mac.haddr_bitand(dl_src, mask)
def set_dl_type(self, dl_type):
self.wc.wildcards &= ~FWW_DL_TYPE
self.flow.dl_type = dl_type
def set_dl_tci(self, tci):
self.set_dl_tci_masked(tci, UINT16_MAX)
def set_dl_tci_masked(self, tci, mask):
self.wc.vlan_tci_mask = mask
self.flow.vlan_tci = tci
def set_tp_src(self, tp_src):
self.set_tp_src_masked(tp_src, UINT16_MAX)
def set_tp_src_masked(self, tp_src, mask):
self.wc.tp_src_mask = mask
self.flow.tp_src = tp_src & mask
def set_tp_dst(self, tp_dst):
self.set_tp_dst_masked(tp_dst, UINT16_MAX)
def set_tp_dst_masked(self, tp_dst, mask):
self.wc.tp_dst_mask = mask
self.flow.tp_dst = tp_dst & mask
def set_nw_proto(self, nw_proto):
self.wc.wildcards &= ~FWW_NW_PROTO
self.flow.nw_proto = nw_proto
def set_nw_src(self, nw_src):
self.set_nw_src_masked(nw_src, UINT32_MAX)
def set_nw_src_masked(self, nw_src, mask):
self.flow.nw_src = nw_src
self.wc.nw_src_mask = mask
def set_nw_dst(self, nw_dst):
self.set_nw_dst_masked(nw_dst, UINT32_MAX)
def set_nw_dst_masked(self, nw_dst, mask):
self.flow.nw_dst = nw_dst
self.wc.nw_dst_mask = mask
def set_nw_dscp(self, nw_dscp):
self.wc.wildcards &= ~FWW_NW_DSCP
self.flow.nw_tos &= ~IP_DSCP_MASK
self.flow.nw_tos |= nw_dscp & IP_DSCP_MASK
def set_icmp_type(self, icmp_type):
self.set_tp_src(icmp_type)
def set_icmp_code(self, icmp_code):
self.set_tp_dst(icmp_code)
def set_tun_id(self, tun_id):
self.set_tun_id_masked(tun_id, UINT64_MAX)
def set_tun_id_masked(self, tun_id, mask):
self.wc.tun_id_mask = mask
self.flow.tun_id = tun_id & mask
def set_nw_ecn(self, nw_ecn):
self.wc.wildcards &= ~FWW_NW_ECN
self.flow.nw_tos &= ~IP_ECN_MASK
self.flow.nw_tos |= nw_ecn & IP_ECN_MASK
def set_nw_ttl(self, nw_ttl):
self.wc.wildcards &= ~FWW_NW_TTL
self.flow.nw_ttl = nw_ttl
def set_nw_frag(self, nw_frag):
self.wc.nw_frag_mask |= FLOW_NW_FRAG_MASK
self.flow.nw_frag = nw_frag
def set_nw_frag_masked(self, nw_frag, mask):
self.wc.nw_frag_mask = mask
self.flow.nw_frag = nw_frag & mask
def set_arp_spa(self, spa):
self.set_arp_spa_masked(spa, UINT32_MAX)
def set_arp_spa_masked(self, spa, mask):
self.flow.arp_spa = spa
self.wc.arp_spa_mask = mask
def set_arp_tpa(self, tpa):
self.set_arp_tpa_masked(tpa, UINT32_MAX)
def set_arp_tpa_masked(self, tpa, mask):
self.flow.arp_tpa = tpa
self.wc.arp_tpa_mask = mask
def set_arp_sha(self, sha):
self.wc.wildcards &= ~FWW_ARP_SHA
self.flow.arp_sha = sha
def set_arp_tha(self, tha):
self.wc.wildcards &= ~FWW_ARP_THA
self.flow.arp_tha = tha
def set_icmpv6_type(self, icmp_type):
self.set_tp_src(icmp_type)
def set_icmpv6_code(self, icmp_code):
self.set_tp_dst(icmp_code)
def set_ipv6_label(self, label):
self.wc.wildcards &= ~FWW_IPV6_LABEL
self.flow.ipv6_label = label
def set_ipv6_src_masked(self, src, mask):
self.wc.ipv6_src_mask = mask
self.flow.ipv6_src = [x & y for (x, y) in itertools.izip(src, mask)]
def set_ipv6_src(self, src):
self.flow.ipv6_src = src
def set_ipv6_dst_masked(self, dst, mask):
self.wc.ipv6_dst_mask = mask
self.flow.ipv6_dst = [x & y for (x, y) in itertools.izip(dst, mask)]
def set_ipv6_dst(self, dst):
self.flow.ipv6_dst = dst
def set_nd_target_masked(self, target, mask):
self.wc.nd_target_mask = mask
self.flow.nd_target = [x & y for (x, y) in
itertools.izip(target, mask)]
def set_nd_target(self, target):
self.flow.nd_target = target
def set_reg(self, reg_idx, value):
self.set_reg_masked(reg_idx, value, 0)
def set_reg_masked(self, reg_idx, value, mask):
self.wc.regs_mask[reg_idx] = mask
self.flow.regs[reg_idx] = value
self.wc.regs_bits |= (1 << reg_idx)
def flow_format(self):
# Tunnel ID is only supported by NXM
if self.wc.tun_id_mask != 0:
return ofproto_v1_0.NXFF_NXM
# Masking DL_DST is only supported by NXM
if self.wc.dl_dst_mask:
return ofproto_v1_0.NXFF_NXM
# Masking DL_SRC is only supported by NXM
if self.wc.dl_src_mask:
return ofproto_v1_0.NXFF_NXM
# ECN is only supported by NXM
if not self.wc.wildcards & FWW_NW_ECN:
return ofproto_v1_0.NXFF_NXM
return ofproto_v1_0.NXFF_OPENFLOW10
def match_tuple(self):
"""return a tuple which can be used as *args for
ofproto_v1_0_parser.OFPMatch.__init__().
see Datapath.send_flow_mod.
"""
assert self.flow_format() == ofproto_v1_0.NXFF_OPENFLOW10
wildcards = ofproto_v1_0.OFPFW_ALL
if not self.wc.wildcards & FWW_IN_PORT:
wildcards &= ~ofproto_v1_0.OFPFW_IN_PORT
if self.flow.dl_src != mac.DONTCARE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_SRC
if self.flow.dl_dst != mac.DONTCARE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_DST
if not self.wc.wildcards & FWW_DL_TYPE:
wildcards &= ~ofproto_v1_0.OFPFW_DL_TYPE
if self.flow.dl_vlan != 0:
wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
if self.flow.dl_vlan_pcp != 0:
wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
if self.flow.nw_tos != 0:
wildcards &= ~ofproto_v1_0.OFPFW_NW_TOS
if self.flow.nw_proto != 0:
wildcards &= ~ofproto_v1_0.OFPFW_NW_PROTO
if self.wc.nw_src_mask != 0 and "01" not in bin(self.wc.nw_src_mask):
wildcards &= ~ofproto_v1_0.OFPFW_NW_SRC_MASK
maskbits = (bin(self.wc.nw_src_mask).count("0") - 1)
wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_SRC_SHIFT)
if self.wc.nw_dst_mask != 0 and "01" not in bin(self.wc.nw_dst_mask):
wildcards &= ~ofproto_v1_0.OFPFW_NW_DST_MASK
maskbits = (bin(self.wc.nw_dst_mask).count("0") - 1)
wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_DST_SHIFT)
if self.flow.tp_src != 0:
wildcards &= ~ofproto_v1_0.OFPFW_TP_SRC
if self.flow.tp_dst != 0:
wildcards &= ~ofproto_v1_0.OFPFW_TP_DST
return (wildcards, self.flow.in_port, self.flow.dl_src,
self.flow.dl_dst, self.flow.dl_vlan, self.flow.dl_vlan_pcp,
self.flow.dl_type, self.flow.nw_tos & IP_DSCP_MASK,
self.flow.nw_proto, self.flow.nw_src, self.flow.nw_dst,
self.flow.tp_src, self.flow.tp_dst)
def _set_nxm_headers(nxm_headers):
'''Annotate corresponding NXM header'''
def _set_nxm_headers_dec(self):
self.nxm_headers = nxm_headers
return self
return _set_nxm_headers_dec
def _register_make(cls):
'''class decorator to Register mf make'''
assert cls.nxm_headers is not None
    assert cls.nxm_headers != []
for nxm_header in cls.nxm_headers:
assert nxm_header not in _MF_FIELDS
_MF_FIELDS[nxm_header] = cls.make
return cls
def mf_from_nxm_header(nxm_header):
if nxm_header not in _MF_FIELDS:
return None
make = _MF_FIELDS.get(nxm_header)
assert make is not None
return make(nxm_header)
class MFField(object):
_FIELDS_HEADERS = {}
@staticmethod
def register_field_header(headers):
def _register_field_header(cls):
for header in headers:
MFField._FIELDS_HEADERS[header] = cls
return cls
return _register_field_header
def __init__(self, nxm_header, pack_str):
self.nxm_header = nxm_header
self.pack_str = pack_str
self.n_bytes = struct.calcsize(pack_str)
self.n_bits = self.n_bytes * 8
@classmethod
def parser(cls, buf, offset):
(header,) = struct.unpack_from('!I', buf, offset)
cls_ = MFField._FIELDS_HEADERS.get(header)
if cls_:
field = cls_.field_parser(header, buf, offset)
else:
            # unknown field type
            raise ValueError('unknown NXM field type: header=0x%08x' % header)
field.length = (header & 0xff) + 4
return field
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
mask = None
if hasmask:
pack_str = '!' + cls.pack_str[1:] * 2
(value, mask) = struct.unpack_from(pack_str, buf,
offset + 4)
else:
(value,) = struct.unpack_from(cls.pack_str, buf,
offset + 4)
return cls(header, value, mask)
def _put(self, buf, offset, value):
ofproto_parser.msg_pack_into(self.pack_str, buf, offset, value)
return self.n_bytes
def putw(self, buf, offset, value, mask):
len_ = self._put(buf, offset, value)
return len_ + self._put(buf, offset + len_, mask)
def _is_all_ones(self, value):
return value == (1 << self.n_bits) - 1
def putm(self, buf, offset, value, mask):
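        # A zero mask means the field is fully wildcarded, so nothing is
        # emitted; an all-ones mask is an exact match, so only the value is
        # written; any other mask is written as a (value, mask) pair.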
if mask == 0:
return 0
elif self._is_all_ones(mask):
return self._put(buf, offset, value)
else:
return self.putw(buf, offset, value, mask)
def _putv6(self, buf, offset, value):
ofproto_parser.msg_pack_into(self.pack_str, buf, offset,
*value)
return self.n_bytes
def putv6(self, buf, offset, value, mask):
len_ = self._putv6(buf, offset, value)
if len(mask):
return len_ + self._putv6(buf, offset + len_, mask)
return len_
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IN_PORT])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IN_PORT])
class MFInPort(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFInPort, self).__init__(header, MFInPort.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFInPort.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.in_port)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_DST, ofproto_v1_0.NXM_OF_ETH_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_DST,
ofproto_v1_0.NXM_OF_ETH_DST_W])
class MFEthDst(MFField):
pack_str = MF_PACK_STRING_MAC
def __init__(self, header, value, mask=None):
super(MFEthDst, self).__init__(header, MFEthDst.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthDst.pack_str)
def put(self, buf, offset, rule):
if rule.wc.dl_dst_mask:
return self.putw(buf, offset, rule.flow.dl_dst,
rule.wc.dl_dst_mask)
else:
return self._put(buf, offset, rule.flow.dl_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_SRC, ofproto_v1_0.NXM_OF_ETH_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_SRC,
ofproto_v1_0.NXM_OF_ETH_SRC_W])
class MFEthSrc(MFField):
pack_str = MF_PACK_STRING_MAC
def __init__(self, header, value, mask=None):
super(MFEthSrc, self).__init__(header, MFEthSrc.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthSrc.pack_str)
def put(self, buf, offset, rule):
if rule.wc.dl_src_mask:
return self.putw(buf, offset, rule.flow.dl_src,
rule.wc.dl_src_mask)
else:
return self._put(buf, offset, rule.flow.dl_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_TYPE])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_TYPE])
class MFEthType(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFEthType, self).__init__(header, MFEthType.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFEthType.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.dl_type)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_VLAN_TCI,
ofproto_v1_0.NXM_OF_VLAN_TCI_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_VLAN_TCI,
ofproto_v1_0.NXM_OF_VLAN_TCI_W])
class MFVlan(MFField):
pack_str = MF_PACK_STRING_BE16
def __init__(self, header, value, mask=None):
super(MFVlan, self).__init__(header, MFVlan.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFVlan.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.vlan_tci,
rule.wc.vlan_tci_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_TOS])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_TOS])
class MFIPDSCP(MFField):
pack_str = MF_PACK_STRING_8
def __init__(self, header, value, mask=None):
super(MFIPDSCP, self).__init__(header, MFIPDSCP.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFIPDSCP.pack_str)
def put(self, buf, offset, rule):
return self._put(buf, offset,
rule.flow.nw_tos & IP_DSCP_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_TUN_ID,
ofproto_v1_0.NXM_NX_TUN_ID_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_TUN_ID,
ofproto_v1_0.NXM_NX_TUN_ID_W])
class MFTunId(MFField):
pack_str = MF_PACK_STRING_BE64
def __init__(self, header, value, mask=None):
super(MFTunId, self).__init__(header, MFTunId.pack_str)
self.value = value
@classmethod
def make(cls, header):
return cls(header, MFTunId.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tun_id, rule.wc.tun_id_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_SRC, ofproto_v1_0.NXM_OF_IP_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_SRC,
ofproto_v1_0.NXM_OF_IP_SRC_W])
class MFIPSrc(MFField):
pack_str = MF_PACK_STRING_BE32
def __init__(self, header, value, mask=None):
super(MFIPSrc, self).__init__(header, MFIPSrc.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, MFIPSrc.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.nw_src, rule.wc.nw_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_DST, ofproto_v1_0.NXM_OF_IP_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_DST,
ofproto_v1_0.NXM_OF_IP_DST_W])
class MFIPDst(MFField):
pack_str = MF_PACK_STRING_BE32
def __init__(self, header, value, mask=None):
super(MFIPDst, self).__init__(header, MFIPDst.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, MFIPDst.pack_str)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.nw_dst, rule.wc.nw_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_ECN])
class MFIPECN(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset,
rule.flow.nw_tos & IP_ECN_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_TTL])
class MFIPTTL(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.nw_ttl)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_PROTO])
class MFIPProto(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.nw_proto)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_SRC, ofproto_v1_0.NXM_OF_TCP_SRC_W,
ofproto_v1_0.NXM_OF_UDP_SRC, ofproto_v1_0.NXM_OF_UDP_SRC_W])
class MFTPSRC(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tp_src, rule.wc.tp_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_DST, ofproto_v1_0.NXM_OF_TCP_DST_W,
ofproto_v1_0.NXM_OF_UDP_DST, ofproto_v1_0.NXM_OF_UDP_DST_W])
class MFTPDST(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE16)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.tp_dst, rule.wc.tp_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_SPA, ofproto_v1_0.NXM_OF_ARP_SPA_W])
class MFArpSpa(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.arp_spa, rule.wc.arp_spa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_TPA, ofproto_v1_0.NXM_OF_ARP_TPA_W])
class MFArpTpa(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self.putm(buf, offset, rule.flow.arp_tpa, rule.wc.arp_tpa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_SHA])
class MFArpSha(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_MAC)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.arp_sha)
class MFIPV6(object):
pack_str = MF_PACK_STRING_IPV6
@classmethod
def field_parser(cls, header, buf, offset):
hasmask = (header >> 8) & 1
if hasmask:
pack_string = '!' + cls.pack_str[1:] * 2
value = struct.unpack_from(pack_string, buf, offset + 4)
return cls(header, list(value[:8]), list(value[8:]))
else:
value = struct.unpack_from(cls.pack_str, buf, offset + 4)
return cls(header, list(value))
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_SRC,
ofproto_v1_0.NXM_NX_IPV6_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_SRC,
ofproto_v1_0.NXM_NX_IPV6_SRC_W])
class MFIPV6Src(MFIPV6, MFField):
def __init__(self, header, value, mask=None):
super(MFIPV6Src, self).__init__(header, MFIPV6Src.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, cls.pack_str)
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.ipv6_src,
rule.wc.ipv6_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_DST,
ofproto_v1_0.NXM_NX_IPV6_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_DST,
ofproto_v1_0.NXM_NX_IPV6_DST_W])
class MFIPV6Dst(MFIPV6, MFField):
def __init__(self, header, value, mask=None):
super(MFIPV6Dst, self).__init__(header, MFIPV6Dst.pack_str)
self.value = value
self.mask = mask
@classmethod
def make(cls, header):
return cls(header, cls.pack_str)
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.ipv6_dst,
rule.wc.ipv6_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ND_TARGET,
ofproto_v1_0.NXM_NX_ND_TARGET_W])
class MFNdTarget(MFField):
@classmethod
def make(cls, header):
return cls(header, '!4I')
def put(self, buf, offset, rule):
return self.putv6(buf, offset,
rule.flow.nd_target,
rule.wc.nd_target_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_FRAG,
ofproto_v1_0.NXM_NX_IP_FRAG_W])
class MFIpFrag(MFField):
@classmethod
def make(cls, header):
return cls(header, '!B')
def put(self, buf, offset, rule):
if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
return self._put(buf, offset, rule.flow.nw_frag)
else:
return self.putw(buf, offset, rule.flow.nw_frag,
rule.wc.nw_frag_mask & FLOW_NW_FRAG_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_THA])
class MFArpTha(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_MAC)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.arp_tha)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_TYPE])
class MFICMPType(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_CODE])
class MFICMPCode(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_TYPE])
class MFICMPV6Type(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_CODE])
class MFICMPV6Code(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_8)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_LABEL])
class MFICMPV6Label(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
return self._put(buf, offset, rule.flow.ipv6_label)
@_register_make
@_set_nxm_headers([ofproto_v1_0.nxm_nx_reg(i) for i in range(FLOW_N_REGS)]
+ [ofproto_v1_0.nxm_nx_reg_w(i) for i in range(FLOW_N_REGS)])
class MFRegister(MFField):
@classmethod
def make(cls, header):
return cls(header, MF_PACK_STRING_BE32)
def put(self, buf, offset, rule):
for i in range(FLOW_N_REGS):
if (ofproto_v1_0.nxm_nx_reg(i) == self.nxm_header or
ofproto_v1_0.nxm_nx_reg_w(i) == self.nxm_header):
if rule.wc.regs_mask[i]:
return self.putm(buf, offset, rule.flow.regs[i],
rule.wc.regs_mask[i])
else:
return self._put(buf, offset, rule.flow.regs[i])
def serialize_nxm_match(rule, buf, offset):
old_offset = offset
if not rule.wc.wildcards & FWW_IN_PORT:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IN_PORT, rule)
# Ethernet.
if rule.flow.dl_dst != mac.DONTCARE:
if rule.wc.dl_dst_mask:
header = ofproto_v1_0.NXM_OF_ETH_DST_W
else:
header = ofproto_v1_0.NXM_OF_ETH_DST
offset += nxm_put(buf, offset, header, rule)
if rule.flow.dl_src != mac.DONTCARE:
if rule.wc.dl_src_mask:
header = ofproto_v1_0.NXM_OF_ETH_SRC_W
else:
header = ofproto_v1_0.NXM_OF_ETH_SRC
offset += nxm_put(buf, offset, header, rule)
if not rule.wc.wildcards & FWW_DL_TYPE:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ETH_TYPE, rule)
# 802.1Q
if rule.wc.vlan_tci_mask != 0:
if rule.wc.vlan_tci_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_VLAN_TCI
else:
header = ofproto_v1_0.NXM_OF_VLAN_TCI_W
offset += nxm_put(buf, offset, header, rule)
# L3
if not rule.wc.wildcards & FWW_NW_DSCP:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_TOS, rule)
if not rule.wc.wildcards & FWW_NW_ECN:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_ECN, rule)
if not rule.wc.wildcards & FWW_NW_TTL:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_TTL, rule)
if not rule.wc.wildcards & FWW_NW_PROTO:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_PROTO, rule)
if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
== inet.IPPROTO_ICMP):
if rule.wc.tp_src_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_TYPE, rule)
if rule.wc.tp_dst_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_CODE, rule)
if rule.flow.tp_src != 0:
if rule.flow.nw_proto == 6:
if rule.wc.tp_src_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_TCP_SRC
else:
header = ofproto_v1_0.NXM_OF_TCP_SRC_W
elif rule.flow.nw_proto == 17:
if rule.wc.tp_src_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_UDP_SRC
else:
header = ofproto_v1_0.NXM_OF_UDP_SRC_W
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
if rule.flow.tp_dst != 0:
if rule.flow.nw_proto == 6:
if rule.wc.tp_dst_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_TCP_DST
else:
header = ofproto_v1_0.NXM_OF_TCP_DST_W
elif rule.flow.nw_proto == 17:
if rule.wc.tp_dst_mask == UINT16_MAX:
header = ofproto_v1_0.NXM_OF_UDP_DST
else:
header = ofproto_v1_0.NXM_OF_UDP_DST_W
else:
header = 0
if header != 0:
offset += nxm_put(buf, offset, header, rule)
# IP Source and Destination
if rule.flow.nw_src != 0:
if rule.wc.nw_src_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_IP_SRC
else:
header = ofproto_v1_0.NXM_OF_IP_SRC_W
offset += nxm_put(buf, offset, header, rule)
if rule.flow.nw_dst != 0:
if rule.wc.nw_dst_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_IP_DST
else:
header = ofproto_v1_0.NXM_OF_IP_DST_W
offset += nxm_put(buf, offset, header, rule)
# IPv6
if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
== inet.IPPROTO_ICMPV6):
if rule.wc.tp_src_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_TYPE,
rule)
if rule.wc.tp_dst_mask != 0:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_CODE,
rule)
if not rule.wc.wildcards & FWW_IPV6_LABEL:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IPV6_LABEL, rule)
if len(rule.flow.ipv6_src):
if len(rule.wc.ipv6_src_mask):
header = ofproto_v1_0.NXM_NX_IPV6_SRC_W
else:
header = ofproto_v1_0.NXM_NX_IPV6_SRC
offset += nxm_put(buf, offset, header, rule)
if len(rule.flow.ipv6_dst):
if len(rule.wc.ipv6_dst_mask):
header = ofproto_v1_0.NXM_NX_IPV6_DST_W
else:
header = ofproto_v1_0.NXM_NX_IPV6_DST
offset += nxm_put(buf, offset, header, rule)
if len(rule.flow.nd_target):
if len(rule.wc.nd_target_mask):
header = ofproto_v1_0.NXM_NX_ND_TARGET_W
else:
header = ofproto_v1_0.NXM_NX_ND_TARGET
offset += nxm_put(buf, offset, header, rule)
# ARP
if rule.flow.arp_spa != 0:
if rule.wc.arp_spa_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_ARP_SPA
else:
header = ofproto_v1_0.NXM_OF_ARP_SPA_W
offset += nxm_put(buf, offset, header, rule)
if rule.flow.arp_tpa != 0:
if rule.wc.arp_tpa_mask == UINT32_MAX:
header = ofproto_v1_0.NXM_OF_ARP_TPA
else:
header = ofproto_v1_0.NXM_OF_ARP_TPA_W
offset += nxm_put(buf, offset, header, rule)
if not rule.wc.wildcards & FWW_ARP_SHA:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_SHA, rule)
if not rule.wc.wildcards & FWW_ARP_THA:
offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_THA, rule)
if rule.flow.nw_frag:
if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
header = ofproto_v1_0.NXM_NX_IP_FRAG
else:
header = ofproto_v1_0.NXM_NX_IP_FRAG_W
offset += nxm_put(buf, offset, header, rule)
# Tunnel Id
if rule.wc.tun_id_mask != 0:
if rule.wc.tun_id_mask == UINT64_MAX:
header = ofproto_v1_0.NXM_NX_TUN_ID
else:
header = ofproto_v1_0.NXM_NX_TUN_ID_W
offset += nxm_put(buf, offset, header, rule)
# XXX: Cookie
for i in range(FLOW_N_REGS):
if rule.wc.regs_bits & (1 << i):
if rule.wc.regs_mask[i]:
header = ofproto_v1_0.nxm_nx_reg_w(i)
else:
header = ofproto_v1_0.nxm_nx_reg(i)
offset += nxm_put(buf, offset, header, rule)
# Pad
pad_len = round_up(offset) - offset
ofproto_parser.msg_pack_into("%dx" % pad_len, buf, offset)
# The returned length, the match_len, does not include the pad
return offset - old_offset
def nxm_put(buf, offset, header, rule):
nxm = NXMatch(header)
len_ = nxm.put_header(buf, offset)
mf = mf_from_nxm_header(nxm.header)
return len_ + mf.put(buf, offset + len_, rule)
def round_up(length):
return (length + 7) / 8 * 8 # Round up to a multiple of 8
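# e.g. round_up(10) == 16 and round_up(16) == 16; note this relies on
# Python 2 integer division ('/' would produce a float on Python 3).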
class NXMatch(object):
def __init__(self, header):
self.header = header
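    # NXM header layout (32 bits): vendor (16) | field (7) | hasmask (1) |
    # length (8); the accessors below extract each piece with shifts and
    # masks.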
@classmethod
def parser(cls, buf, offset, match_len):
if match_len < 4:
raise exception.OFPMalformedMessage
(header,) = struct.unpack_from(ofproto_v1_0.NXM_HEADER_PACK_STRING,
buf, offset)
instance = cls(header)
payload_len = instance.length()
if payload_len == 0 or match_len < payload_len + 4:
raise exception.OFPMalformedMessage
return instance
def vendor(self):
return self.header >> 16
    def field(self):
        return (self.header >> 9) & 0x7f
    def type(self):
        return (self.header >> 9) & 0x7fffff
def hasmask(self):
return (self.header >> 8) & 1
def length(self):
return self.header & 0xff
def show(self):
return ('%08x (vendor=%x, field=%x, hasmask=%x len=%x)' %
(self.header, self.vendor(), self.field(),
self.hasmask(), self.length()))
def put_header(self, buf, offset):
ofproto_parser.msg_pack_into(ofproto_v1_0.NXM_HEADER_PACK_STRING,
buf, offset, self.header)
return struct.calcsize(ofproto_v1_0.NXM_HEADER_PACK_STRING)
|
cetic/ansible | refs/heads/devel | hacking/metadata-tool.py | 20 | #!/usr/bin/env python
# (c) 2016-2017, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import csv
import os
import sys
from collections import defaultdict
from distutils.version import StrictVersion
from pprint import pformat, pprint
from ansible.parsing.metadata import ParseError, extract_metadata
from ansible.plugins import module_loader
# There's a few files that are not new-style modules. Have to blacklist them
NONMODULE_PY_FILES = frozenset(('async_wrapper.py',))
NONMODULE_MODULE_NAMES = frozenset(os.path.splitext(p)[0] for p in NONMODULE_PY_FILES)
# Default metadata
DEFAULT_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'}
class MissingModuleError(Exception):
"""Thrown when unable to find a plugin"""
pass
def usage():
print("""Usage:
metadata-tool.py report [--version X]
metadata-tool.py add [--version X] [--overwrite] CSVFILE
metadata-tool.py add-default [--version X] [--overwrite]
      metadata-tool.py upgrade [--version X]""")
sys.exit(1)
def parse_args(arg_string):
if len(arg_string) < 1:
usage()
action = arg_string[0]
version = None
if '--version' in arg_string:
version_location = arg_string.index('--version')
arg_string.pop(version_location)
version = arg_string.pop(version_location)
overwrite = False
if '--overwrite' in arg_string:
overwrite = True
arg_string.remove('--overwrite')
csvfile = None
if len(arg_string) == 2:
csvfile = arg_string[1]
elif len(arg_string) > 2:
usage()
return action, {'version': version, 'overwrite': overwrite, 'csvfile': csvfile}
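# For example (hypothetical invocation), parse_args(['add', '--version', '1.0',
# 'modules.csv']) returns ('add', {'version': '1.0', 'overwrite': False,
# 'csvfile': 'modules.csv'}).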
def find_documentation(module_data):
"""Find the DOCUMENTATION metadata for a module file"""
start_line = -1
mod_ast_tree = ast.parse(module_data)
for child in mod_ast_tree.body:
if isinstance(child, ast.Assign):
for target in child.targets:
if target.id == 'DOCUMENTATION':
start_line = child.lineno - 1
break
return start_line
def remove_metadata(module_data, start_line, start_col, end_line, end_col):
"""Remove a section of a module file"""
lines = module_data.split('\n')
new_lines = lines[:start_line]
if start_col != 0:
new_lines.append(lines[start_line][:start_col])
next_line = lines[end_line]
if len(next_line) - 1 != end_col:
new_lines.append(next_line[end_col:])
if len(lines) > end_line:
new_lines.extend(lines[end_line + 1:])
return '\n'.join(new_lines)
def insert_metadata(module_data, new_metadata, insertion_line, targets=('ANSIBLE_METADATA',)):
"""Insert a new set of metadata at a specified line"""
assignments = ' = '.join(targets)
pretty_metadata = pformat(new_metadata, width=1).split('\n')
new_lines = []
new_lines.append('{} = {}'.format(assignments, pretty_metadata[0]))
if len(pretty_metadata) > 1:
for line in pretty_metadata[1:]:
new_lines.append('{}{}'.format(' ' * (len(assignments) - 1 + len(' = {')), line))
old_lines = module_data.split('\n')
lines = old_lines[:insertion_line] + new_lines + [''] + old_lines[insertion_line:]
return '\n'.join(lines)
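# Rough sketch of the resulting layout (hypothetical input): inserting
# {'metadata_version': '1.0'} at line 0 produces a module beginning with
#   ANSIBLE_METADATA = {'metadata_version': '1.0'}
# followed by a blank line and then the original source.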
def parse_assigned_metadata_initial(csvfile):
"""
Fields:
:0: Module name
:1: Core (x if so)
:2: Extras (x if so)
:3: Category
:4: Supported/SLA
:5: Curated
:6: Stable
:7: Deprecated
:8: Notes
:9: Team Notes
:10: Notes 2
:11: final supported_by field
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
if record[12] == 'core':
supported_by = 'core'
elif record[12] == 'curated':
supported_by = 'curated'
elif record[12] == 'community':
supported_by = 'community'
else:
print('Module %s has no supported_by field. Using community' % record[0])
supported_by = 'community'
            # supported_by = DEFAULT_METADATA['supported_by']
status = []
if record[6]:
status.append('stableinterface')
if record[7]:
status.append('deprecated')
if not status:
status.extend(DEFAULT_METADATA['status'])
yield (module, {'version': DEFAULT_METADATA['metadata_version'], 'supported_by': supported_by, 'status': status})
def parse_assigned_metadata(csvfile):
"""
Fields:
:0: Module name
:1: supported_by string. One of the valid support fields
core, community, curated
:2: stableinterface
:3: preview
:4: deprecated
:5: removed
https://github.com/ansible/proposals/issues/30
"""
with open(csvfile, 'rb') as f:
for record in csv.reader(f):
module = record[0]
supported_by = record[1]
status = []
if record[2]:
status.append('stableinterface')
if record[4]:
status.append('deprecated')
if record[5]:
status.append('removed')
if not status or record[3]:
status.append('preview')
yield (module, {'metadata_version': '1.0', 'supported_by': supported_by, 'status': status})
def write_metadata(filename, new_metadata, version=None, overwrite=False):
with open(filename, 'rb') as f:
module_data = f.read()
try:
current_metadata, start_line, start_col, end_line, end_col, targets = \
extract_metadata(module_data=module_data, offsets=True)
except SyntaxError:
if filename.endswith('.py'):
raise
# Probably non-python modules. These should all have python
# documentation files where we can place the data
raise ParseError('Could not add metadata to {}'.format(filename))
if current_metadata is None:
# No current metadata so we can just add it
start_line = find_documentation(module_data)
if start_line < 0:
if os.path.basename(filename) in NONMODULE_PY_FILES:
# These aren't new-style modules
return
raise Exception('Module file {} had no ANSIBLE_METADATA or DOCUMENTATION'.format(filename))
module_data = insert_metadata(module_data, new_metadata, start_line, targets=('ANSIBLE_METADATA',))
elif overwrite or (version is not None and ('metadata_version' not in current_metadata or
StrictVersion(current_metadata['metadata_version']) < StrictVersion(version))):
# Current metadata that we do not want. Remove the current
# metadata and put the new version in its place
module_data = remove_metadata(module_data, start_line, start_col, end_line, end_col)
module_data = insert_metadata(module_data, new_metadata, start_line, targets=targets)
else:
# Current metadata and we don't want to overwrite it
return
# Save the new version of the module
with open(filename, 'wb') as f:
f.write(module_data)
def return_metadata(plugins):
"""Get the metadata for all modules
Handle duplicate module names
:arg plugins: List of plugins to look for
:returns: Mapping of plugin name to metadata dictionary
"""
metadata = {}
for name, filename in plugins:
# There may be several files for a module (if it is written in another
# language, for instance) but only one of them (the .py file) should
# contain the metadata.
if name not in metadata or metadata[name] is not None:
with open(filename, 'rb') as f:
module_data = f.read()
metadata[name] = extract_metadata(module_data=module_data, offsets=True)[0]
return metadata
def metadata_summary(plugins, version=None):
"""Compile information about the metadata status for a list of modules
:arg plugins: List of plugins to look for. Each entry in the list is
a tuple of (module name, full path to module)
:kwarg version: If given, make sure the modules have this version of
metadata or higher.
:returns: A tuple consisting of a list of modules with no metadata at the
required version and a list of files that have metadata at the
required version.
"""
no_metadata = {}
has_metadata = {}
supported_by = defaultdict(set)
status = defaultdict(set)
requested_version = StrictVersion(version)
all_mods_metadata = return_metadata(plugins)
for name, filename in plugins:
# Does the module have metadata?
if name not in no_metadata and name not in has_metadata:
metadata = all_mods_metadata[name]
if metadata is None:
no_metadata[name] = filename
elif version is not None and ('metadata_version' not in metadata or StrictVersion(metadata['metadata_version']) < requested_version):
no_metadata[name] = filename
else:
has_metadata[name] = filename
# What categories does the plugin belong in?
if all_mods_metadata[name] is None:
# No metadata for this module. Use the default metadata
supported_by[DEFAULT_METADATA['supported_by']].add(filename)
status[DEFAULT_METADATA['status'][0]].add(filename)
else:
supported_by[all_mods_metadata[name]['supported_by']].add(filename)
for one_status in all_mods_metadata[name]['status']:
status[one_status].add(filename)
return list(no_metadata.values()), list(has_metadata.values()), supported_by, status
# Filters to convert between metadata versions
def convert_metadata_pre_1_0_to_1_0(metadata):
"""
Convert pre-1.0 to 1.0 metadata format
:arg metadata: The old metadata
:returns: The new metadata
Changes from pre-1.0 to 1.0:
* ``version`` field renamed to ``metadata_version``
* ``supported_by`` field value ``unmaintained`` has been removed (change to
``community`` and let an external list track whether a module is unmaintained)
* ``supported_by`` field value ``committer`` has been renamed to ``curated``
"""
new_metadata = {'metadata_version': '1.0',
'supported_by': metadata['supported_by'],
'status': metadata['status']
}
if new_metadata['supported_by'] == 'unmaintained':
new_metadata['supported_by'] = 'community'
elif new_metadata['supported_by'] == 'committer':
new_metadata['supported_by'] = 'curated'
return new_metadata
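# For example, the pre-1.0 dict {'version': '1.0', 'supported_by': 'committer',
# 'status': ['stableinterface']} becomes {'metadata_version': '1.0',
# 'supported_by': 'curated', 'status': ['stableinterface']}.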
# Subcommands
def add_from_csv(csv_file, version=None, overwrite=False):
"""Implement the subcommand to add metadata from a csv file
"""
# Add metadata for everything from the CSV file
diagnostic_messages = []
for module_name, new_metadata in parse_assigned_metadata_initial(csv_file):
filename = module_loader.find_plugin(module_name, mod_type='.py')
if filename is None:
diagnostic_messages.append('Unable to find the module file for {}'.format(module_name))
continue
try:
write_metadata(filename, new_metadata, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def add_default(version=None, overwrite=False):
"""Implement the subcommand to add default metadata to modules
Add the default metadata to any plugin which lacks it.
:kwarg version: If given, the metadata must be at least this version.
Otherwise, treat the module as not having existing metadata.
:kwarg overwrite: If True, overwrite any existing metadata. Otherwise,
do not modify files which have metadata at an appropriate version
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
# Iterate through each plugin
processed = set()
diagnostic_messages = []
for name, filename in (info for info in plugins if info[0] not in processed):
try:
write_metadata(filename, DEFAULT_METADATA, version, overwrite)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
processed.add(name)
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def upgrade_metadata(version=None):
"""Implement the subcommand to upgrade the default metadata in modules.
:kwarg version: If given, the version of the metadata to upgrade to. If
not given, upgrade to the latest format version.
"""
    if version is None:
        # Version string larger than any of the defined metadata formats.
        version = '9999999.0'
requested_version = StrictVersion(version)
# List all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
processed = set()
diagnostic_messages = []
for name, filename in (info for info in plugins if info[0] not in processed):
# For each plugin, read the existing metadata
with open(filename, 'rb') as f:
module_data = f.read()
metadata = extract_metadata(module_data=module_data, offsets=True)[0]
# If the metadata isn't the requested version, convert it to the new
# version
if 'metadata_version' not in metadata or metadata['metadata_version'] != version:
#
# With each iteration of metadata, add a new conditional to
# upgrade from the previous version
#
if 'metadata_version' not in metadata:
# First version, pre-1.0 final metadata
metadata = convert_metadata_pre_1_0_to_1_0(metadata)
if metadata['metadata_version'] == '1.0' and StrictVersion('1.0') < requested_version:
# 1.0 version => XXX. We don't yet have anything beyond 1.0
# so there's nothing here
pass
# Replace the existing metadata with the new format
try:
write_metadata(filename, metadata, version, overwrite=True)
except ParseError as e:
diagnostic_messages.append(e.args[0])
continue
processed.add(name)
if diagnostic_messages:
pprint(diagnostic_messages)
return 0
def report(version=None):
"""Implement the report subcommand
Print out all the modules that have metadata and all the ones that do not.
:kwarg version: If given, the metadata must be at least this version.
Otherwise return it as not having metadata
"""
# List of all plugins
plugins = module_loader.all(path_only=True)
plugins = ((os.path.splitext((os.path.basename(p)))[0], p) for p in plugins)
plugins = (p for p in plugins if p[0] not in NONMODULE_MODULE_NAMES)
plugins = list(plugins)
no_metadata, has_metadata, support, status = metadata_summary(plugins, version=version)
print('== Has metadata ==')
pprint(sorted(has_metadata))
print('')
print('== Has no metadata ==')
pprint(sorted(no_metadata))
print('')
print('== Supported by core ==')
pprint(sorted(support['core']))
print('== Supported by value curated ==')
pprint(sorted(support['curated']))
print('== Supported by community ==')
pprint(sorted(support['community']))
print('')
print('== Status: stableinterface ==')
pprint(sorted(status['stableinterface']))
print('== Status: preview ==')
pprint(sorted(status['preview']))
print('== Status: deprecated ==')
pprint(sorted(status['deprecated']))
print('== Status: removed ==')
pprint(sorted(status['removed']))
print('')
print('== Summary ==')
print('No Metadata: {0} Has Metadata: {1}'.format(len(no_metadata), len(has_metadata)))
print('Supported by core: {0} Supported by community: {1} Supported by value curated: {2}'.format(len(support['core']),
len(support['community']), len(support['curated'])))
print('Status StableInterface: {0} Status Preview: {1} Status Deprecated: {2} Status Removed: {3}'.format(len(status['stableinterface']),
len(status['preview']), len(status['deprecated']), len(status['removed'])))
return 0
if __name__ == '__main__':
action, args = parse_args(sys.argv[1:])
if action == 'report':
rc = report(version=args['version'])
elif action == 'add':
rc = add_from_csv(args['csvfile'], version=args['version'], overwrite=args['overwrite'])
elif action == 'add-default':
rc = add_default(version=args['version'], overwrite=args['overwrite'])
elif action == 'upgrade':
rc = upgrade_metadata(version=args['version'])
sys.exit(rc)
|
sergeysynergy/graph | refs/heads/master | django/manage.py | 152 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
javipalanca/Django-facebook | refs/heads/master | docs/docs_env/Lib/encodings/iso8859_14.py | 593 | """ Python Character Mapping Codec iso8859_14 generated from 'MAPPINGS/ISO8859/8859-14.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-14',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u1e02' # 0xA1 -> LATIN CAPITAL LETTER B WITH DOT ABOVE
u'\u1e03' # 0xA2 -> LATIN SMALL LETTER B WITH DOT ABOVE
u'\xa3' # 0xA3 -> POUND SIGN
u'\u010a' # 0xA4 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
u'\u010b' # 0xA5 -> LATIN SMALL LETTER C WITH DOT ABOVE
u'\u1e0a' # 0xA6 -> LATIN CAPITAL LETTER D WITH DOT ABOVE
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u1e80' # 0xA8 -> LATIN CAPITAL LETTER W WITH GRAVE
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u1e82' # 0xAA -> LATIN CAPITAL LETTER W WITH ACUTE
u'\u1e0b' # 0xAB -> LATIN SMALL LETTER D WITH DOT ABOVE
u'\u1ef2' # 0xAC -> LATIN CAPITAL LETTER Y WITH GRAVE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u0178' # 0xAF -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u1e1e' # 0xB0 -> LATIN CAPITAL LETTER F WITH DOT ABOVE
u'\u1e1f' # 0xB1 -> LATIN SMALL LETTER F WITH DOT ABOVE
u'\u0120' # 0xB2 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
u'\u0121' # 0xB3 -> LATIN SMALL LETTER G WITH DOT ABOVE
u'\u1e40' # 0xB4 -> LATIN CAPITAL LETTER M WITH DOT ABOVE
u'\u1e41' # 0xB5 -> LATIN SMALL LETTER M WITH DOT ABOVE
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\u1e56' # 0xB7 -> LATIN CAPITAL LETTER P WITH DOT ABOVE
u'\u1e81' # 0xB8 -> LATIN SMALL LETTER W WITH GRAVE
u'\u1e57' # 0xB9 -> LATIN SMALL LETTER P WITH DOT ABOVE
u'\u1e83' # 0xBA -> LATIN SMALL LETTER W WITH ACUTE
u'\u1e60' # 0xBB -> LATIN CAPITAL LETTER S WITH DOT ABOVE
u'\u1ef3' # 0xBC -> LATIN SMALL LETTER Y WITH GRAVE
u'\u1e84' # 0xBD -> LATIN CAPITAL LETTER W WITH DIAERESIS
u'\u1e85' # 0xBE -> LATIN SMALL LETTER W WITH DIAERESIS
u'\u1e61' # 0xBF -> LATIN SMALL LETTER S WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0174' # 0xD0 -> LATIN CAPITAL LETTER W WITH CIRCUMFLEX
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u1e6a' # 0xD7 -> LATIN CAPITAL LETTER T WITH DOT ABOVE
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0176' # 0xDE -> LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0175' # 0xF0 -> LATIN SMALL LETTER W WITH CIRCUMFLEX
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u1e6b' # 0xF7 -> LATIN SMALL LETTER T WITH DOT ABOVE
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0177' # 0xFE -> LATIN SMALL LETTER Y WITH CIRCUMFLEX
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
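
### A hedged usage sketch (byte values are illustrative for this map; 0xA1
### decodes to LATIN CAPITAL LETTER B WITH DOT ABOVE in the table above).
### The two tables feed codecs.charmap_decode/charmap_encode directly,
### assuming the usual `import codecs` at the top of this module.
if __name__ == '__main__':
    text, _ = codecs.charmap_decode(b'\xa1\xa3', 'strict', decoding_table)
    raw, _ = codecs.charmap_encode(text, 'strict', encoding_table)
    assert raw == b'\xa1\xa3'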
|
anthraxx/arch-security-tracker | refs/heads/master | tracker/model/user.py | 3 | from tracker import db
from .enum import UserRole
username_regex = r'^([\w]+)$'
class User(db.Model):
NAME_LENGTH = 32
EMAIL_LENGTH = 128
SALT_LENGTH = 20
PASSWORD_LENGTH = 80
TOKEN_LENGTH = 120
__tablename__ = 'user'
id = db.Column(db.Integer(), index=True, unique=True, primary_key=True, autoincrement=True)
name = db.Column(db.String(NAME_LENGTH), index=True, unique=True, nullable=False)
email = db.Column(db.String(EMAIL_LENGTH), index=True, unique=True, nullable=False)
salt = db.Column(db.String(SALT_LENGTH), nullable=False)
    password = db.Column(db.String(PASSWORD_LENGTH), nullable=False)
token = db.Column(db.String(TOKEN_LENGTH), index=True, unique=True, nullable=True)
role = db.Column(UserRole.as_type(), nullable=False, default=UserRole.reporter)
active = db.Column(db.Boolean(), nullable=False, default=True)
is_authenticated = False
is_anonymous = False
@property
def is_active(self):
return self.active
def get_id(self):
return '{}'.format(self.token)
def __str__(self):
return self.name
def __repr__(self):
return '<User %r>' % (self.name)
class Guest(User):
def __init__(self):
super().__init__()
self.name = 'Guest'
self.id = -42
self.active = False
self.is_anonymous = True
self.is_authenticated = False
self.role = UserRole.guest
def get_id(self):
return None
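
# A minimal usage sketch (assumptions: an active application context for the
# Flask-SQLAlchemy ``db`` imported above, and a password hashed elsewhere --
# all literal values here are illustrative):
#
#     user = User(name='alice', email='alice@example.com',
#                 salt='<salt>', password='<hashed password>',
#                 role=UserRole.reporter)
#     db.session.add(user)
#     db.session.commit()
#
# ``get_id``, ``is_active``, ``is_authenticated`` and ``is_anonymous`` form
# the user interface that Flask-Login expects.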
|
stewartpark/django | refs/heads/master | django/db/backends/postgresql/creation.py | 121 | import sys
from django.db.backends.base.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
def sql_table_creation_suffix(self):
test_settings = self.connection.settings_dict['TEST']
assert test_settings['COLLATION'] is None, (
"PostgreSQL does not support collation setting at database creation time."
)
if test_settings['CHARSET']:
return "WITH ENCODING '%s'" % test_settings['CHARSET']
return ''
def _clone_test_db(self, number, verbosity, keepdb=False):
# CREATE DATABASE ... WITH TEMPLATE ... requires closing connections
# to the template database.
self.connection.close()
qn = self.connection.ops.quote_name
source_database_name = self.connection.settings_dict['NAME']
target_database_name = self.get_test_db_clone_settings(number)['NAME']
with self._nodb_connection.cursor() as cursor:
try:
cursor.execute("CREATE DATABASE %s WITH TEMPLATE %s" % (
qn(target_database_name), qn(source_database_name)))
except Exception as e:
if keepdb:
return
try:
if verbosity >= 1:
print("Destroying old test database for alias %s..." % (
self._get_database_display_str(verbosity, target_database_name),
))
cursor.execute("DROP DATABASE %s" % qn(target_database_name))
cursor.execute("CREATE DATABASE %s WITH TEMPLATE %s" % (
qn(target_database_name), qn(source_database_name)))
except Exception as e:
sys.stderr.write("Got an error cloning the test database: %s\n" % e)
sys.exit(2)
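
# A hedged configuration sketch: with TEST settings like the following,
# sql_table_creation_suffix() above returns "WITH ENCODING 'UTF8'"
# (alias and database name are illustrative):
#
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.postgresql',
#             'NAME': 'mydb',
#             'TEST': {'CHARSET': 'UTF8', 'COLLATION': None},
#         },
#     }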
|
3DP-Unlimited/3DP-Printrun | refs/heads/updated-pengpod1000 | printrun/cairosvg/surface/__init__.py | 2 | # -*- coding: utf-8 -*-
# This file is part of CairoSVG
# Copyright © 2010-2012 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with CairoSVG. If not, see <http://www.gnu.org/licenses/>.
"""
Cairo surface creators.
"""
import cairo
import io
from ..parser import Tree
from .colors import color
from .defs import gradient_or_pattern, parse_def
from .helpers import (
node_format, transform, normalize, filter_fill_or_stroke,
apply_matrix_transform, PointError)
from .path import PATH_TAGS
from .tags import TAGS
from .units import size
from . import units
class Surface(object):
"""Abstract base class for CairoSVG surfaces.
The ``width`` and ``height`` attributes are in device units (pixels for
PNG, else points).
The ``context_width`` and ``context_height`` attributes are in user units
    (i.e. in pixels); they represent the size of the active viewport.
"""
# Subclasses must either define this or override _create_surface()
surface_class = None
@classmethod
def convert(cls, bytestring = None, **kwargs):
"""Convert a SVG document to the format for this class.
Specify the input by passing one of these:
:param bytestring: The SVG source as a byte-string.
:param file_obj: A file-like object.
:param url: A filename.
And the output with:
:param write_to: The filename of file-like object where to write the
output. If None or not provided, return a byte string.
        Only ``bytestring`` can be passed as a positional argument; other
parameters are keyword-only.
"""
dpi = kwargs.pop('dpi', 96)
write_to = kwargs.pop('write_to', None)
kwargs['bytestring'] = bytestring
tree = Tree(**kwargs)
if write_to is None:
output = io.BytesIO()
else:
output = write_to
cls(tree, output, dpi).finish()
if write_to is None:
return output.getvalue()
def __init__(self, tree, output, dpi):
"""Create the surface from a filename or a file-like object.
The rendered content is written to ``output`` which can be a filename,
a file-like object, ``None`` (render in memory but do not write
anything) or the built-in ``bytes`` as a marker.
Call the ``.finish()`` method to make sure that the output is
actually written.
"""
self.cairo = None
self.context_width, self.context_height = None, None
self.cursor_position = 0, 0
self.total_width = 0
self.markers = {}
self.gradients = {}
self.patterns = {}
self.paths = {}
self.page_sizes = []
self._old_parent_node = self.parent_node = None
self.output = output
self.dpi = dpi
self.font_size = size(self, "12pt")
width, height, viewbox = node_format(self, tree)
        # Actual surface dimensions: may be rounded on raster surface types
self.cairo, self.width, self.height = self._create_surface(
width * self.device_units_per_user_units,
height * self.device_units_per_user_units)
self.page_sizes.append((self.width, self.height))
self.context = cairo.Context(self.cairo)
# We must scale the context as the surface size is using physical units
self.context.scale(
self.device_units_per_user_units, self.device_units_per_user_units)
# Initial, non-rounded dimensions
self.set_context_size(width, height, viewbox)
self.context.move_to(0, 0)
self.draw_root(tree)
@property
def points_per_pixel(self):
"""Surface resolution."""
return 1 / (self.dpi * units.UNITS["pt"])
@property
def device_units_per_user_units(self):
"""Ratio between Cairo device units and user units.
Device units are points for everything but PNG, and pixels for
PNG. User units are pixels.
"""
return self.points_per_pixel
def _create_surface(self, width, height):
"""Create and return ``(cairo_surface, width, height)``."""
# self.surface_class should not be None when called here
# pylint: disable=E1102
cairo_surface = self.surface_class(self.output, width, height)
# pylint: enable=E1102
return cairo_surface, width, height
def set_context_size(self, width, height, viewbox):
"""Set the Cairo context size, set the SVG viewport size."""
if viewbox:
x, y, x_size, y_size = viewbox
self.context_width, self.context_height = x_size, y_size
x_ratio, y_ratio = width / x_size, height / y_size
matrix = cairo.Matrix()
if x_ratio > y_ratio:
matrix.translate((width - x_size * y_ratio) / 2, 0)
matrix.scale(y_ratio, y_ratio)
matrix.translate(-x, -y / y_ratio * x_ratio)
elif x_ratio < y_ratio:
matrix.translate(0, (height - y_size * x_ratio) / 2)
matrix.scale(x_ratio, x_ratio)
matrix.translate(-x / x_ratio * y_ratio, -y)
else:
matrix.scale(x_ratio, y_ratio)
matrix.translate(-x, -y)
apply_matrix_transform(self, matrix)
else:
self.context_width, self.context_height = width, height
def finish(self):
"""Read the surface content."""
self.cairo.finish()
def draw_root(self, node):
"""Draw the root ``node``."""
self.draw(node)
def draw(self, node, stroke_and_fill = True):
"""Draw ``node`` and its children."""
old_font_size = self.font_size
self.font_size = size(self, node.get("font-size", "12pt"))
# Do not draw defs
if node.tag == "defs":
for child in node.children:
parse_def(self, child)
return
# Do not draw elements with width or height of 0
if (("width" in node and size(self, node["width"]) == 0) or
("height" in node and size(self, node["height"]) == 0)):
return
node.tangents = [None]
node.pending_markers = []
self._old_parent_node = self.parent_node
self.parent_node = node
opacity = float(node.get("opacity", 1))
if opacity < 1:
self.context.push_group()
self.context.save()
self.context.move_to(
size(self, node.get("x"), "x"),
size(self, node.get("y"), "y"))
# Transform the context according to the ``transform`` attribute
transform(self, node.get("transform"))
if node.tag in PATH_TAGS:
# Set 1 as default stroke-width
if not node.get("stroke-width"):
node["stroke-width"] = "1"
        # Set the node's drawing information if the ``node.tag`` method exists
line_cap = node.get("stroke-linecap")
if line_cap == "square":
self.context.set_line_cap(cairo.LINE_CAP_SQUARE)
if line_cap == "round":
self.context.set_line_cap(cairo.LINE_CAP_ROUND)
join_cap = node.get("stroke-linejoin")
if join_cap == "round":
self.context.set_line_join(cairo.LINE_JOIN_ROUND)
if join_cap == "bevel":
self.context.set_line_join(cairo.LINE_JOIN_BEVEL)
dash_array = normalize(node.get("stroke-dasharray", "")).split()
if dash_array:
dashes = [size(self, dash) for dash in dash_array]
if sum(dashes):
offset = size(self, node.get("stroke-dashoffset"))
self.context.set_dash(dashes, offset)
miter_limit = float(node.get("stroke-miterlimit", 4))
self.context.set_miter_limit(miter_limit)
if node.tag in TAGS:
try:
TAGS[node.tag](self, node)
except PointError:
# Error in point parsing, do nothing
pass
# Get stroke and fill opacity
stroke_opacity = float(node.get("stroke-opacity", 1))
fill_opacity = float(node.get("fill-opacity", 1))
        # Manage display and visibility
display = node.get("display", "inline") != "none"
visible = display and (node.get("visibility", "visible") != "hidden")
if stroke_and_fill and visible:
# Fill
if "url(#" in (node.get("fill") or ""):
name = filter_fill_or_stroke(node.get("fill"))
gradient_or_pattern(self, node, name)
else:
if node.get("fill-rule") == "evenodd":
self.context.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
self.context.set_source_rgba(
*color(node.get("fill", "black"), fill_opacity))
self.context.fill_preserve()
# Stroke
self.context.set_line_width(size(self, node.get("stroke-width")))
if "url(#" in (node.get("stroke") or ""):
name = filter_fill_or_stroke(node.get("stroke"))
gradient_or_pattern(self, node, name)
else:
self.context.set_source_rgba(
*color(node.get("stroke"), stroke_opacity))
self.context.stroke()
elif not visible:
self.context.new_path()
# Draw children
if display and node.tag not in (
"linearGradient", "radialGradient", "marker", "pattern"):
for child in node.children:
self.draw(child, stroke_and_fill)
if not node.root:
            # Restoring the context is useless if we are in the root tag; it
            # may raise an exception if we have multiple svg tags
self.context.restore()
if opacity < 1:
self.context.pop_group_to_source()
self.context.paint_with_alpha(opacity)
self.parent_node = self._old_parent_node
self.font_size = old_font_size
class MultipageSurface(Surface):
"""Abstract base class for surfaces that can handle multiple pages."""
def draw_root(self, node):
self.width = None
self.height = None
svg_children = [child for child in node.children if child.tag == 'svg']
if svg_children:
# Multi-page
for page in svg_children:
width, height, viewbox = node_format(self, page)
self.context.save()
self.set_context_size(width, height, viewbox)
width *= self.device_units_per_user_units
height *= self.device_units_per_user_units
self.page_sizes.append((width, height))
self.cairo.set_size(width, height)
self.draw(page)
self.context.restore()
self.cairo.show_page()
else:
self.draw(node)
class PDFSurface(MultipageSurface):
"""A surface that writes in PDF format."""
surface_class = cairo.PDFSurface
class PSSurface(MultipageSurface):
"""A surface that writes in PostScript format."""
surface_class = cairo.PSSurface
class PNGSurface(Surface):
"""A surface that writes in PNG format."""
device_units_per_user_units = 1
def _create_surface(self, width, height):
"""Create and return ``(cairo_surface, width, height)``."""
width = int(width)
height = int(height)
cairo_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
return cairo_surface, width, height
def finish(self):
"""Read the PNG surface content."""
if self.output is not None:
self.cairo.write_to_png(self.output)
return super(PNGSurface, self).finish()
class SVGSurface(Surface):
"""A surface that writes in SVG format.
It may seem pointless to render SVG to SVG, but this can be used
with ``output=None`` to get a vector-based single page cairo surface.
"""
surface_class = cairo.SVGSurface
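
# A minimal usage sketch (``svg_bytes`` and the file names are illustrative;
# ``convert`` accepts the source keywords its docstring lists -- bytestring,
# file_obj or url):
#
#     png_bytes = PNGSurface.convert(bytestring=svg_bytes, dpi=96)
#     PDFSurface.convert(url='drawing.svg', write_to='drawing.pdf')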
|
weiweihuanghuang/Glyphs-Scripts | refs/heads/master | Anchors/Anchor Mover.py | 2 | #MenuTitle: Anchor Mover 1.1
# -*- coding: utf-8 -*-
__doc__="""
Vertically move anchors in selected glyphs (GUI).
"""
import GlyphsApp
import vanilla
class AnchorMover(object):
def __init__(self):
self.w = vanilla.FloatingWindow((340, 40), "Move anchors")
self.w.text_anchor = vanilla.TextBox((15, 12+2, 45, 14), "Move", sizeStyle='small')
self.w.anchor_name = vanilla.PopUpButton((50, 12, 80, 17), self.GetAnchorNames(), sizeStyle='small', callback=self.AnchorChangeCallback)
self.w.text_value = vanilla.TextBox((135, 12+2, 55, 14), "to height", sizeStyle='small')
self.w.anchor_value = vanilla.EditText((190, 12, 50, 19), "0.0", sizeStyle='small')
self.w.movebutton = vanilla.Button((-80, 12+1, -15, 17), "Move", sizeStyle='small', callback=self.buttonCallback)
self.w.setDefaultButton( self.w.movebutton )
self.w.open()
self.AnchorChangeCallback( self )
def ValuePlus1(self, sender):
anchor_y = float( self.w.anchor_value.get() )
self.w.anchor_value.set( str(anchor_y + 1.0) )
def AnchorChangeCallback(self, sender):
anchor_index = self.w.anchor_name.get()
anchor_name = str( self.w.anchor_name.getItems()[anchor_index] )
selectedLayers = Glyphs.font.selectedLayers
thisLayer = [ x for x in selectedLayers if x.anchors[anchor_name] ][0] # first available glyph that has this anchor
x = str( thisLayer.anchors[anchor_name].y ) # get its anchor value
self.w.anchor_value.set( x )
def buttonCallback(self, sender):
selectedLayers = Glyphs.font.selectedLayers
print "Processing %i glyphs..." % len( selectedLayers )
anchor_index = self.w.anchor_name.get()
anchor_name = str( self.w.anchor_name.getItems()[anchor_index] )
try:
anchor_y = float( self.w.anchor_value.get() )
except:
anchor_y = 0.0
# print anchor_index, anchor_name, anchor_y #DEBUG
for thisLayer in selectedLayers:
# print "Changing %s in %s..." % (anchor_name, thisLayer.parent.name) #DEBUG
try:
if len( thisLayer.anchors ) > 0:
thisLayer.setDisableUpdates()
for thisAnchor in thisLayer.anchors:
if thisAnchor.name == anchor_name:
old_anchor_y = thisAnchor.y
if old_anchor_y != anchor_y:
thisAnchor.y = anchor_y
print "Moved %s anchor in %s from %s to %s." % ( anchor_name, thisLayer.parent.name, old_anchor_y, thisAnchor.y )
except:
print "Error: Failed to move anchor in %s to %s." % ( thisLayer.parent.name, anchor_y )
finally:
thisLayer.setEnableUpdates()
print "Done."
def GetAnchorNames(self):
myAnchorList = []
selectedLayers = Glyphs.font.selectedLayers
for thisLayer in selectedLayers:
AnchorNames = list( thisLayer.anchors.keys() ) # hack to avoid traceback
for thisAnchorName in AnchorNames:
if thisAnchorName not in myAnchorList:
myAnchorList.append( str(thisAnchorName) )
return sorted( myAnchorList )
AnchorMover()
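
# The same move without the GUI -- a hedged sketch against the Glyphs API
# already used above (anchor name and target height are illustrative):
#
#     for thisLayer in Glyphs.font.selectedLayers:
#         for thisAnchor in thisLayer.anchors:
#             if thisAnchor.name == "top":
#                 thisAnchor.y = 500.0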
|
faroos3/QuoteR | refs/heads/master | client/diff_checker/DiffWord.py | 2 | '''
This is the diff word class,
used to help evaluate which words are
different and which are not.
'''
class DiffWord:
# all the words should be DiffWords
def __init__(self, OWord="", isDiff=True, idex=[-1, -1]):
        # default constructor for a word
        self.word = OWord  # what the word is
        self.isDifferent = isDiff  # whether the word is different
        self.index = idex  # the first element of the list is the word's
        # position in the original file; the second is its position in
        # the derived file.
def __str__(self):
return self.word
# Observers
def getWord(self): # get the word
ans = self.word
return ans
def isDiff(self):
return self.isDifferent is True
def getIndex(self):
return self.index
def get_pos_in_original(self):
return self.index[0]
def get_pos_in_derived(self):
return self.index[1]
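
# A minimal usage sketch (values are illustrative):
#
#     w = DiffWord("colour", isDiff=True, idex=[4, 4])
#     str(w)                   # -> 'colour'
#     w.isDiff()               # -> True
#     w.get_pos_in_original()  # -> 4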
|
BorgERP/borg-erp-6of3 | refs/heads/master | addons/edi/models/edi.py | 3 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import hashlib
import json
import logging
import re
import threading
import time
import urllib2
import openerp
import openerp.release as release
import netsvc
import pooler
from osv import osv,fields,orm
from tools.translate import _
from tools.safe_eval import safe_eval as eval
EXTERNAL_ID_PATTERN = re.compile(r'^([^.:]+)(?::([^.]+))?\.(\S+)$')
EDI_VIEW_WEB_URL = '%s/edi/view?db=%s&token=%s'
EDI_PROTOCOL_VERSION = 1 # arbitrary ever-increasing version number
EDI_GENERATOR = 'OpenERP ' + release.major_version
EDI_GENERATOR_VERSION = release.version_info
def split_external_id(ext_id):
match = EXTERNAL_ID_PATTERN.match(ext_id)
assert match, \
_("'%s' is an invalid external ID") % (ext_id)
return {'module': match.group(1),
'db_uuid': match.group(2),
'id': match.group(3),
'full': match.group(0)}
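
# A hedged example of the grammar accepted above (the UUID is shortened for
# illustration; real database UUIDs contain no dots or colons):
#
#     split_external_id('base:5af36402-c3fb.res_partner_3')
#     # -> {'module': 'base', 'db_uuid': '5af36402-c3fb',
#     #     'id': 'res_partner_3', 'full': 'base:5af36402-c3fb.res_partner_3'}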
def safe_unique_id(database_id, model, record_id):
"""Generate a unique string to represent a (database_uuid,model,record_id) pair
without being too long, and with a very low probability of collisions.
"""
msg = "%s-%s-%s-%s" % (time.time(), database_id, model, record_id)
digest = hashlib.sha1(msg).digest()
# fold the sha1 20 bytes digest to 9 bytes
digest = ''.join(chr(ord(x) ^ ord(y)) for (x,y) in zip(digest[:9], digest[9:-2]))
# b64-encode the 9-bytes folded digest to a reasonable 12 chars ASCII ID
digest = base64.urlsafe_b64encode(digest)
return '%s-%s' % (model.replace('.','_'), digest)
def last_update_for(record):
"""Returns the last update timestamp for the given record,
if available, otherwise False
"""
if record._model._log_access:
record_log = record.perm_read()[0]
return record_log.get('write_date') or record_log.get('create_date') or False
return False
_logger = logging.getLogger('edi')
class edi_document(osv.osv):
_name = 'edi.document'
_description = 'EDI Document'
_columns = {
'name': fields.char("EDI token", size = 128, help="Unique identifier for retrieving an EDI document."),
'document': fields.text("Document", help="EDI document content")
}
_sql_constraints = [
('name_uniq', 'unique (name)', 'EDI Tokens must be unique!')
]
def new_edi_token(self, cr, uid, record):
"""Return a new, random unique token to identify this model record,
and to be used as token when exporting it as an EDI document.
:param browse_record record: model record for which a token is needed
"""
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
edi_token = hashlib.sha256('%s-%s-%s-%s' % (time.time(), db_uuid, record._name, record.id)).hexdigest()
return edi_token
def serialize(self, edi_documents):
"""Serialize the given EDI document structures (Python dicts holding EDI data),
using JSON serialization.
:param [dict] edi_documents: list of EDI document structures to serialize
:return: UTF-8 encoded string containing the serialized document
"""
serialized_list = json.dumps(edi_documents)
return serialized_list
def generate_edi(self, cr, uid, records, context=None):
"""Generates a final EDI document containing the EDI serialization
of the given records, which should all be instances of a Model
that has the :meth:`~.edi` mixin. The document is not saved in the
database, this is done by :meth:`~.export_edi`.
:param list(browse_record) records: records to export as EDI
:return: UTF-8 encoded string containing the serialized records
"""
edi_list = []
for record in records:
record_model_obj = self.pool.get(record._name)
edi_list += record_model_obj.edi_export(cr, uid, [record], context=context)
return self.serialize(edi_list)
def get_document(self, cr, uid, edi_token, context=None):
"""Retrieve the EDI document corresponding to the given edi_token.
:return: EDI document string
        :raise: ValueError if requested EDI token does not match any known document
"""
_logger.debug("get_document(%s)", edi_token)
edi_ids = self.search(cr, uid, [('name','=', edi_token)], context=context)
if not edi_ids:
raise ValueError('Invalid EDI token: %s' % edi_token)
edi = self.browse(cr, uid, edi_ids[0], context=context)
return edi.document
def load_edi(self, cr, uid, edi_documents, context=None):
"""Import the given EDI document structures into the system, using
:meth:`~.import_edi`.
:param edi_documents: list of Python dicts containing the deserialized
version of EDI documents
:return: list of (model, id, action) tuple containing the model and database ID
of all records that were imported in the system, plus a suggested
action definition dict for displaying each document.
"""
ir_module = self.pool.get('ir.module.module')
res = []
for edi_document in edi_documents:
module = edi_document.get('__import_module') or edi_document.get('__module')
assert module, 'a `__module` or `__import_module` attribute is required in each EDI document'
if module != 'base' and not ir_module.search(cr, uid, [('name','=',module),('state','=','installed')]):
raise osv.except_osv(_('Missing Application'),
_("The document you are trying to import requires the OpenERP `%s` application. "
"You can install it by connecting as the administrator and opening the configuration assistant.")%(module,))
model = edi_document.get('__import_model') or edi_document.get('__model')
assert model, 'a `__model` or `__import_model` attribute is required in each EDI document'
model_obj = self.pool.get(model)
assert model_obj, 'model `%s` cannot be found, despite module `%s` being available - '\
'this EDI document seems invalid or unsupported' % (model,module)
record_id = model_obj.edi_import(cr, uid, edi_document, context=context)
record_action = model_obj._edi_record_display_action(cr, uid, record_id, context=context)
res.append((model, record_id, record_action))
return res
def deserialize(self, edi_documents_string):
"""Return deserialized version of the given EDI Document string.
:param str|unicode edi_documents_string: UTF-8 string (or unicode) containing
JSON-serialized EDI document(s)
:return: Python object representing the EDI document(s) (usually a list of dicts)
"""
return json.loads(edi_documents_string)
def export_edi(self, cr, uid, records, context=None):
"""Export the given database records as EDI documents, stores them
permanently with a new unique EDI token, for later retrieval via :meth:`~.get_document`,
and returns the list of the new corresponding ``ir.edi.document`` records.
:param records: list of browse_record of any model
:return: list of IDs of the new ``ir.edi.document`` entries, in the same
order as the provided ``records``.
"""
exported_ids = []
for record in records:
document = self.generate_edi(cr, uid, [record], context)
token = self.new_edi_token(cr, uid, record)
self.create(cr, uid, {
'name': token,
'document': document
}, context=context)
exported_ids.append(token)
return exported_ids
def import_edi(self, cr, uid, edi_document=None, edi_url=None, context=None):
"""Import a JSON serialized EDI Document string into the system, first retrieving it
from the given ``edi_url`` if provided.
:param str|unicode edi_document: UTF-8 string or unicode containing JSON-serialized
EDI Document to import. Must not be provided if
``edi_url`` is given.
:param str|unicode edi_url: URL where the EDI document (same format as ``edi_document``)
may be retrieved, without authentication.
"""
if edi_url:
assert not edi_document, 'edi_document must not be provided if edi_url is given'
edi_document = urllib2.urlopen(edi_url).read()
assert edi_document, 'EDI Document is empty!'
edi_documents = self.deserialize(edi_document)
return self.load_edi(cr, uid, edi_documents, context=context)
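
    # A hedged usage sketch, from inside any model method (the URL is
    # illustrative; ``cr``/``uid`` come from the usual OpenERP plumbing):
    #
    #     edi_obj = self.pool.get('edi.document')
    #     results = edi_obj.import_edi(cr, uid,
    #         edi_url='http://example.com/edi/download?token=...')
    #     # -> [(model, record_id, action_dict), ...]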
class EDIMixin(object):
"""Mixin class for Model objects that want be exposed as EDI documents.
Classes that inherit from this mixin class should override the
``edi_import()`` and ``edi_export()`` methods to implement their
specific behavior, based on the primitives provided by this mixin."""
def _edi_requires_attributes(self, attributes, edi_document):
model_name = edi_document.get('__imported_model') or edi_document.get('__model') or self._name
for attribute in attributes:
assert edi_document.get(attribute),\
'Attribute `%s` is required in %s EDI documents' % (attribute, model_name)
# private method, not RPC-exposed as it creates ir.model.data entries as
# SUPERUSER based on its parameters
def _edi_external_id(self, cr, uid, record, existing_id=None, existing_module=None,
context=None):
"""Generate/Retrieve unique external ID for ``record``.
Each EDI record and each relationship attribute in it is identified by a
unique external ID, which includes the database's UUID, as a way to
refer to any record within any OpenERP instance, without conflict.
For OpenERP records that have an existing "External ID" (i.e. an entry in
ir.model.data), the EDI unique identifier for this record will be made of
"%s:%s:%s" % (module, database UUID, ir.model.data ID). The database's
UUID MUST NOT contain a colon characters (this is guaranteed by the
UUID algorithm).
For records that have no existing ir.model.data entry, a new one will be
created during the EDI export. It is recommended that the generated external ID
contains a readable reference to the record model, plus a unique value that
hides the database ID. If ``existing_id`` is provided (because it came from
an import), it will be used instead of generating a new one.
If ``existing_module`` is provided (because it came from
an import), it will be used instead of using local values.
:param browse_record record: any browse_record needing an EDI external ID
:param string existing_id: optional existing external ID value, usually coming
from a just-imported EDI record, to be used instead
of generating a new one
:param string existing_module: optional existing module name, usually in the
format ``module:db_uuid`` and coming from a
just-imported EDI record, to be used instead
of local values
:return: the full unique External ID to use for record
"""
ir_model_data = self.pool.get('ir.model.data')
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
ext_id = record.get_external_id()[record.id]
if not ext_id:
ext_id = existing_id or safe_unique_id(db_uuid, record._name, record.id)
# ID is unique cross-db thanks to db_uuid (already included in existing_module)
module = existing_module or "%s:%s" % (record._original_module, db_uuid)
_logger.debug("%s: Generating new external ID `%s.%s` for %r", self._name,
module, ext_id, record)
ir_model_data.create(cr, openerp.SUPERUSER_ID,
{'name': ext_id,
'model': record._name,
'module': module,
'res_id': record.id})
else:
module, ext_id = ext_id.split('.')
if not ':' in module:
# this record was not previously EDI-imported
if not module == record._original_module:
# this could happen for data records defined in a module that depends
# on the module that owns the model, e.g. purchase defines
# product.pricelist records.
_logger.debug('Mismatching module: expected %s, got %s, for %s',
module, record._original_module, record)
# ID is unique cross-db thanks to db_uuid
module = "%s:%s" % (module, db_uuid)
return '%s.%s' % (module, ext_id)
def _edi_record_display_action(self, cr, uid, id, context=None):
"""Returns an appropriate action definition dict for displaying
the record with ID ``rec_id``.
:param int id: database ID of record to display
:return: action definition dict
"""
return {'type': 'ir.actions.act_window',
'view_mode': 'form,tree',
'view_type': 'form',
'res_model': self._name,
'res_id': id}
def edi_metadata(self, cr, uid, records, context=None):
"""Return a list containing the boilerplate EDI structures for
exporting ``records`` as EDI, including
the metadata fields
The metadata fields always include::
{
'__model': 'some.model', # record model
              '__module': 'module',                     # required module
'__id': 'module:db-uuid:model.id', # unique global external ID for the record
'__last_update': '2011-01-01 10:00:00', # last update date in UTC!
'__version': 1, # EDI spec version
'__generator' : 'OpenERP', # EDI generator
'__generator_version' : [6,1,0], # server version, to check compatibility.
              '__attachments': [...],                   # optional list of attachment dicts
}
:param list(browse_record) records: records to export
:return: list of dicts containing boilerplate EDI metadata for each record,
at the corresponding index from ``records``.
"""
data_ids = []
ir_attachment = self.pool.get('ir.attachment')
results = []
for record in records:
ext_id = self._edi_external_id(cr, uid, record, context=context)
edi_dict = {
'__id': ext_id,
'__last_update': last_update_for(record),
'__model' : record._name,
'__module' : record._original_module,
'__version': EDI_PROTOCOL_VERSION,
'__generator': EDI_GENERATOR,
'__generator_version': EDI_GENERATOR_VERSION,
}
attachment_ids = ir_attachment.search(cr, uid, [('res_model','=', record._name), ('res_id', '=', record.id)])
if attachment_ids:
attachments = []
for attachment in ir_attachment.browse(cr, uid, attachment_ids, context=context):
attachments.append({
'name' : attachment.name,
'content': attachment.datas, # already base64 encoded!
'file_name': attachment.datas_fname,
})
edi_dict.update(__attachments=attachments)
results.append(edi_dict)
return results
def edi_m2o(self, cr, uid, record, context=None):
"""Return a m2o EDI representation for the given record.
The EDI format for a many2one is::
['unique_external_id', 'Document Name']
"""
edi_ext_id = self._edi_external_id(cr, uid, record, context=context)
relation_model = record._model
name = relation_model.name_get(cr, uid, [record.id], context=context)
name = name and name[0][1] or False
return [edi_ext_id, name]
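
    # e.g. a many2one to a partner serializes as (illustrative values):
    #
    #     ['base:5af36402-c3fb.res_partner_3', 'ASUStek']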
def edi_o2m(self, cr, uid, records, edi_struct=None, context=None):
"""Return a list representing a O2M EDI relationship containing
all the given records, according to the given ``edi_struct``.
This is basically the same as exporting all the record using
:meth:`~.edi_export` with the given ``edi_struct``, and wrapping
the results in a list.
Example::
[ # O2M fields would be a list of dicts, with their
{ '__id': 'module:db-uuid.id', # own __id.
'__last_update': 'iso date', # update date
'name': 'some name',
#...
},
# ...
],
"""
result = []
for record in records:
result += record._model.edi_export(cr, uid, [record], edi_struct=edi_struct, context=context)
return result
def edi_m2m(self, cr, uid, records, context=None):
"""Return a list representing a M2M EDI relationship directed towards
all the given records.
This is basically the same as exporting all the record using
:meth:`~.edi_m2o` and wrapping the results in a list.
Example::
# M2M fields are exported as a list of pairs, like a list of M2O values
[
['module:db-uuid.id1', 'Task 01: bla bla'],
['module:db-uuid.id2', 'Task 02: bla bla']
]
"""
return [self.edi_m2o(cr, uid, r, context=context) for r in records]
def edi_export(self, cr, uid, records, edi_struct=None, context=None):
"""Returns a list of dicts representing an edi.document containing the
records, and matching the given ``edi_struct``, if provided.
:param edi_struct: if provided, edi_struct should be a dictionary
with a skeleton of the fields to export.
Basic fields can have any key as value, but o2m
values should have a sample skeleton dict as value,
to act like a recursive export.
For example, for a res.partner record::
edi_struct: {
'name': True,
'company_id': True,
'address': {
'name': True,
'street': True,
}
}
Any field not specified in the edi_struct will not
be included in the exported data. Fields with no
value (False) will be omitted in the EDI struct.
If edi_struct is omitted, no fields will be exported
"""
if edi_struct is None:
edi_struct = {}
fields_to_export = edi_struct.keys()
results = []
for record in records:
edi_dict = self.edi_metadata(cr, uid, [record], context=context)[0]
for field in fields_to_export:
column = self._all_columns[field].column
value = getattr(record, field)
if not value and value not in ('', 0):
continue
elif column._type == 'many2one':
value = self.edi_m2o(cr, uid, value, context=context)
elif column._type == 'many2many':
value = self.edi_m2m(cr, uid, value, context=context)
elif column._type == 'one2many':
value = self.edi_o2m(cr, uid, value, edi_struct=edi_struct.get(field, {}), context=context)
edi_dict[field] = value
results.append(edi_dict)
return results
def edi_export_and_email(self, cr, uid, ids, template_ext_id, context=None):
"""Export the given records just like :meth:`~.export_edi`, the render the
given email template, in order to trigger appropriate notifications.
This method is intended to be called as part of business documents'
lifecycle, so it silently ignores any error occurring during the process,
as this is usually non-critical. To avoid any delay, it is also asynchronous
and will spawn a short-lived thread to perform the action.
:param str template_ext_id: external id of the email.template to use for
the mail notifications
:return: True
"""
def email_task():
db = pooler.get_db(cr.dbname)
local_cr = None
try:
# lame workaround to wait for commit of parent transaction
wait_try, wait_max_try = 0, 50
while not cr._Cursor__closed and wait_try < wait_max_try:
time.sleep(3)
wait_try += 1
# grab a fresh browse_record on local cursor
local_cr = db.cursor()
web_root_url = self.pool.get('ir.config_parameter').get_param(local_cr, uid, 'web.base.url')
if not web_root_url:
_logger.warning('Ignoring EDI mail notification, web.base.url not defined in parameters')
return
mail_tmpl = self._edi_get_object_by_external_id(local_cr, uid, template_ext_id, 'email.template', context=context)
if not mail_tmpl:
# skip EDI export if the template was not found
_logger.warning('Ignoring EDI mail notification, template %s cannot be located', template_ext_id)
return
for edi_record in self.browse(local_cr, uid, ids, context=context):
edi_context = dict(context, edi_web_url_view=self._edi_get_object_web_url_view(local_cr, uid, edi_record, context=context))
self.pool.get('email.template').send_mail(local_cr, uid, mail_tmpl.id, edi_record.id,
force_send=False, context=edi_context)
_logger.info('EDI export successful for %s #%s, email notification sent.', self._name, edi_record.id)
except Exception:
_logger.warning('Ignoring EDI mail notification, failed to generate it.', exc_info=True)
finally:
if local_cr:
local_cr.commit()
local_cr.close()
threading.Thread(target=email_task, name='EDI ExportAndEmail for %s %r' % (self._name, ids)).start()
return True
def _edi_get_object_web_url_view(self, cr, uid, record, context=None):
web_root_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
if not web_root_url:
_logger.warning('Ignoring EDI mail notification, web.base.url not defined in parameters')
return ''
edi_token = self.pool.get('edi.document').export_edi(cr, uid, [record], context=context)[0]
return EDI_VIEW_WEB_URL % (web_root_url, cr.dbname, edi_token)
def _edi_get_object_by_name(self, cr, uid, name, model_name, context=None):
model = self.pool.get(model_name)
search_results = model.name_search(cr, uid, name, operator='=', context=context)
if len(search_results) == 1:
return model.browse(cr, uid, search_results[0][0], context=context)
return False
def _edi_generate_report_attachment(self, cr, uid, record, context=None):
"""Utility method to generate the first PDF-type report declared for the
current model with ``usage`` attribute set to ``default``.
This must be called explicitly by models that need it, usually
at the beginning of ``edi_export``, before the call to ``super()``."""
ir_actions_report = self.pool.get('ir.actions.report.xml')
matching_reports = ir_actions_report.search(cr, uid, [('model','=',self._name),
('report_type','=','pdf'),
('usage','=','default')])
if matching_reports:
report = ir_actions_report.browse(cr, uid, matching_reports[0])
report_service = 'report.' + report.report_name
service = netsvc.LocalService(report_service)
(result, format) = service.create(cr, uid, [record.id], {'model': self._name}, context=context)
eval_context = {'time': time, 'object': record}
if not report.attachment or not eval(report.attachment, eval_context):
# no auto-saving of report as attachment, need to do it manually
result = base64.b64encode(result)
file_name = record.name_get()[0][1]
file_name = re.sub(r'[^a-zA-Z0-9_-]', '_', file_name)
file_name += ".pdf"
ir_attachment = self.pool.get('ir.attachment').create(cr, uid,
{'name': file_name,
'datas': result,
'datas_fname': file_name,
'res_model': self._name,
'res_id': record.id},
context=context)
def _edi_import_attachments(self, cr, uid, record_id, edi_document, context=None):
ir_attachment = self.pool.get('ir.attachment')
for attachment in edi_document.get('__attachments', []):
# check attachment data is non-empty and valid
file_data = None
try:
file_data = base64.b64decode(attachment.get('content'))
except TypeError:
pass
assert file_data, 'Incorrect/Missing attachment file content'
assert attachment.get('name'), 'Incorrect/Missing attachment name'
        assert attachment.get('file_name'), 'Incorrect/Missing attachment file name'
ir_attachment.create(cr, uid, {'name': attachment['name'],
'datas_fname': attachment['file_name'],
'res_model': self._name,
'res_id': record_id,
# should be pure 7bit ASCII
'datas': str(attachment['content']),
}, context=context)
def _edi_get_object_by_external_id(self, cr, uid, external_id, model, context=None):
"""Returns browse_record representing object identified by the model and external_id,
or None if no record was found with this external id.
:param external_id: fully qualified external id, in the EDI form
``module:db_uuid:identifier``.
:param model: model name the record belongs to.
"""
ir_model_data = self.pool.get('ir.model.data')
# external_id is expected to have the form: ``module:db_uuid:model.random_name``
ext_id_members = split_external_id(external_id)
db_uuid = self.pool.get('ir.config_parameter').get_param(cr, uid, 'database.uuid')
module = ext_id_members['module']
ext_id = ext_id_members['id']
modules = []
ext_db_uuid = ext_id_members['db_uuid']
if ext_db_uuid:
modules.append('%s:%s' % (module, ext_id_members['db_uuid']))
if ext_db_uuid is None or ext_db_uuid == db_uuid:
# local records may also be registered without the db_uuid
modules.append(module)
data_ids = ir_model_data.search(cr, uid, [('model','=',model),
('name','=',ext_id),
('module','in',modules)])
if data_ids:
model = self.pool.get(model)
data = ir_model_data.browse(cr, uid, data_ids[0], context=context)
result = model.browse(cr, uid, data.res_id, context=context)
return result
def edi_import_relation(self, cr, uid, model, value, external_id, context=None):
"""Imports a M2O/M2M relation EDI specification ``[external_id,value]`` for the
given model, returning the corresponding database ID:
* First, checks if the ``external_id`` is already known, in which case the corresponding
database ID is directly returned, without doing anything else;
* If the ``external_id`` is unknown, attempts to locate an existing record
with the same ``value`` via name_search(). If found, the given external_id will
be assigned to this local record (in addition to any existing one)
* If previous steps gave no result, create a new record with the given
value in the target model, assign it the given external_id, and return
the new database ID
"""
_logger.debug("%s: Importing EDI relationship [%r,%r]", model, external_id, value)
target = self._edi_get_object_by_external_id(cr, uid, external_id, model, context=context)
need_new_ext_id = False
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - ID not found, trying name_get",
self._name, external_id, value)
target = self._edi_get_object_by_name(cr, uid, value, model, context=context)
need_new_ext_id = True
if not target:
_logger.debug("%s: Importing EDI relationship [%r,%r] - name not found, creating it!",
self._name, external_id, value)
# also need_new_ext_id here, but already been set above
model = self.pool.get(model)
# should use name_create() but e.g. res.partner won't allow it at the moment
res_id = model.create(cr, uid, {model._rec_name: value}, context=context)
target = model.browse(cr, uid, res_id, context=context)
if need_new_ext_id:
ext_id_members = split_external_id(external_id)
# module name is never used bare when creating ir.model.data entries, in order
# to avoid being taken as part of the module's data, and cleanup up at next update
module = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
# create a new ir.model.data entry for this value
self._edi_external_id(cr, uid, target, existing_id=ext_id_members['id'], existing_module=module, context=context)
return target.id
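
    # A hedged example of the three-step resolution above (values are
    # illustrative): an unknown external ID whose value matches an existing
    # record by name gains a new ir.model.data entry instead of a duplicate.
    #
    #     partner_id = self.edi_import_relation(cr, uid, 'res.partner',
    #         'ASUStek', 'base:5af36402-c3fb.res_partner_3', context=context)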
def edi_import(self, cr, uid, edi_document, context=None):
"""Imports a dict representing an edi.document into the system.
:param dict edi_document: EDI document to import
:return: the database ID of the imported record
"""
assert self._name == edi_document.get('__import_model') or \
('__import_model' not in edi_document and self._name == edi_document.get('__model')), \
"EDI Document Model and current model do not match: '%s' (EDI) vs '%s' (current)" % \
(edi_document['__model'], self._name)
        # First check whether the record is already known in the database, in which case it is ignored
ext_id_members = split_external_id(edi_document['__id'])
existing = self._edi_get_object_by_external_id(cr, uid, ext_id_members['full'], self._name, context=context)
if existing:
_logger.info("'%s' EDI Document with ID '%s' is already known, skipping import!", self._name, ext_id_members['full'])
return existing.id
record_values = {}
o2m_todo = {} # o2m values are processed after their parent already exists
for field_name, field_value in edi_document.iteritems():
# skip metadata and empty fields
if field_name.startswith('__') or field_value is None or field_value is False:
continue
field_info = self._all_columns.get(field_name)
if not field_info:
_logger.warning('Ignoring unknown field `%s` when importing `%s` EDI document', field_name, self._name)
continue
field = field_info.column
# skip function/related fields
if isinstance(field, fields.function):
_logger.warning("Unexpected function field value found in '%s' EDI document: '%s'" % (self._name, field_name))
continue
relation_model = field._obj
if field._type == 'many2one':
record_values[field_name] = self.edi_import_relation(cr, uid, relation_model,
field_value[1], field_value[0],
context=context)
elif field._type == 'many2many':
record_values[field_name] = [self.edi_import_relation(cr, uid, relation_model, m2m_value[1],
m2m_value[0], context=context)
for m2m_value in field_value]
elif field._type == 'one2many':
                # must wait until the parent record is imported, as the parent relationship
# is often required in o2m child records
o2m_todo[field_name] = field_value
else:
record_values[field_name] = field_value
module_ref = "%s:%s" % (ext_id_members['module'], ext_id_members['db_uuid'])
record_id = self.pool.get('ir.model.data')._update(cr, uid, self._name, module_ref, record_values,
xml_id=ext_id_members['id'], context=context)
record_display, = self.name_get(cr, uid, [record_id], context=context)
# process o2m values, connecting them to their parent on-the-fly
for o2m_field, o2m_value in o2m_todo.iteritems():
field = self._all_columns[o2m_field].column
dest_model = self.pool.get(field._obj)
for o2m_line in o2m_value:
# link to parent record: expects an (ext_id, name) pair
o2m_line[field._fields_id] = (ext_id_members['full'], record_display[1])
dest_model.edi_import(cr, uid, o2m_line, context=context)
# process the attachments, if any
self._edi_import_attachments(cr, uid, record_id, edi_document, context=context)
return record_id
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
evelinfong/evelinfong.github.io | refs/heads/master | node_modules/utf8/tests/generate-test-data.py | 1788 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
});
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
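
# For reference, each emitted record has this shape (U+00A9 shown;
# illustrative of the layout only):
#
#     {
#         "codePoint": 169,
#         "decoded": "\u00A9",
#         "encoded": "\u00C2\u00A9"
#     }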
|
programadorjc/django | refs/heads/master | django/core/mail/backends/smtp.py | 477 | """SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address
from django.core.mail.utils import DNS_NAME
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False, use_ssl=None, timeout=None,
ssl_keyfile=None, ssl_certfile=None,
**kwargs):
super(EmailBackend, self).__init__(fail_silently=fail_silently)
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
self.username = settings.EMAIL_HOST_USER if username is None else username
self.password = settings.EMAIL_HOST_PASSWORD if password is None else password
self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls
self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl
self.timeout = settings.EMAIL_TIMEOUT if timeout is None else timeout
self.ssl_keyfile = settings.EMAIL_SSL_KEYFILE if ssl_keyfile is None else ssl_keyfile
self.ssl_certfile = settings.EMAIL_SSL_CERTFILE if ssl_certfile is None else ssl_certfile
if self.use_ssl and self.use_tls:
raise ValueError(
"EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
"one of those settings to True.")
self.connection = None
self._lock = threading.RLock()
def open(self):
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
connection_class = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
if self.use_ssl:
connection_params.update({
'keyfile': self.ssl_keyfile,
'certfile': self.ssl_certfile,
})
try:
self.connection = connection_class(self.host, self.port, **connection_params)
# TLS/SSL are mutually exclusive, so only attempt TLS over
# non-secure connections.
if not self.use_ssl and self.use_tls:
self.connection.ehlo()
self.connection.starttls(keyfile=self.ssl_keyfile, certfile=self.ssl_certfile)
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except smtplib.SMTPException:
if not self.fail_silently:
raise
def close(self):
"""Closes the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except smtplib.SMTPException:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
with self._lock:
new_conn_created = self.open()
if not self.connection:
# We failed silently on open().
# Trying to send would be pointless.
return
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
message = email_message.message()
try:
self.connection.sendmail(from_email, recipients, message.as_bytes(linesep='\r\n'))
except smtplib.SMTPException:
if not self.fail_silently:
raise
return False
return True
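
# A minimal usage sketch (host and credentials are illustrative; in a Django
# project the same values usually come from the EMAIL_* settings):
#
#     from django.core.mail import EmailMessage, get_connection
#
#     connection = get_connection(
#         'django.core.mail.backends.smtp.EmailBackend',
#         host='smtp.example.com', port=587, use_tls=True)
#     EmailMessage('Subject', 'Body', 'from@example.com',
#                  ['to@example.com'], connection=connection).send()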
|
jjdmol/LOFAR | refs/heads/master | LCS/Tools/src/makeTest.py | 1 | #! /usr/bin/env python
# Copyright (C) 2005
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
# makeTest.py: Script to make default class files in a Package/test dir
# in the LOFAR development tree. sh scripts and main program are covered
#
# Usage:
# ./makeTest [ -h] [testName]
# Args:
# testName The name of the testClass that will be created
# h,--help usage
# Revisions:
#
# 15-02-2005 Initial Release.
#
# import all packages we need
#
import os
import sys
import getopt
import re
from datetime import date
def openFile(name,mode):
try:
file = open (name,mode)
except IOError, message:
sys.exit("Error opening file: %s" % message)
return file
def changeName(aName):
return "t"+aName[0].upper()+aName[1:len(aName)]
def replacePackageAndClassName(readFile,writeFile,packageName,
testName,subDirName,shortName):
aLine=readFile.readline()
year=`date.today().year`
while aLine != "":
#set start of copyright year
if aLine.find("%YEAR%") > -1:
aLine = str.replace(aLine,"%YEAR%",year)
# replace SUB with Subdir when needed
if aLine.find("%SUB%") > -1:
if subDirName != "":
aLine = str.replace(aLine,"%SUB%",subDirName+"/")
else:
aLine = str.replace(aLine,"%SUB%",subDirName)
# replace SUBUPPER with Subdir in uppercase when needed
if aLine.find("%SUBUPPER%") > -1:
if subDirName != "":
aLine = str.replace(aLine,"%SUBUPPER%",subDirName+"/")
else:
aLine = str.replace(aLine,"%SUBUPPER%",subDirName)
# replace PACKAGE with real name
if aLine.find("%PACKAGE%") > -1:
aLine = str.replace(aLine,"%PACKAGE%",packageName)
# replace PACKAGEUPPER with uppercase Package name
if aLine.find("%PACKAGEUPPER%") > -1:
aLine = str.replace(aLine,"%PACKAGEUPPER%",packageName.upper())
# replace TEST with testname
if aLine.find("%TEST%") > -1:
aLine = str.replace(aLine,"%TEST%",testName)
# replace TESTSHORT with real given name
if aLine.find("%TESTSHORT%") > -1:
aLine = str.replace(aLine,"%TESTSHORT%",shortName)
# replace TESTUPPER with uppercase testname
if aLine.find("%TESTUPPER%") > -1:
aLine = str.replace(aLine,"%TESTUPPER%",testName.upper())
writeFile.write(aLine)
aLine=readFile.readline()
def makeTestFiles(lofarDir,testName,packageName,testDir,subDirName,shortName,dirLevel):
# main test (.cc file)
readFile=openFile(lofarDir+"/templates/test.cc_template","r")
writeFile=openFile(testName+".cc","w")
replacePackageAndClassName(readFile,writeFile,packageName,testName,subDirName,shortName)
writeFile.close()
readFile.close()
addToMakefile("cc",testName,testDir,subDirName,packageName,dirLevel)
# test run script (.sh file)
readFile=openFile(lofarDir+"/templates/test.sh_template","r")
writeFile=openFile(testName+".sh","w")
replacePackageAndClassName(readFile,writeFile,packageName,testName,subDirName,shortName)
writeFile.close()
readFile.close()
addToMakefile("sh",testName,testDir,subDirName,packageName,dirLevel)
# test input (.in file)
readFile=openFile(lofarDir+"/templates/test.in_template","r")
writeFile=openFile(testName+".in","w")
replacePackageAndClassName(readFile,writeFile,packageName,testName,subDirName,shortName)
writeFile.close()
readFile.close()
# test output (.out file)
readFile=openFile(lofarDir+"/templates/test.out_template","r")
writeFile=openFile(testName+".out","w")
replacePackageAndClassName(readFile,writeFile,packageName,testName,subDirName,shortName)
writeFile.close()
readFile.close()
def addToMakefile(type,testName,testDir,subDirName,packageName,dirLevel):
checkPattern=re.compile('^([ \t]*)check_PROGRAMS[ \t]*=.*$',re.IGNORECASE)
testPattern=re.compile('^([ \t]*)TESTSCRIPTS[ \t]*=.*$',re.IGNORECASE)
startPattern=re.compile('^([ \t]*)XFAIL_TESTS[ \t]*=.*$',re.IGNORECASE)
os.rename(testDir+"/Makefile.am",testDir+"/Makefile.am.old")
readFile=openFile(testDir+"/Makefile.am.old","r")
writeFile=openFile(testDir+"/Makefile.am","w")
searchEnd=0
levelString="../"
for i in range(0,dirLevel):
levelString +="../"
aLine=readFile.readline()
while aLine != "":
if subDirName != "":
extendedTestName=subDirName+"/t"+testName
else:
extendedTestName=testName
if type == "cc":
# add testprogram to check_PROGRAMS
if checkPattern.search(aLine):
#find / to see if the line already contains another source
front,end = aLine.split("=")
if re.search("[a-zA-Z]",end):
writeFile.write(front+" = "+extendedTestName+" \\\n")
writeFile.write("\t\t"+end)
else :
writeFile.write(front+" = "+extendedTestName+"\n")
elif startPattern.search(aLine):
writeFile.write(aLine)
writeFile.write("\n")
writeFile.write(testName+"_SOURCES = "+extendedTestName+".cc\n")
writeFile.write(testName+"_LDADD = "+levelString+"src/lib"+packageName+".la\n")
writeFile.write(testName+"_DEPENDENCIES = "+levelString+"src/lib"+packageName+".la $(LOFAR_DEPEND)\n")
else:
writeFile.write(aLine)
if type == "sh":
# add testprogram script to TESTSCRIPTS
if testPattern.search(aLine):
#find / to see if the line already contains another source
front,end = aLine.split("=")
if re.search("[a-zA-Z]",end):
writeFile.write(front+" = "+extendedTestName+".sh \\\n")
writeFile.write("\t\t"+end)
else :
writeFile.write(front+" = "+extendedTestName+".sh\n")
else:
writeFile.write(aLine)
#else:
# writeFile.write(aLine)
aLine=readFile.readline()
writeFile.close()
readFile.close()
os.unlink(testDir+"/Makefile.am.old")
def usage():
print "usage: "+sys.argv[0]+" [-h] testName [testName...]"
print "args: -h,--help - print usage"
print " testName [testName...]- name of the testClass(es) to be created."
sys.exit(2)
def main(argv):
testName = "None"
#
# get Lofar base dir
#
if "LOFARROOT" in os.environ:
lofarDir = os.environ["LOFARROOT"] + "/share"
else:
file= os.popen("echo $PWD | sed -e 's%/LOFAR/.*%/LOFAR%'")
lofarDir=str.replace(file.readline(),"\n","")
file.close()
baseDir = os.environ["PWD"]
subDirName = ""
packageName = ""
testDir = ""
# look if we are in a subdir within test
if baseDir.find("test") > -1 :
if os.path.basename(os.path.dirname(baseDir)) == "test":
testDir,subDirName=os.path.split(baseDir)
packageName=os.path.basename(os.path.dirname(testDir))
elif os.path.split(baseDir)[1] != "test":
print "Sorry, only one level of subdirs is allowed in test."
usage()
else:
packageName=os.path.basename(os.path.dirname(baseDir))
testDir=baseDir
else:
print "You have to be in the testdir or one of its subdirs to run this program."
usage()
try:
opts, args = getopt.getopt(argv, "h",
["help"])
except getopt.GetoptError:
usage()
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
if len(args) <= 0 and testName == "None":
usage()
if len(sys.argv) < 1:
usage()
# find out the directory sublevel
dirLevel=len(baseDir.split('/'))-len(testDir.split('/'))
print "Level: "+`dirLevel`
#
# Make a backup from the Original Makefile(s)
#
os.system("cp "+testDir+"/Makefile.am "+testDir+"/Makefile.am.save")
for aName in args:
testName=changeName(aName)
shortName=aName
#
# print info
#
print "Trying to set up test files and programs for " + shortName + " in package " + packageName
#
        # Check if the given testname already exists in the working directory as
# directory or as file
#
if os.path.isfile(testName+".cc"):
print "Sorry, that test already exists. Please take another name"
sys.exit(1)
#
# Create all initial files from templates
#
makeTestFiles(lofarDir,testName,packageName,testDir,subDirName,shortName,dirLevel)
#
# this is the main entry
#
if __name__ == "__main__":
main(sys.argv[1:])
print "Done"
|
amitjamadagni/sympy | refs/heads/master | examples/advanced/qft.py | 2 | #!/usr/bin/env python
"""Quantum field theory example
* http://en.wikipedia.org/wiki/Quantum_field_theory
This particular example is a work in progress. Currently it calculates the
scattering amplitude of the process:
electron + positron -> photon -> electron + positron
in QED (http://en.wikipedia.org/wiki/Quantum_electrodynamics). The aim
is to be able to do any kind of calculations in QED or standard model in
SymPy, but that's a long journey.
"""
from sympy import Basic, exp, Symbol, sin, Rational, I, Mul, Matrix, \
    ones, zeros, sqrt, pprint, simplify, Eq, sympify
from sympy.physics import msigma, mgamma
#gamma^mu
gamma0 = mgamma(0)
gamma1 = mgamma(1)
gamma2 = mgamma(2)
gamma3 = mgamma(3)
gamma5 = mgamma(5)
#sigma_i
sigma1 = msigma(1)
sigma2 = msigma(2)
sigma3 = msigma(3)
E = Symbol("E", real=True)
m = Symbol("m", real=True)
def u(p, r):
""" p = (p1, p2, p3); r = 0,1 """
assert r in [1, 2]
p1, p2, p3 = p
if r == 1:
ksi = Matrix([[1], [0]])
else:
ksi = Matrix([[0], [1]])
a = (sigma1*p1 + sigma2*p2 + sigma3*p3) / (E + m) * ksi
if a == 0:
a = zeros(2, 1)
return sqrt(E + m) * Matrix([[ksi[0, 0]], [ksi[1, 0]], [a[0, 0]], [a[1, 0]]])
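# Worked example (a sketch that follows from the definitions above): at rest,
# p = (0, 0, 0), the momentum term vanishes, the zero-check above substitutes
# zeros(2, 1), and so
#   u((0, 0, 0), 1) == sqrt(E + m) * Matrix([[1], [0], [0], [0]])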
def v(p, r):
""" p = (p1, p2, p3); r = 0,1 """
assert r in [1, 2]
p1, p2, p3 = p
if r == 1:
ksi = Matrix([[1], [0]])
else:
ksi = -Matrix([[0], [1]])
a = (sigma1*p1 + sigma2*p2 + sigma3*p3) / (E + m) * ksi
if a == 0:
a = zeros(2, 1)
return sqrt(E + m) * Matrix([[a[0, 0]], [a[1, 0]], [ksi[0, 0]], [ksi[1, 0]]])
def pslash(p):
p1, p2, p3 = p
p0 = sqrt(m**2 + p1**2 + p2**2 + p3**2)
return gamma0*p0 - gamma1*p1 - gamma2*p2 - gamma3*p3
def Tr(M):
return M.trace()
def xprint(lhs, rhs):
pprint( Eq(sympify(lhs), rhs ) )
def main():
a = Symbol("a", real=True)
b = Symbol("b", real=True)
c = Symbol("c", real=True)
p = (a, b, c)
assert u(p, 1).D * u(p, 2) == Matrix(1, 1, [0])
assert u(p, 2).D * u(p, 1) == Matrix(1, 1, [0])
p1, p2, p3 = [Symbol(x, real=True) for x in ["p1", "p2", "p3"]]
pp1, pp2, pp3 = [Symbol(x, real=True) for x in ["pp1", "pp2", "pp3"]]
k1, k2, k3 = [Symbol(x, real=True) for x in ["k1", "k2", "k3"]]
kp1, kp2, kp3 = [Symbol(x, real=True) for x in ["kp1", "kp2", "kp3"]]
p = (p1, p2, p3)
pp = (pp1, pp2, pp3)
k = (k1, k2, k3)
kp = (kp1, kp2, kp3)
mu = Symbol("mu")
e = (pslash(p) + m*ones(4))*(pslash(k) - m*ones(4))
f = pslash(p) + m*ones(4)
g = pslash(p) - m*ones(4)
#pprint(e)
xprint( 'Tr(f*g)', Tr(f*g) )
#print Tr(pslash(p) * pslash(k)).expand()
M0 = [ ( v(pp, 1).D * mgamma(mu) * u(p, 1) ) * ( u(k, 1).D * mgamma(mu, True) *
v(kp, 1) ) for mu in range(4)]
M = M0[0] + M0[1] + M0[2] + M0[3]
M = M[0]
assert isinstance(M, Basic)
#print M
#print simplify(M)
d = Symbol("d", real=True) # d=E+m
xprint('M', M)
print "-"*40
M = ((M.subs(E, d - m)).expand() * d**2 ).expand()
xprint('M2', 1/(E + m)**2 * M)
print "-"*40
x, y = M.as_real_imag()
xprint('Re(M)', x)
xprint('Im(M)', y)
e = x**2 + y**2
xprint('abs(M)**2', e)
print "-"*40
xprint('Expand(abs(M)**2)', e.expand())
#print Pauli(1)*Pauli(1)
#print Pauli(1)**2
#print Pauli(1)*2*Pauli(1)
if __name__ == "__main__":
main()
|
g19-hs/personfinder | refs/heads/master | app/pytz/zoneinfo/Greenwich.py | 9 | '''tzinfo timezone information for Greenwich.'''
from pytz.tzinfo import StaticTzInfo
from pytz.tzinfo import memorized_timedelta as timedelta
class Greenwich(StaticTzInfo):
'''Greenwich timezone definition. See datetime.tzinfo for details'''
zone = 'Greenwich'
_utcoffset = timedelta(seconds=0)
_tzname = 'GMT'
Greenwich = Greenwich()
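# Usage sketch (standard pytz StaticTzInfo API; the date is illustrative):
#   from datetime import datetime
#   dt = Greenwich.localize(datetime(2020, 1, 1, 12, 0))
#   dt.utcoffset()  # -> timedelta(0)
#   dt.tzname()     # -> 'GMT'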
|
Patreon/cartographer | refs/heads/master | example/generic_social_network/app/models/tables/post.py | 1 | from enum import Enum
from generic_social_network.app import db
from generic_social_network.app.models import EnumType
from sqlalchemy import Column, Integer, ForeignKey, String, Text
class PostType(Enum):
TEXT = 'text'
IMAGE = 'image'
ALBUM = 'album'
VIDEO = 'video'
class Post(db.Model):
__tablename__ = 'posts'
post_id = Column(Integer, primary_key=True, nullable=False)
author_id = Column(Integer, ForeignKey("users.user_id"), index=True, nullable=False)
title = Column(String(512), default='')
body = Column(Text)
type = Column(EnumType(PostType))
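# Usage sketch (assumes a configured Flask-SQLAlchemy session; the values are
# illustrative):
#   post = Post(post_id=1, author_id=42, title='hello',
#               body='first post', type=PostType.TEXT)
#   db.session.add(post)
#   db.session.commit()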
|
mjuric/duplicity | refs/heads/master | testing/unit/__init__.py | 6 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2012 Canonical Ltd
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from .. import DuplicityTestCase
class UnitTestCase(DuplicityTestCase):
pass
|
csuideal/shadowsocks | refs/heads/master | shadowsocks/encrypt.py | 990 | #!/usr/bin/env python
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
return os.urandom(length)
cached_keys = {}
def try_cipher(key, method=None):
Encryptor(key, method)
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
cached_key = '%s-%d-%d' % (password, key_len, iv_len)
r = cached_keys.get(cached_key, None)
if r:
return r
m = []
i = 0
while len(b''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = b''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[cached_key] = (key, iv)
return key, iv
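# Illustrative expansion of the loop above (count-1 EVP_BytesToKey): each MD5
# block chains the previous digest with the password. For key_len=32,
# iv_len=16 and password b'secret' this is equivalent to:
#   m1 = hashlib.md5(b'secret').digest()
#   m2 = hashlib.md5(m1 + b'secret').digest()
#   m3 = hashlib.md5(m2 + b'secret').digest()
#   key, iv = (m1 + m2 + m3)[:32], (m1 + m2 + m3)[32:48]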
class Encryptor(object):
def __init__(self, key, method):
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = b''
self.decipher = None
method = method.lower()
self._method_info = self.get_method_info(method)
if self._method_info:
self.cipher = self.get_cipher(key, method, 1,
random_string(self._method_info[1]))
else:
logging.error('method %s not supported' % method)
sys.exit(1)
def get_method_info(self, method):
method = method.lower()
m = method_supported.get(method)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv):
password = common.to_bytes(password)
m = self._method_info
if m[0] > 0:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
else:
# key_length == 0 indicates we should use the key directly
key, iv = password, b''
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
return m[2](method, key, iv, op)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if self.decipher is None:
decipher_iv_len = self._method_info[1]
decipher_iv = buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = buf[decipher_iv_len:]
if len(buf) == 0:
return buf
return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if key_len > 0:
key, _ = EVP_BytesToKey(password, key_len, iv_len)
else:
key = password
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return b''.join(result)
CIPHERS_TO_TEST = [
'aes-128-cfb',
'aes-256-cfb',
'rc4-md5',
'salsa20',
'chacha20',
'table',
]
def test_encryptor():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
encryptor = Encryptor(b'key', method)
decryptor = Encryptor(b'key', method)
cipher = encryptor.encrypt(plain)
plain2 = decryptor.decrypt(cipher)
assert plain == plain2
def test_encrypt_all():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
cipher = encrypt_all(b'key', method, 1, plain)
plain2 = encrypt_all(b'key', method, 0, cipher)
assert plain == plain2
if __name__ == '__main__':
test_encrypt_all()
test_encryptor()
|
JimLiu/shadowsocks | refs/heads/master | shadowsocks/encrypt.py | 990 | #!/usr/bin/env python
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
return os.urandom(length)
cached_keys = {}
def try_cipher(key, method=None):
Encryptor(key, method)
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
cached_key = '%s-%d-%d' % (password, key_len, iv_len)
r = cached_keys.get(cached_key, None)
if r:
return r
m = []
i = 0
while len(b''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = b''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[cached_key] = (key, iv)
return key, iv
class Encryptor(object):
def __init__(self, key, method):
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = b''
self.decipher = None
method = method.lower()
self._method_info = self.get_method_info(method)
if self._method_info:
self.cipher = self.get_cipher(key, method, 1,
random_string(self._method_info[1]))
else:
logging.error('method %s not supported' % method)
sys.exit(1)
def get_method_info(self, method):
method = method.lower()
m = method_supported.get(method)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv):
password = common.to_bytes(password)
m = self._method_info
if m[0] > 0:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
else:
# key_length == 0 indicates we should use the key directly
key, iv = password, b''
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
return m[2](method, key, iv, op)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if self.decipher is None:
decipher_iv_len = self._method_info[1]
decipher_iv = buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = buf[decipher_iv_len:]
if len(buf) == 0:
return buf
return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if key_len > 0:
key, _ = EVP_BytesToKey(password, key_len, iv_len)
else:
key = password
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return b''.join(result)
CIPHERS_TO_TEST = [
'aes-128-cfb',
'aes-256-cfb',
'rc4-md5',
'salsa20',
'chacha20',
'table',
]
def test_encryptor():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
encryptor = Encryptor(b'key', method)
decryptor = Encryptor(b'key', method)
cipher = encryptor.encrypt(plain)
plain2 = decryptor.decrypt(cipher)
assert plain == plain2
def test_encrypt_all():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
cipher = encrypt_all(b'key', method, 1, plain)
plain2 = encrypt_all(b'key', method, 0, cipher)
assert plain == plain2
if __name__ == '__main__':
test_encrypt_all()
test_encryptor()
|
nilp0inter/django-filter | refs/heads/master | django_filters/views.py | 1 | from __future__ import unicode_literals
from django.shortcuts import render_to_response
from django.template import RequestContext
from django_filters.filterset import FilterSet
def object_filter(request, model=None, queryset=None, template_name=None, extra_context=None,
context_processors=None, filter_class=None):
if model is None and filter_class is None:
raise TypeError("object_filter must be called with either model or filter_class")
if model is None:
model = filter_class._meta.model
if filter_class is None:
meta = type(str('Meta'), (object,), {'model': model})
filter_class = type(str('%sFilterSet' % model._meta.object_name),
(FilterSet,),
{'Meta': meta})
filterset = filter_class(request.GET or None, queryset=queryset)
if not template_name:
template_name = '%s/%s_filter.html' % (model._meta.app_label, model._meta.object_name.lower())
c = RequestContext(request, {
'filter': filterset,
})
if extra_context:
for k, v in extra_context.iteritems():
if callable(v):
v = v()
c[k] = v
return render_to_response(template_name, c)
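# Usage sketch (hypothetical URLconf wiring; Product is a placeholder model):
#   from django.conf.urls import url
#   from myapp.models import Product
#   urlpatterns = [
#       url(r'^products/$', object_filter, {'model': Product}),
#   ]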
|
Belxjander/Kirito | refs/heads/master | DarkWallet/Dat.Wallet/ecdsa/der.py | 26 | from __future__ import division
import binascii
import base64
from .six import int2byte, b, PY3, integer_types, text_type
class UnexpectedDER(Exception):
pass
def encode_constructed(tag, value):
return int2byte(0xa0+tag) + encode_length(len(value)) + value
def encode_integer(r):
assert r >= 0 # can't support negative numbers yet
h = ("%x" % r).encode()
if len(h) % 2:
h = b("0") + h
s = binascii.unhexlify(h)
num = s[0] if isinstance(s[0], integer_types) else ord(s[0])
if num <= 0x7f:
return b("\x02") + int2byte(len(s)) + s
else:
# DER integers are two's complement, so if the first byte is
# 0x80-0xff then we need an extra 0x00 byte to prevent it from
# looking negative.
return b("\x02") + int2byte(len(s)+1) + b("\x00") + s
def encode_bitstring(s):
return b("\x03") + encode_length(len(s)) + s
def encode_octet_string(s):
return b("\x04") + encode_length(len(s)) + s
def encode_oid(first, second, *pieces):
assert first <= 2
assert second <= 39
encoded_pieces = [int2byte(40*first+second)] + [encode_number(p)
for p in pieces]
body = b('').join(encoded_pieces)
return b('\x06') + encode_length(len(body)) + body
def encode_sequence(*encoded_pieces):
total_len = sum([len(p) for p in encoded_pieces])
return b('\x30') + encode_length(total_len) + b('').join(encoded_pieces)
def encode_number(n):
b128_digits = []
while n:
b128_digits.insert(0, (n & 0x7f) | 0x80)
n = n >> 7
if not b128_digits:
b128_digits.append(0)
b128_digits[-1] &= 0x7f
return b('').join([int2byte(d) for d in b128_digits])
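# Worked example (follows from the code): 300 = 2 * 128 + 44, so the base-128
# big-endian digits are [2, 44]; with the continuation bit set on every byte
# but the last, encode_number(300) == b'\x82\x2c'.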
def remove_constructed(string):
s0 = string[0] if isinstance(string[0], integer_types) else ord(string[0])
if (s0 & 0xe0) != 0xa0:
raise UnexpectedDER("wanted constructed tag (0xa0-0xbf), got 0x%02x"
% s0)
tag = s0 & 0x1f
length, llen = read_length(string[1:])
body = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
return tag, body, rest
def remove_sequence(string):
if not string.startswith(b("\x30")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted sequence (0x30), got 0x%02x" % n)
length, lengthlength = read_length(string[1:])
endseq = 1+lengthlength+length
return string[1+lengthlength:endseq], string[endseq:]
def remove_octet_string(string):
if not string.startswith(b("\x04")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted octetstring (0x04), got 0x%02x" % n)
length, llen = read_length(string[1:])
body = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
return body, rest
def remove_object(string):
if not string.startswith(b("\x06")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted object (0x06), got 0x%02x" % n)
length, lengthlength = read_length(string[1:])
body = string[1+lengthlength:1+lengthlength+length]
rest = string[1+lengthlength+length:]
numbers = []
while body:
n, ll = read_number(body)
numbers.append(n)
body = body[ll:]
n0 = numbers.pop(0)
first = n0//40
second = n0-(40*first)
numbers.insert(0, first)
numbers.insert(1, second)
return tuple(numbers), rest
def remove_integer(string):
if not string.startswith(b("\x02")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted integer (0x02), got 0x%02x" % n)
length, llen = read_length(string[1:])
numberbytes = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
nbytes = numberbytes[0] if isinstance(numberbytes[0], integer_types) else ord(numberbytes[0])
assert nbytes < 0x80 # can't support negative numbers yet
return int(binascii.hexlify(numberbytes), 16), rest
def read_number(string):
number = 0
llen = 0
# base-128 big endian, with b7 set in all but the last byte
while True:
if llen > len(string):
raise UnexpectedDER("ran out of length bytes")
number = number << 7
d = string[llen] if isinstance(string[llen], integer_types) else ord(string[llen])
number += (d & 0x7f)
llen += 1
if not d & 0x80:
break
return number, llen
def encode_length(l):
assert l >= 0
if l < 0x80:
return int2byte(l)
s = ("%x" % l).encode()
if len(s)%2:
s = b("0")+s
s = binascii.unhexlify(s)
llen = len(s)
return int2byte(0x80|llen) + s
def read_length(string):
num = string[0] if isinstance(string[0], integer_types) else ord(string[0])
if not (num & 0x80):
# short form
return (num & 0x7f), 1
# else long-form: b0&0x7f is number of additional base256 length bytes,
# big-endian
llen = num & 0x7f
if llen > len(string)-1:
raise UnexpectedDER("ran out of length bytes")
return int(binascii.hexlify(string[1:1+llen]), 16), 1+llen
def remove_bitstring(string):
num = string[0] if isinstance(string[0], integer_types) else ord(string[0])
if not string.startswith(b("\x03")):
raise UnexpectedDER("wanted bitstring (0x03), got 0x%02x" % num)
length, llen = read_length(string[1:])
body = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
return body, rest
# SEQUENCE([1, STRING(secexp), cont[0], OBJECT(curvename), cont[1], BITSTRING])
# signatures: (from RFC3279)
# ansi-X9-62 OBJECT IDENTIFIER ::= {
# iso(1) member-body(2) us(840) 10045 }
#
# id-ecSigType OBJECT IDENTIFIER ::= {
# ansi-X9-62 signatures(4) }
# ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
# id-ecSigType 1 }
## so 1,2,840,10045,4,1
## so 0x42, .. ..
# Ecdsa-Sig-Value ::= SEQUENCE {
# r INTEGER,
# s INTEGER }
# id-public-key-type OBJECT IDENTIFIER ::= { ansi-X9.62 2 }
#
# id-ecPublicKey OBJECT IDENTIFIER ::= { id-publicKeyType 1 }
# I think the secp224r1 identifier is (t=06,l=05,v=2b81040021)
# secp224r1 OBJECT IDENTIFIER ::= {
# iso(1) identified-organization(3) certicom(132) curve(0) 33 }
# and the secp384r1 is (t=06,l=05,v=2b81040022)
# secp384r1 OBJECT IDENTIFIER ::= {
# iso(1) identified-organization(3) certicom(132) curve(0) 34 }
def unpem(pem):
if isinstance(pem, text_type):
pem = pem.encode()
d = b("").join([l.strip() for l in pem.split(b("\n"))
if l and not l.startswith(b("-----"))])
return base64.b64decode(d)
def topem(der, name):
b64 = base64.b64encode(der)
lines = [("-----BEGIN %s-----\n" % name).encode()]
lines.extend([b64[start:start+64]+b("\n")
for start in range(0, len(b64), 64)])
lines.append(("-----END %s-----\n" % name).encode())
return b("").join(lines)
|
benthomasson/ansible | refs/heads/devel | lib/ansible/plugins/cache/base.py | 232 | # (c) 2017, ansible by Red Hat
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# moved actual classes to __init__ kept here for backward compat with 3rd parties
from ansible.plugins.cache import BaseCacheModule, BaseFileCacheModule
|
SMALLplayer/smallplayer-image-creator | refs/heads/master | storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/helpers/smilhelper.py | 1 | #===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
import string
from regexer import Regexer
#===============================================================================
# Make global object available
#===============================================================================
#from logger import Logger
class SmilHelper:
"""Class that could help with parsing of simple Smil files"""
def __init__(self, data):
"""Creates a class object with Smil <data>
Arguments:
data : string - Smil data to parse
Example data:
<smil xmlns="http://www.w3.org/2001/SMIL20/Language">
<head>
<meta name="title" content="myStream"/>
<meta name="httpBase" content="http://mydomain.com/"/>
<meta name="rtmpPlaybackBase" content="http://mydomain.com/"/>
</head>
<body>
<switch>
<video src="myStream500K@54552" system-bitrate="500000"/>
<video src="myStream900K@54552" system-bitrate="900000"/>
<video src="myStream1500K@54552" system-bitrate="1500000"/>
</switch>
</body>
"""
self.data = data
def GetBaseUrl(self):
"""Retrieves the BaseUrl from the Smil data.
From the example data it would be http://mydomain.com
"""
regex = '<meta base="([^"]+)" />'
results = Regexer.DoRegex(regex, self.data)
if len(results) > 0:
return results[0]
else:
regex = '<meta name="httpBase" content="([^"]+)"\W*/>'
results = Regexer.DoRegex(regex, self.data)
if len(results) > 0:
return results[0]
else:
return ""
def GetBestVideo(self):
"""Returns a list of video's with the highest quality.
In this case: myStream1500K@54552
"""
urls = self.GetVideosAndBitrates()
        if urls is None:
return ""
urls.sort(lambda x, y: int(y[1])-int(x[1]))
return urls[0][0]
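    # Usage sketch based on the sample document in the class docstring:
    #   helper = SmilHelper(data)
    #   helper.GetBaseUrl()   # -> "http://mydomain.com/"
    #   helper.GetBestVideo() # -> "myStream1500K@54552"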
def GetVideosAndBitrates(self):
"""Returns a list of all video's and bitrates in the Smil file.
In this case:
["myStream500K@54552", "500000"]
["myStream900K@54552", "900000"]
["myStream1500K@54552", "1500000"]
"""
regex = '<video src="([^"]+)"[^>]+system-bitrate="([^"]+)"'
results = Regexer.DoRegex(regex, self.data)
if len(results) > 0:
return results
else:
return None
def GetSubtitle(self):
""" Retrieves the URL of the included subtitle"""
regex = '<param\W*name="subtitle"[^>]*value="([^"]+)'
urls = Regexer.DoRegex(regex, self.data)
for url in urls:
if "http:" in url:
return url
else:
return "%s/%s" % (self.GetBaseUrl().rstrip("/"), url.lstrip("/"))
return ""
def StripTypeStart(self, url):
"""Strips the first part of an URL up to the first /
Arguments:
url : string - the URL to strip
Returns:
The stripped URL, duh!
Example:
mp4:/mp4root/2009-04-14/pid201_671978_T1L__671978_T6MP48_.mp4 -> /mp4root/2009-04-14/pid201_671978_T1L__671978_T6MP48_.mp4
"""
pos = string.find(url, '/')
        return url[pos:]
|
dNG-git/mp_core | refs/heads/master | src/dNG/data/upnp/resources/hook_resource.py | 1 | # -*- coding: utf-8 -*-
"""
MediaProvider
A device centric multimedia solution
----------------------------------------------------------------------------
(C) direct Netware Group - All rights reserved
https://www.direct-netware.de/redirect?mp;core
The following license agreement remains valid unless any additions or
changes are being made by direct Netware Group in a written form.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
----------------------------------------------------------------------------
https://www.direct-netware.de/redirect?licenses;gpl
----------------------------------------------------------------------------
#echo(mpCoreVersion)#
#echo(__FILEPATH__)#
"""
# pylint: disable=import-error,no-name-in-module
try: from urllib.parse import parse_qsl, unquote, urlsplit
except ImportError: from urlparse import parse_qsl, unquote, urlsplit
from dNG.plugins.hook import Hook
from dNG.runtime.not_implemented_exception import NotImplementedException
from .abstract import Abstract
class HookResource(Abstract):
"""
"HookResource" is a hook based UPnP resource.
:author: direct Netware Group et al.
:copyright: direct Netware Group - All rights reserved
:package: mp
:subpackage: core
:since: v0.2.00
:license: https://www.direct-netware.de/redirect?licenses;gpl
GNU General Public License 2
"""
def __init__(self):
"""
Constructor __init__(HookResource)
:since: v0.2.00
"""
Abstract.__init__(self)
self.hook_id = None
"""
UPnP resource's hook ID
"""
self.hook_params = { }
"""
UPnP resource hook's parameters
"""
self.virtual_resource = True
#
def add_content(self, resource):
"""
Add the given resource to the content list.
:param resource: UPnP resource
:return: (bool) True on success
:since: v0.2.00
"""
raise NotImplementedException()
#
def delete(self):
"""
Deletes this entry from the database.
:since: v0.2.00
"""
raise NotImplementedException()
#
def init(self, data):
"""
Initializes a new resource with the data given.
:param data: UPnP resource data
:return: (bool) Returns true if initialization was successful.
:since: v0.2.00
"""
if ("name" not in data): data['name'] = self.hook_id
if ("type" not in data): data['type'] = HookResource.TYPE_CDS_CONTAINER
if ("type_class" not in data): data['type_class'] = "object.container"
return Abstract.init(self, data)
#
def init_cds_id(self, _id, client_user_agent = None, deleted = False):
"""
Initialize a UPnP resource by CDS ID.
:param _id: UPnP CDS ID
:param client_user_agent: Client user agent
:param deleted: True to include deleted resources
:return: (bool) Returns true if initialization was successful.
:since: v0.2.00
"""
Abstract.init_cds_id(self, _id, client_user_agent, deleted)
_return = (self.resource_id is not None)
if (_return):
url_elements = urlsplit(self.resource_id)
url_path_elements = url_elements.path[1:].split("/", 1)
hook_id = url_path_elements[0]
hook_params = (dict(parse_qsl(unquote(url_path_elements[1]), keep_blank_values = True))
if (len(url_path_elements) == 2) else
{ }
)
resource_data = Hook.call("mp.upnp.HookResource.getResourceData", id = hook_id, **hook_params)
if (self.init(resource_data)):
self.hook_id = hook_id
self.hook_params = hook_params
else: _return = False
#
return _return
#
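	# Illustrative CDS ID layout implied by the parsing above (the scheme and
	# hook name are placeholders, not values defined by this module):
	#   "mp-hook:///my-hook-id/offset=0&limit=10"
	#   -> hook_id == "my-hook-id"
	#   -> hook_params == { "offset": "0", "limit": "10" }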
def _init_content(self):
"""
Initializes the content of a container.
:return: (bool) True if successful
:since: v0.2.00
"""
_return = False
params = self.hook_params.copy()
params['id'] = self.hook_id
params['offset'] = self.content_offset
params['limit'] = self.content_limit
children = Hook.call("mp.upnp.HookResource.getChildren", **params)
if (children is not None):
self.content = children
_return = True
#
return _return
#
def remove_content(self, resource):
"""
Removes the given resource from the content list.
:param resource: UPnP resource
:return: (bool) True on success
:since: v0.2.00
"""
raise NotImplementedException()
#
#
|
carthagecollege/django-djauth | refs/heads/master | djauth/views.py | 1 | from django.contrib.auth import SESSION_KEY, BACKEND_SESSION_KEY
import django
def loggedout(request, template_name='accounts/logged_out.html'):
"""
Remove the authenticated user's ID from the request.
"""
# django auth
try:
del request.session[SESSION_KEY]
except KeyError:
pass
try:
del request.session[BACKEND_SESSION_KEY]
except KeyError:
pass
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
from django.shortcuts import render
response = render(request, template_name)
return response
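# URLconf sketch (hypothetical wiring; the template rendered is the
# 'accounts/logged_out.html' default from the signature above):
#   from django.conf.urls import url
#   from djauth.views import loggedout
#   urlpatterns = [url(r'^loggedout/$', loggedout)]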
|
davidvon/pipa-pay-server | refs/heads/master | site-packages/migrate/changeset/schema.py | 2 | """
Schema module providing common schema operations.
"""
import warnings
from UserDict import DictMixin
import sqlalchemy
from sqlalchemy.schema import ForeignKeyConstraint
from sqlalchemy.schema import UniqueConstraint
from migrate.exceptions import *
from migrate.changeset import SQLA_07, SQLA_08
from migrate.changeset.databases.visitor import (get_engine_visitor,
run_single_visitor)
__all__ = [
'create_column',
'drop_column',
'alter_column',
'rename_table',
'rename_index',
'ChangesetTable',
'ChangesetColumn',
'ChangesetIndex',
'ChangesetDefaultClause',
'ColumnDelta',
]
def create_column(column, table=None, *p, **kw):
"""Create a column, given the table.
API to :meth:`ChangesetColumn.create`.
"""
if table is not None:
return table.create_column(column, *p, **kw)
return column.create(*p, **kw)
def drop_column(column, table=None, *p, **kw):
"""Drop a column, given the table.
API to :meth:`ChangesetColumn.drop`.
"""
if table is not None:
return table.drop_column(column, *p, **kw)
return column.drop(*p, **kw)
def rename_table(table, name, engine=None, **kw):
"""Rename a table.
If Table instance is given, engine is not used.
API to :meth:`ChangesetTable.rename`.
:param table: Table to be renamed.
:param name: New name for Table.
:param engine: Engine instance.
:type table: string or Table instance
:type name: string
:type engine: obj
"""
table = _to_table(table, engine)
table.rename(name, **kw)
def rename_index(index, name, table=None, engine=None, **kw):
"""Rename an index.
If Index instance is given,
table and engine are not used.
API to :meth:`ChangesetIndex.rename`.
:param index: Index to be renamed.
:param name: New name for index.
    :param table: Table to which the Index refers.
:param engine: Engine instance.
:type index: string or Index instance
:type name: string
:type table: string or Table instance
:type engine: obj
"""
index = _to_index(index, table, engine)
index.rename(name, **kw)
def alter_column(*p, **k):
"""Alter a column.
This is a helper function that creates a :class:`ColumnDelta` and
runs it.
:argument column:
The name of the column to be altered or a
:class:`ChangesetColumn` column representing it.
:param table:
A :class:`~sqlalchemy.schema.Table` or table name to
for the table where the column will be changed.
:param engine:
The :class:`~sqlalchemy.engine.base.Engine` to use for table
reflection and schema alterations.
:returns: A :class:`ColumnDelta` instance representing the change.
"""
if 'table' not in k and isinstance(p[0], sqlalchemy.Column):
k['table'] = p[0].table
if 'engine' not in k:
k['engine'] = k['table'].bind
# deprecation
if len(p) >= 2 and isinstance(p[1], sqlalchemy.Column):
warnings.warn(
"Passing a Column object to alter_column is deprecated."
" Just pass in keyword parameters instead.",
MigrateDeprecationWarning
)
engine = k['engine']
# enough tests seem to break when metadata is always altered
# that this crutch has to be left in until they can be sorted
# out
k['alter_metadata']=True
delta = ColumnDelta(*p, **k)
visitorcallable = get_engine_visitor(engine, 'schemachanger')
engine._run_visitor(visitorcallable, delta)
return delta
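# Usage sketch (a hedged example mirroring the docstring above; the table,
# column and new attributes are illustrative):
#   alter_column('age', table='users', engine=engine,
#                type=sqlalchemy.Integer, nullable=True)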
def _to_table(table, engine=None):
"""Return if instance of Table, else construct new with metadata"""
if isinstance(table, sqlalchemy.Table):
return table
# Given: table name, maybe an engine
meta = sqlalchemy.MetaData()
if engine is not None:
meta.bind = engine
return sqlalchemy.Table(table, meta)
def _to_index(index, table=None, engine=None):
"""Return if instance of Index, else construct new with metadata"""
if isinstance(index, sqlalchemy.Index):
return index
# Given: index name; table name required
table = _to_table(table, engine)
ret = sqlalchemy.Index(index)
ret.table = table
return ret
class ColumnDelta(DictMixin, sqlalchemy.schema.SchemaItem):
"""Extracts the differences between two columns/column-parameters
May receive parameters arranged in several different ways:
* **current_column, new_column, \*p, \*\*kw**
Additional parameters can be specified to override column
differences.
* **current_column, \*p, \*\*kw**
Additional parameters alter current_column. Table name is extracted
from current_column object.
Name is changed to current_column.name from current_name,
if current_name is specified.
* **current_col_name, \*p, \*\*kw**
Table kw must specified.
    :param table: Table to which the current Column should be bound.\
        If a table name is given, reflection will be used.
:type table: string or Table instance
:param metadata: A :class:`MetaData` instance to store
reflected table names
:param engine: When reflecting tables, either engine or metadata must \
be specified to acquire engine object.
:type engine: :class:`Engine` instance
    :returns: A :class:`ColumnDelta` instance that exposes the altered \
        attributes of `result_column` through a :func:`dict`-like object.
* :class:`ColumnDelta`.result_column is altered column with new attributes
* :class:`ColumnDelta`.current_name is current name of column in db
"""
# Column attributes that can be altered
diff_keys = ('name', 'type', 'primary_key', 'nullable',
'server_onupdate', 'server_default', 'autoincrement')
diffs = dict()
__visit_name__ = 'column'
def __init__(self, *p, **kw):
# 'alter_metadata' is not a public api. It exists purely
# as a crutch until the tests that fail when 'alter_metadata'
# behaviour always happens can be sorted out
self.alter_metadata = kw.pop("alter_metadata", False)
self.meta = kw.pop("metadata", None)
self.engine = kw.pop("engine", None)
# Things are initialized differently depending on how many column
# parameters are given. Figure out how many and call the appropriate
# method.
if len(p) >= 1 and isinstance(p[0], sqlalchemy.Column):
# At least one column specified
if len(p) >= 2 and isinstance(p[1], sqlalchemy.Column):
# Two columns specified
diffs = self.compare_2_columns(*p, **kw)
else:
# Exactly one column specified
diffs = self.compare_1_column(*p, **kw)
else:
# Zero columns specified
if not len(p) or not isinstance(p[0], basestring):
raise ValueError("First argument must be column name")
diffs = self.compare_parameters(*p, **kw)
self.apply_diffs(diffs)
def __repr__(self):
return '<ColumnDelta altermetadata=%r, %s>' % (
self.alter_metadata,
super(ColumnDelta, self).__repr__()
)
def __getitem__(self, key):
if key not in self.keys():
raise KeyError("No such diff key, available: %s" % self.diffs )
return getattr(self.result_column, key)
def __setitem__(self, key, value):
if key not in self.keys():
raise KeyError("No such diff key, available: %s" % self.diffs )
setattr(self.result_column, key, value)
def __delitem__(self, key):
raise NotImplementedError
def keys(self):
return self.diffs.keys()
def compare_parameters(self, current_name, *p, **k):
"""Compares Column objects with reflection"""
self.table = k.pop('table')
self.result_column = self._table.c.get(current_name)
if len(p):
k = self._extract_parameters(p, k, self.result_column)
return k
def compare_1_column(self, col, *p, **k):
"""Compares one Column object"""
self.table = k.pop('table', None)
if self.table is None:
self.table = col.table
self.result_column = col
if len(p):
k = self._extract_parameters(p, k, self.result_column)
return k
def compare_2_columns(self, old_col, new_col, *p, **k):
"""Compares two Column objects"""
self.process_column(new_col)
self.table = k.pop('table', None)
# we cannot use bool() on table in SA06
if self.table is None:
self.table = old_col.table
if self.table is None:
            self.table = new_col.table
self.result_column = old_col
# set differences
# leave out some stuff for later comp
for key in (set(self.diff_keys) - set(('type',))):
val = getattr(new_col, key, None)
if getattr(self.result_column, key, None) != val:
k.setdefault(key, val)
# inspect types
if not self.are_column_types_eq(self.result_column.type, new_col.type):
k.setdefault('type', new_col.type)
if len(p):
k = self._extract_parameters(p, k, self.result_column)
return k
def apply_diffs(self, diffs):
"""Populate dict and column object with new values"""
self.diffs = diffs
for key in self.diff_keys:
if key in diffs:
setattr(self.result_column, key, diffs[key])
self.process_column(self.result_column)
# create an instance of class type if not yet
if 'type' in diffs and callable(self.result_column.type):
self.result_column.type = self.result_column.type()
# add column to the table
if self.table is not None and self.alter_metadata:
self.result_column.add_to_table(self.table)
def are_column_types_eq(self, old_type, new_type):
"""Compares two types to be equal"""
ret = old_type.__class__ == new_type.__class__
# String length is a special case
if ret and isinstance(new_type, sqlalchemy.types.String):
ret = (getattr(old_type, 'length', None) == \
getattr(new_type, 'length', None))
return ret
def _extract_parameters(self, p, k, column):
"""Extracts data from p and modifies diffs"""
p = list(p)
while len(p):
if isinstance(p[0], basestring):
k.setdefault('name', p.pop(0))
elif isinstance(p[0], sqlalchemy.types.AbstractType):
k.setdefault('type', p.pop(0))
elif callable(p[0]):
p[0] = p[0]()
else:
break
if len(p):
new_col = column.copy_fixed()
new_col._init_items(*p)
k = self.compare_2_columns(column, new_col, **k)
return k
def process_column(self, column):
"""Processes default values for column"""
# XXX: this is a snippet from SA processing of positional parameters
toinit = list()
if column.server_default is not None:
if isinstance(column.server_default, sqlalchemy.FetchedValue):
toinit.append(column.server_default)
else:
toinit.append(sqlalchemy.DefaultClause(column.server_default))
if column.server_onupdate is not None:
            if isinstance(column.server_onupdate, sqlalchemy.FetchedValue):
                toinit.append(column.server_onupdate)
else:
toinit.append(sqlalchemy.DefaultClause(column.server_onupdate,
for_update=True))
if toinit:
column._init_items(*toinit)
def _get_table(self):
return getattr(self, '_table', None)
def _set_table(self, table):
if isinstance(table, basestring):
if self.alter_metadata:
if not self.meta:
raise ValueError("metadata must be specified for table"
" reflection when using alter_metadata")
meta = self.meta
if self.engine:
meta.bind = self.engine
else:
if not self.engine and not self.meta:
raise ValueError("engine or metadata must be specified"
" to reflect tables")
if not self.engine:
self.engine = self.meta.bind
meta = sqlalchemy.MetaData(bind=self.engine)
self._table = sqlalchemy.Table(table, meta, autoload=True)
elif isinstance(table, sqlalchemy.Table):
self._table = table
if not self.alter_metadata:
self._table.meta = sqlalchemy.MetaData(bind=self._table.bind)
def _get_result_column(self):
return getattr(self, '_result_column', None)
def _set_result_column(self, column):
"""Set Column to Table based on alter_metadata evaluation."""
self.process_column(column)
if not hasattr(self, 'current_name'):
self.current_name = column.name
if self.alter_metadata:
self._result_column = column
else:
self._result_column = column.copy_fixed()
table = property(_get_table, _set_table)
result_column = property(_get_result_column, _set_result_column)
class ChangesetTable(object):
"""Changeset extensions to SQLAlchemy tables."""
def create_column(self, column, *p, **kw):
"""Creates a column.
The column parameter may be a column definition or the name of
a column in this table.
API to :meth:`ChangesetColumn.create`
:param column: Column to be created
:type column: Column instance or string
"""
if not isinstance(column, sqlalchemy.Column):
# It's a column name
column = getattr(self.c, str(column))
column.create(table=self, *p, **kw)
def drop_column(self, column, *p, **kw):
"""Drop a column, given its name or definition.
API to :meth:`ChangesetColumn.drop`
        :param column: Column to be dropped
:type column: Column instance or string
"""
if not isinstance(column, sqlalchemy.Column):
# It's a column name
try:
column = getattr(self.c, str(column))
except AttributeError:
# That column isn't part of the table. We don't need
# its entire definition to drop the column, just its
# name, so create a dummy column with the same name.
column = sqlalchemy.Column(str(column), sqlalchemy.Integer())
column.drop(table=self, *p, **kw)
def rename(self, name, connection=None, **kwargs):
"""Rename this table.
:param name: New name of the table.
:type name: string
        :param connection: reuse a connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
engine = self.bind
self.new_name = name
visitorcallable = get_engine_visitor(engine, 'schemachanger')
run_single_visitor(engine, visitorcallable, self, connection, **kwargs)
# Fix metadata registration
self.name = name
self.deregister()
self._set_parent(self.metadata)
def _meta_key(self):
"""Get the meta key for this table."""
return sqlalchemy.schema._get_table_key(self.name, self.schema)
def deregister(self):
"""Remove this table from its metadata"""
if SQLA_07:
self.metadata._remove_table(self.name, self.schema)
else:
key = self._meta_key()
meta = self.metadata
if key in meta.tables:
del meta.tables[key]
class ChangesetColumn(object):
"""Changeset extensions to SQLAlchemy columns."""
def alter(self, *p, **k):
"""Makes a call to :func:`alter_column` for the column this
method is called on.
"""
if 'table' not in k:
k['table'] = self.table
if 'engine' not in k:
k['engine'] = k['table'].bind
return alter_column(self, *p, **k)
def create(self, table=None, index_name=None, unique_name=None,
primary_key_name=None, populate_default=True, connection=None, **kwargs):
"""Create this column in the database.
Assumes the given table exists. ``ALTER TABLE ADD COLUMN``,
for most databases.
:param table: Table instance to create on.
:param index_name: Creates :class:`ChangesetIndex` on this column.
:param unique_name: Creates :class:\
`~migrate.changeset.constraint.UniqueConstraint` on this column.
:param primary_key_name: Creates :class:\
`~migrate.changeset.constraint.PrimaryKeyConstraint` on this column.
:param populate_default: If True, created column will be \
populated with defaults
        :param connection: reuse a connection instead of creating a new one.
:type table: Table instance
:type index_name: string
:type unique_name: string
:type primary_key_name: string
:type populate_default: bool
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
:returns: self
"""
self.populate_default = populate_default
self.index_name = index_name
self.unique_name = unique_name
self.primary_key_name = primary_key_name
for cons in ('index_name', 'unique_name', 'primary_key_name'):
self._check_sanity_constraints(cons)
self.add_to_table(table)
engine = self.table.bind
visitorcallable = get_engine_visitor(engine, 'columngenerator')
engine._run_visitor(visitorcallable, self, connection, **kwargs)
# TODO: reuse existing connection
if self.populate_default and self.default is not None:
stmt = table.update().values({self: engine._execute_default(self.default)})
engine.execute(stmt)
return self
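    # Usage sketch (migrate's documented monkeypatched-Column pattern; the
    # names are illustrative):
    #   col = sqlalchemy.Column('notes', sqlalchemy.Text())
    #   col.create(table, populate_default=True)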
def drop(self, table=None, connection=None, **kwargs):
"""Drop this column from the database, leaving its table intact.
``ALTER TABLE DROP COLUMN``, for most databases.
        :param connection: reuse a connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
if table is not None:
self.table = table
engine = self.table.bind
visitorcallable = get_engine_visitor(engine, 'columndropper')
engine._run_visitor(visitorcallable, self, connection, **kwargs)
self.remove_from_table(self.table, unset_table=False)
self.table = None
return self
def add_to_table(self, table):
if table is not None and self.table is None:
if SQLA_07:
table.append_column(self)
else:
self._set_parent(table)
def _col_name_in_constraint(self,cons,name):
return False
def remove_from_table(self, table, unset_table=True):
# TODO: remove primary keys, constraints, etc
if unset_table:
self.table = None
to_drop = set()
for index in table.indexes:
columns = []
for col in index.columns:
if col.name!=self.name:
columns.append(col)
if columns:
index.columns = columns
if SQLA_08:
index.expressions = columns
else:
to_drop.add(index)
table.indexes = table.indexes - to_drop
to_drop = set()
for cons in table.constraints:
# TODO: deal with other types of constraint
if isinstance(cons,(ForeignKeyConstraint,
UniqueConstraint)):
for col_name in cons.columns:
if not isinstance(col_name,basestring):
col_name = col_name.name
if self.name==col_name:
to_drop.add(cons)
table.constraints = table.constraints - to_drop
if table.c.contains_column(self):
if SQLA_07:
table._columns.remove(self)
else:
table.c.remove(self)
# TODO: this is fixed in 0.6
def copy_fixed(self, **kw):
"""Create a copy of this ``Column``, with all attributes."""
return sqlalchemy.Column(self.name, self.type, self.default,
key=self.key,
primary_key=self.primary_key,
nullable=self.nullable,
quote=self.quote,
index=self.index,
unique=self.unique,
onupdate=self.onupdate,
autoincrement=self.autoincrement,
server_default=self.server_default,
server_onupdate=self.server_onupdate,
*[c.copy(**kw) for c in self.constraints])
def _check_sanity_constraints(self, name):
"""Check if constraints names are correct"""
obj = getattr(self, name)
if (getattr(self, name[:-5]) and not obj):
raise InvalidConstraintError("Column.create() accepts index_name,"
" primary_key_name and unique_name to generate constraints")
if not isinstance(obj, basestring) and obj is not None:
raise InvalidConstraintError(
"%s argument for column must be constraint name" % name)
class ChangesetIndex(object):
"""Changeset extensions to SQLAlchemy Indexes."""
__visit_name__ = 'index'
def rename(self, name, connection=None, **kwargs):
"""Change the name of an index.
:param name: New name of the Index.
:type name: string
        :param connection: reuse a connection instead of creating a new one.
:type connection: :class:`sqlalchemy.engine.base.Connection` instance
"""
engine = self.table.bind
self.new_name = name
visitorcallable = get_engine_visitor(engine, 'schemachanger')
engine._run_visitor(visitorcallable, self, connection, **kwargs)
self.name = name
class ChangesetDefaultClause(object):
"""Implements comparison between :class:`DefaultClause` instances"""
def __eq__(self, other):
if isinstance(other, self.__class__):
if self.arg == other.arg:
return True
def __ne__(self, other):
return not self.__eq__(other)
|
ssanderson/docker-py | refs/heads/master | tests/utils_test.py | 2 | import os
import os.path
import unittest
from docker.client import Client
from docker.errors import DockerException
from docker.utils import (
parse_repository_tag, parse_host, convert_filters, kwargs_from_env,
create_host_config
)
from docker.auth import resolve_authconfig
class UtilsTest(unittest.TestCase):
longMessage = True
def setUp(self):
self.os_environ = os.environ.copy()
def tearDown(self):
os.environ = self.os_environ
def test_parse_repository_tag(self):
self.assertEqual(parse_repository_tag("root"),
("root", None))
self.assertEqual(parse_repository_tag("root:tag"),
("root", "tag"))
self.assertEqual(parse_repository_tag("user/repo"),
("user/repo", None))
self.assertEqual(parse_repository_tag("user/repo:tag"),
("user/repo", "tag"))
self.assertEqual(parse_repository_tag("url:5000/repo"),
("url:5000/repo", None))
self.assertEqual(parse_repository_tag("url:5000/repo:tag"),
("url:5000/repo", "tag"))
def test_parse_host(self):
invalid_hosts = [
'0.0.0.0',
'tcp://',
'udp://127.0.0.1',
'udp://127.0.0.1:2375',
]
valid_hosts = {
'0.0.0.1:5555': 'http://0.0.0.1:5555',
':6666': 'http://127.0.0.1:6666',
'tcp://:7777': 'http://127.0.0.1:7777',
'http://:7777': 'http://127.0.0.1:7777',
'https://kokia.jp:2375': 'https://kokia.jp:2375',
'': 'http+unix://var/run/docker.sock',
None: 'http+unix://var/run/docker.sock',
'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
'unix://': 'http+unix://var/run/docker.sock'
}
for host in invalid_hosts:
try:
parsed = parse_host(host)
self.fail('Expected to fail but success: %s -> %s' % (
host, parsed
))
except DockerException:
pass
for host, expected in valid_hosts.items():
self.assertEqual(parse_host(host), expected, msg=host)
def test_kwargs_from_env(self):
os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
DOCKER_CERT_PATH=os.path.join(
os.path.dirname(__file__),
'testdata/certs'),
DOCKER_TLS_VERIFY='1')
kwargs = kwargs_from_env(assert_hostname=False)
self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
self.assertTrue('ca.pem' in kwargs['tls'].verify)
self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
self.assertTrue('key.pem' in kwargs['tls'].cert[1])
self.assertEqual(False, kwargs['tls'].assert_hostname)
try:
client = Client(**kwargs)
self.assertEqual(kwargs['base_url'], client.base_url)
self.assertEqual(kwargs['tls'].verify, client.verify)
self.assertEqual(kwargs['tls'].cert, client.cert)
except TypeError as e:
self.fail(e)
def test_convert_filters(self):
tests = [
({'dangling': True}, '{"dangling": ["true"]}'),
({'dangling': "true"}, '{"dangling": ["true"]}'),
({'exited': 0}, '{"exited": [0]}'),
({'exited': [0, 1]}, '{"exited": [0, 1]}'),
]
for filters, expected in tests:
self.assertEqual(convert_filters(filters), expected)
def test_create_host_config(self):
empty_config = create_host_config()
self.assertEqual(empty_config, {})
def test_resolve_authconfig(self):
auth_config = {
'https://index.docker.io/v1/': {'auth': 'indexuser'},
'my.registry.net': {'auth': 'privateuser'},
'http://legacy.registry.url/v1/': {'auth': 'legacyauth'}
}
# hostname only
self.assertEqual(
resolve_authconfig(auth_config, 'my.registry.net'),
{'auth': 'privateuser'}
)
# no protocol
self.assertEqual(
resolve_authconfig(auth_config, 'my.registry.net/v1/'),
{'auth': 'privateuser'}
)
# no path
self.assertEqual(
resolve_authconfig(auth_config, 'http://my.registry.net'),
{'auth': 'privateuser'}
)
# no path, trailing slash
self.assertEqual(
resolve_authconfig(auth_config, 'http://my.registry.net/'),
{'auth': 'privateuser'}
)
# no path, wrong secure protocol
self.assertEqual(
resolve_authconfig(auth_config, 'https://my.registry.net'),
{'auth': 'privateuser'}
)
# no path, wrong insecure protocol
self.assertEqual(
resolve_authconfig(auth_config, 'http://index.docker.io'),
{'auth': 'indexuser'}
)
# with path, wrong protocol
self.assertEqual(
resolve_authconfig(auth_config, 'https://my.registry.net/v1/'),
{'auth': 'privateuser'}
)
# default registry
self.assertEqual(
resolve_authconfig(auth_config), {'auth': 'indexuser'}
)
# default registry (explicit None)
self.assertEqual(
resolve_authconfig(auth_config, None), {'auth': 'indexuser'}
)
# fully explicit
self.assertEqual(
resolve_authconfig(auth_config, 'http://my.registry.net/v1/'),
{'auth': 'privateuser'}
)
# legacy entry in config
self.assertEqual(
resolve_authconfig(auth_config, 'legacy.registry.url'),
{'auth': 'legacyauth'}
)
# no matching entry
self.assertTrue(
resolve_authconfig(auth_config, 'does.not.exist') is None
)
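def _example_client_from_env():
    # Hedged sketch: the production counterpart of test_kwargs_from_env
    # above -- build a Client from whatever DOCKER_* variables are set in
    # the real environment.
    return Client(**kwargs_from_env(assert_hostname=False))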
if __name__ == '__main__':
unittest.main()
|
zhangqifan/findSomething | refs/heads/master | FindSomething/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/no-output/gyptest-no-output.py | 349 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies things don't explode when there are targets without outputs.
"""
import TestGyp
# TODO(evan): in ninja when there are no targets, there is no 'all'
# target either. Disabling this test for now.
test = TestGyp.TestGyp(formats=['!ninja'])
test.run_gyp('nooutput.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('nooutput.gyp', chdir='relocate/src')
test.pass_test()
|
sunxfancy/CodeFactory | refs/heads/master | codefactory/click/testing.py | 136 | import os
import sys
import shutil
import tempfile
import contextlib
from ._compat import iteritems, PY2
# If someone wants to vendor click, we want to ensure the
# correct package is discovered. Ideally we could use a
# relative import here but unfortunately Python does not
# support that.
clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
if PY2:
from cStringIO import StringIO
else:
import io
from ._compat import _find_binary_reader
class EchoingStdin(object):
def __init__(self, input, output):
self._input = input
self._output = output
def __getattr__(self, x):
return getattr(self._input, x)
def _echo(self, rv):
self._output.write(rv)
return rv
def read(self, n=-1):
return self._echo(self._input.read(n))
def readline(self, n=-1):
return self._echo(self._input.readline(n))
def readlines(self):
return [self._echo(x) for x in self._input.readlines()]
def __iter__(self):
return iter(self._echo(x) for x in self._input)
def __repr__(self):
return repr(self._input)
def make_input_stream(input, charset):
# Is already an input stream.
if hasattr(input, 'read'):
if PY2:
return input
rv = _find_binary_reader(input)
if rv is not None:
return rv
raise TypeError('Could not find binary reader for input stream.')
if input is None:
input = b''
elif not isinstance(input, bytes):
input = input.encode(charset)
if PY2:
return StringIO(input)
return io.BytesIO(input)
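# Hedged illustration of make_input_stream's contract on Python 3 (each
# line follows directly from the code above):
#
#     make_input_stream('hi', 'utf-8')   # -> io.BytesIO(b'hi')
#     make_input_stream(b'hi', 'utf-8')  # -> io.BytesIO(b'hi')
#     make_input_stream(None, 'utf-8')   # -> io.BytesIO(b'')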
class Result(object):
"""Holds the captured result of an invoked CLI script."""
def __init__(self, runner, output_bytes, exit_code, exception,
exc_info=None):
#: The runner that created the result
self.runner = runner
#: The output as bytes.
self.output_bytes = output_bytes
#: The exit code as integer.
self.exit_code = exit_code
        #: The exception that happened, if one did.
self.exception = exception
#: The traceback
self.exc_info = exc_info
@property
def output(self):
"""The output as unicode string."""
return self.output_bytes.decode(self.runner.charset, 'replace') \
.replace('\r\n', '\n')
def __repr__(self):
return '<Result %s>' % (
self.exception and repr(self.exception) or 'okay',
)
class CliRunner(object):
"""The CLI runner provides functionality to invoke a Click command line
    script for unit-testing purposes in an isolated environment. This only
works in single-threaded systems without any concurrency as it changes the
global interpreter state.
:param charset: the character set for the input and output data. This is
UTF-8 by default and should not be changed currently as
                    the reporting to Click only works properly in Python 2.
:param env: a dictionary with environment variables for overriding.
:param echo_stdin: if this is set to `True`, then reading from stdin writes
to stdout. This is useful for showing examples in
some circumstances. Note that regular prompts
will automatically echo the input.
"""
def __init__(self, charset=None, env=None, echo_stdin=False):
if charset is None:
charset = 'utf-8'
self.charset = charset
self.env = env or {}
self.echo_stdin = echo_stdin
def get_default_prog_name(self, cli):
"""Given a command object it will return the default program name
for it. The default is the `name` attribute or ``"root"`` if not
set.
"""
return cli.name or 'root'
def make_env(self, overrides=None):
"""Returns the environment overrides for invoking a script."""
rv = dict(self.env)
if overrides:
rv.update(overrides)
return rv
@contextlib.contextmanager
def isolation(self, input=None, env=None, color=False):
"""A context manager that sets up the isolation for invoking of a
command line tool. This sets up stdin with the given input data
and `os.environ` with the overrides from the given dictionary.
This also rebinds some internals in Click to be mocked (like the
prompt functionality).
This is automatically done in the :meth:`invoke` method.
.. versionadded:: 4.0
The ``color`` parameter was added.
:param input: the input stream to put into sys.stdin.
:param env: the environment overrides as dictionary.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
"""
input = make_input_stream(input, self.charset)
old_stdin = sys.stdin
old_stdout = sys.stdout
old_stderr = sys.stderr
old_forced_width = clickpkg.formatting.FORCED_WIDTH
clickpkg.formatting.FORCED_WIDTH = 80
env = self.make_env(env)
if PY2:
sys.stdout = sys.stderr = bytes_output = StringIO()
if self.echo_stdin:
input = EchoingStdin(input, bytes_output)
else:
bytes_output = io.BytesIO()
if self.echo_stdin:
input = EchoingStdin(input, bytes_output)
input = io.TextIOWrapper(input, encoding=self.charset)
sys.stdout = sys.stderr = io.TextIOWrapper(
bytes_output, encoding=self.charset)
sys.stdin = input
def visible_input(prompt=None):
sys.stdout.write(prompt or '')
val = input.readline().rstrip('\r\n')
sys.stdout.write(val + '\n')
sys.stdout.flush()
return val
def hidden_input(prompt=None):
sys.stdout.write((prompt or '') + '\n')
sys.stdout.flush()
return input.readline().rstrip('\r\n')
def _getchar(echo):
char = sys.stdin.read(1)
if echo:
sys.stdout.write(char)
sys.stdout.flush()
return char
default_color = color
def should_strip_ansi(stream=None, color=None):
if color is None:
return not default_color
return not color
old_visible_prompt_func = clickpkg.termui.visible_prompt_func
old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
old__getchar_func = clickpkg.termui._getchar
old_should_strip_ansi = clickpkg.utils.should_strip_ansi
clickpkg.termui.visible_prompt_func = visible_input
clickpkg.termui.hidden_prompt_func = hidden_input
clickpkg.termui._getchar = _getchar
clickpkg.utils.should_strip_ansi = should_strip_ansi
old_env = {}
try:
for key, value in iteritems(env):
old_env[key] = os.environ.get(key)
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
yield bytes_output
finally:
for key, value in iteritems(old_env):
if value is None:
try:
del os.environ[key]
except Exception:
pass
else:
os.environ[key] = value
sys.stdout = old_stdout
sys.stderr = old_stderr
sys.stdin = old_stdin
clickpkg.termui.visible_prompt_func = old_visible_prompt_func
clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
clickpkg.termui._getchar = old__getchar_func
clickpkg.utils.should_strip_ansi = old_should_strip_ansi
clickpkg.formatting.FORCED_WIDTH = old_forced_width
def invoke(self, cli, args=None, input=None, env=None,
catch_exceptions=True, color=False, **extra):
"""Invokes a command in an isolated environment. The arguments are
forwarded directly to the command line script, the `extra` keyword
arguments are passed to the :meth:`~clickpkg.Command.main` function of
the command.
This returns a :class:`Result` object.
.. versionadded:: 3.0
The ``catch_exceptions`` parameter was added.
.. versionchanged:: 3.0
The result object now has an `exc_info` attribute with the
traceback if available.
.. versionadded:: 4.0
The ``color`` parameter was added.
:param cli: the command to invoke
:param args: the arguments to invoke
:param input: the input data for `sys.stdin`.
:param env: the environment overrides.
:param catch_exceptions: Whether to catch any other exceptions than
``SystemExit``.
:param extra: the keyword arguments to pass to :meth:`main`.
:param color: whether the output should contain color codes. The
application can still override this explicitly.
"""
exc_info = None
with self.isolation(input=input, env=env, color=color) as out:
exception = None
exit_code = 0
try:
cli.main(args=args or (),
prog_name=self.get_default_prog_name(cli), **extra)
except SystemExit as e:
if e.code != 0:
exception = e
exc_info = sys.exc_info()
exit_code = e.code
if not isinstance(exit_code, int):
sys.stdout.write(str(exit_code))
sys.stdout.write('\n')
exit_code = 1
except Exception as e:
if not catch_exceptions:
raise
exception = e
exit_code = -1
exc_info = sys.exc_info()
finally:
sys.stdout.flush()
output = out.getvalue()
return Result(runner=self,
output_bytes=output,
exit_code=exit_code,
exception=exception,
exc_info=exc_info)
@contextlib.contextmanager
def isolated_filesystem(self):
"""A context manager that creates a temporary folder and changes
the current working directory to it for isolated filesystem tests.
"""
cwd = os.getcwd()
t = tempfile.mkdtemp()
os.chdir(t)
try:
yield t
finally:
os.chdir(cwd)
try:
shutil.rmtree(t)
except (OSError, IOError):
pass
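def _example_invoke():
    # Hedged sketch of typical CliRunner usage, matching the invoke()
    # docstring above; the `hello` command is hypothetical and defined
    # inline purely for the demonstration.
    import click

    @click.command()
    @click.argument('name')
    def hello(name):
        click.echo('Hello %s!' % name)

    runner = CliRunner()
    result = runner.invoke(hello, ['Peter'])
    assert result.exit_code == 0
    assert result.output == 'Hello Peter!\n'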
|
petrvanblokland/Xierpa3 | refs/heads/master | xierpa3/builders/indesignbuilder.py | 1 | # -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
# xierpa server
# Copyright (c) 2014+ buro@petr.com, www.petr.com, www.xierpa.com
#
# X I E R P A 3
# Distribution by the MIT License.
#
# -----------------------------------------------------------------------------
#
# indesignbuilder.py
#
# https://www.adobe.com/content/dam/Adobe/en/devnet/indesign/sdk/cs6/scripting/InDesign_ScriptingTutorial.pdf
# http://wwwimages.adobe.com/www.adobe.com/content/dam/Adobe/en/devnet/indesign/sdk/cs6/scripting/InDesign_ScriptingGuide_JS.pdf
#
from xierpa3.builders.javascriptbuilder import JavaScriptBuilder
from xierpa3.builders.builderparts.xmltransformerpart import XmlTransformerPart
from xierpa3.toolbox.transformer import TX
class InDesignBuilder(XmlTransformerPart, JavaScriptBuilder):
u"""The InDesignBuilder allows to create InDesign JavaScript, that build the document
of the calling site instance.
"""
# Get Constants->Config as class variable, so inheriting classes can redefine values.
C = JavaScriptBuilder.C
ID = 'indesign' # Dispatcher id of this builder
EXTENSION = 'jsx'
INDESIGN_USER = 'petr'
INDESIGN_VERSION = '9.0'
INDESIGN_LANGUAGE = 'en_US'
# Export path, needs user name to complete the path to InDesign script folder.
PATH_EXPORT = '/Users/%s/Library/Preferences/Adobe InDesign/Version %s/%s/Scripts/Scripts Panel/Xierpa3' % (INDESIGN_USER, INDESIGN_VERSION, INDESIGN_LANGUAGE)
def theme(self, component):
self.comment('Document %s' % component.name)
self.output("var myDocument = app.documents.add();\n") # Change for multiple pages inside one document
self.output("var myPage;\n")
self.output("var myTextFrame;\n")
def _theme(self, component):
pass
def page(self, component):
self.output("myPage = myDocument.pages.add();\n")
def _page(self, component):
pass
def text(self, s):
if s is not None:
self.output("""myTextFrame.contents = myTextFrame.contents.concat("%s");\n""" % s.replace('"', '\"').replace('\n', u'¶'))
# M E A S U R E
@classmethod
def M(cls, attribute):
u"""Measurement conversions dispatcher for CSS-like attributes to InDesign units."""
try:
v = attribute.raw
hook = 'm_' + v[0]
v = v[1:]
if hasattr(cls, hook):
v = getattr(cls, hook)(v)
except AttributeError:
v = attribute or 0
# @@@ For now
if isinstance(v, basestring) and v.endswith('%'):
v = 250
return '%spt' % v
@classmethod
def m_Em(cls, value):
return value[0] * 12 # Hard translation from Em to pts for now.
# T A G S
def div(self, **kwargs):
x = kwargs.get('x', kwargs.get('marginleft', 20))
y = kwargs.get('y', kwargs.get('margintop', 20))
w = kwargs.get('width') or 400
h = kwargs.get('height') or 200
self.output("myTextFrame = myPage.textFrames.add();\n")
self.output("""myTextFrame.geometricBounds = ["%s", "%s", "%s", "%s"];\n""" % (self.M(x), self.M(y), self.M(w), self.M(h)))
self.output("""myTextFrame.contents = "";\n""")
def _div(self, comment=None):
if comment is not None:
self.comment(comment)
def h2(self, **kwargs):
self.text('[h2] ')
def _h2(self):
self.text('[/h2] ')
def h4(self, **kwargs):
self.text('[h4] ')
def _h4(self):
self.text('[/h4] ')
def h5(self, **kwargs):
self.text('[h5] ')
def _h5(self):
self.text('[/h5] ')
def p(self, **kwargs):
self.text('[p] ')
def _p(self):
self.text('[/p] ')
def sup(self, **kwargs):
self.text('[sup] ')
def _sup(self):
self.text('[/sup] ')
def pre(self, **kwargs):
self.text('[pre] ')
def _pre(self):
self.text('[/pre] ')
def em(self, **kwargs):
self.text('[em] ')
def _em(self):
self.text('[/em] ')
def blockquote(self, **kwargs):
self.text('[blockquote] ')
def _blockquote(self):
self.text('[/blockquote] ')
def span(self, **kwargs):
self.text('[span] ')
def _span(self):
self.text('[/span] ')
def nav(self, **kwargs):
self.text('[nav] ')
def _nav(self):
self.text('[/nav] ')
def img(self, **kwargs):
self.text('[img] ')
def _img(self):
self.text('[/img] ')
def ul(self, **kwargs):
self.text('[ul] ')
def _ul(self):
        self.text('[/ul] ')
def ol(self, **kwargs):
self.text('[ol] ')
def _ol(self):
        self.text('[/ol] ')
def li(self, **kwargs):
self.text('[li] ')
def _li(self):
        self.text('[/li] ')
def a(self, **kwargs):
self.text('[a] ')
def _a(self):
        self.text('[/a] ')
# I / O
def save(self, filePath):
if not filePath.endswith(self.EXTENSION):
filePath += '.' + self.EXTENSION
if not filePath.startswith('/'):
filePath = '/' + filePath
JavaScriptBuilder.save(self, self.PATH_EXPORT + filePath)
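# Hedged usage sketch (component names are illustrative, not part of this
# module): a theme/page component tree is rendered through the builder and
# saved as a .jsx script into the Scripts Panel folder from PATH_EXPORT.
#
#     builder = InDesignBuilder()
#     builder.theme(someThemeComponent)   # opens a new document
#     builder.page(somePageComponent)     # adds a page to it
#     builder.save('exampleDocument')     # writes exampleDocument.jsx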
|
FrodeSolheim/fs-uae-launcher | refs/heads/master | arcade/glui/gamecenterrunner.py | 3 | import os
import threading
from arcade.glui.state import State
from fsbc.settings import Settings
from fsui.qt import QCursor
class GameCenterRunner(object):
def __init__(
self, controller=None, platform=None, name=None, config=None, **_
):
self.controller = controller
self.platform_name = platform
self.game_name = name
self.config_name = config
self.done = False
self.status = ""
self.error = ""
self.backtrace = None
def prepare(self):
self.done = False
threading.Thread(
target=self._prepare_thread, name="GameRunnerPrepareThread"
).start()
def _prepare_thread(self):
try:
self._do_prepare()
except Exception as e:
import traceback
traceback.print_exc()
self.error = repr(e)
self.backtrace = traceback.format_exc()
self.done = True
self.signal()
def _do_prepare(self):
self.controller.prepare()
self.controller.install()
def configure(self):
self.done = True
# self.status = ""
# self.error = ""
# self.done = False
# threading.Thread(target=self._configure_thread).start()
#
# def _configure_thread(self):
# try:
# self._do_configure()
# except Exception as e:
# import traceback
# traceback.print_exc()
# # signal error by setting status on done
# self.error = repr(e)
# self.backtrace = traceback.format_exc()
# self.done = True
# self.signal()
#
# def _do_configure(self):
# # self.controller.configure_game()
# pass
def run(self):
self.status = ""
self.error = ""
self.done = False
cursor_position = os.environ.get("FSGS_RETURN_CURSOR_TO", "")
try:
x, y = cursor_position.split(",")
x = int(x)
y = int(y)
except ValueError:
pass
else:
# noinspection PyCallByClass,PyTypeChecker
QCursor.setPos(x, y)
# cursor_x = Settings.instance().get("__cursor_x")
# cursor_y = Settings.instance().get("__cursor_y")
# try:
# cursor_x = int(cursor_x)
# cursor_y = int(cursor_y)
# except ValueError:
# pass
# else:
# QCursor.setPos(cursor_x, cursor_y)
threading.Thread(
target=self._run_thread, name="GameRunnerRunThread"
).start()
def abort(self):
self.controller.abort()
def _run_thread(self):
try:
self._do_run()
except Exception as e:
import traceback
traceback.print_exc()
self.error = repr(e)
self.backtrace = traceback.format_exc()
self.done = True
self.signal()
def _do_run(self):
# if fs.windows:
# pass
# elif fs.macosx:
# pass
# else:
# # prevent flashing mouse cursor when launching programs
# # by moving cursor to the bottom right of the screen
# import gtk.gdk
# display = gtk.gdk.display_get_default()
# screen = display.get_default_screen()
# display.warp_pointer(screen, screen.get_width() - 1,
# screen.get_height() - 1)
# size = get_fullscreen_size()
# pygame.mouse.set_cursor((8, 1), (0, 0), (0,), (0,))
# pygame.mouse.set_visible(True)
# pygame.mouse.set_pos(size[0] - 1, size[1] - 0)
state = State.get()
try:
print("state.game_running = True")
state.game_running = True
self.controller.run()
self.controller.wait()
finally:
print("state.game_running = False")
state.game_running = False
self.controller.finish()
def signal(self):
# create a dummy event to wake up the main thread
# event = pygame.event.Event(pygame.USEREVENT)
# pygame.event.post(event)
pass
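# Hedged usage sketch: GameCenterRunner drives a controller object that is
# expected to expose prepare()/install()/run()/wait()/finish()/abort().
# The controller below is purely hypothetical.
#
#     runner = GameCenterRunner(controller=some_controller,
#                               platform="Amiga", name="Example Game")
#     runner.prepare()              # spawns GameRunnerPrepareThread
#     ...poll runner.done or wait for signal()...
#     runner.run()                  # spawns GameRunnerRunThread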
|
Cactuslegs/audacity-of-nope | refs/heads/master | lib-src/lv2/sord/waflib/Tools/gnu_dirs.py | 329 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils,Options,Context
_options=[x.split(', ')for x in'''
bindir, user executables, ${EXEC_PREFIX}/bin
sbindir, system admin executables, ${EXEC_PREFIX}/sbin
libexecdir, program executables, ${EXEC_PREFIX}/libexec
sysconfdir, read-only single-machine data, ${PREFIX}/etc
sharedstatedir, modifiable architecture-independent data, ${PREFIX}/com
localstatedir, modifiable single-machine data, ${PREFIX}/var
libdir, object code libraries, ${EXEC_PREFIX}/lib
includedir, C header files, ${PREFIX}/include
oldincludedir, C header files for non-gcc, /usr/include
datarootdir, read-only arch.-independent data root, ${PREFIX}/share
datadir, read-only architecture-independent data, ${DATAROOTDIR}
infodir, info documentation, ${DATAROOTDIR}/info
localedir, locale-dependent data, ${DATAROOTDIR}/locale
mandir, man documentation, ${DATAROOTDIR}/man
docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE}
htmldir, html documentation, ${DOCDIR}
dvidir, dvi documentation, ${DOCDIR}
pdfdir, pdf documentation, ${DOCDIR}
psdir, ps documentation, ${DOCDIR}
'''.split('\n')if x]
def configure(conf):
def get_param(varname,default):
return getattr(Options.options,varname,'')or default
env=conf.env
env.LIBDIR=env.BINDIR=[]
env.EXEC_PREFIX=get_param('EXEC_PREFIX',env.PREFIX)
env.PACKAGE=getattr(Context.g_module,'APPNAME',None)or env.PACKAGE
complete=False
iter=0
while not complete and iter<len(_options)+1:
iter+=1
complete=True
for name,help,default in _options:
name=name.upper()
if not env[name]:
try:
env[name]=Utils.subst_vars(get_param(name,default).replace('/',os.sep),env)
except TypeError:
complete=False
if not complete:
lst=[name for name,_,_ in _options if not env[name.upper()]]
raise conf.errors.WafError('Variable substitution failure %r'%lst)
def options(opt):
inst_dir=opt.add_option_group('Installation directories','By default, "waf install" will put the files in\
"/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\
than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"')
for k in('--prefix','--destdir'):
option=opt.parser.get_option(k)
if option:
opt.parser.remove_option(k)
inst_dir.add_option(option)
inst_dir.add_option('--exec-prefix',help='installation prefix [Default: ${PREFIX}]',default='',dest='EXEC_PREFIX')
dirs_options=opt.add_option_group('Pre-defined installation directories','')
for name,help,default in _options:
option_name='--'+name
str_default=default
str_help='%s [Default: %s]'%(help,str_default)
dirs_options.add_option(option_name,help=str_help,default='',dest=name.upper())
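# Illustrative expansion of the defaults above, assuming PREFIX=/usr/local
# and EXEC_PREFIX left at its default of ${PREFIX}:
#   BINDIR      -> /usr/local/bin
#   SYSCONFDIR  -> /usr/local/etc
#   DATAROOTDIR -> /usr/local/share
#   MANDIR      -> /usr/local/share/man
#   DOCDIR      -> /usr/local/share/doc/${PACKAGE}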
|
duyet-website/api.duyet.net | refs/heads/master | lib/faker/providers/internet/de_AT/__init__.py | 19 | # coding=utf-8
from __future__ import unicode_literals
from .. import Provider as InternetProvider
class Provider(InternetProvider):
free_email_domains = (
'chello.at', 'gmail.com', 'gmx.at', 'kabsi.at',
)
tlds = ('at', 'co.at', 'com', 'net', 'org', )
replacements = (
('ä', 'ae'), ('Ä', 'Ae'),
('ö', 'oe'), ('Ö', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
('ß', 'ss'),
)
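# Hedged usage sketch (the exact factory call depends on the installed
# Faker version):
#
#     from faker import Factory
#     fake = Factory.create('de_AT')
#     fake.free_email()    # e.g. 'max.muster@gmx.at'
#     fake.domain_name()   # e.g. 'muster.co.at'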
|
cliqz/socorro | refs/heads/master | socorro/unittest/collector/test_collector_app.py | 10 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
from nose.tools import eq_
from socorro.collector.collector_app import CollectorApp
from socorro.collector.wsgi_breakpad_collector import BreakpadCollector
from socorro.unittest.testbase import TestCase
from configman.dotdict import DotDict
class TestCollectorApp(TestCase):
def get_standard_config(self):
config = DotDict()
config.logger = mock.MagicMock()
config.collector = DotDict()
config.collector.collector_class = BreakpadCollector
config.collector.dump_id_prefix = 'bp-'
config.collector.dump_field = 'dump'
config.collector.accept_submitted_crash_id = False
config.throttler = DotDict()
self.mocked_throttler = mock.MagicMock()
config.throttler.throttler_class = mock.MagicMock(
return_value=self.mocked_throttler)
config.storage = mock.MagicMock()
self.mocked_crash_storage = mock.MagicMock()
config.storage.crashstorage_class = mock.MagicMock(
return_value=self.mocked_crash_storage
)
config.web_server = mock.MagicMock()
self.mocked_web_server = mock.MagicMock()
config.web_server.wsgi_server_class = mock.MagicMock(
return_value=self.mocked_web_server
)
return config
def test_main(self):
config = self.get_standard_config()
c = CollectorApp(config)
c.main()
eq_(config.crash_storage, self.mocked_crash_storage)
eq_(config.throttler, self.mocked_throttler)
eq_(c.web_server, self.mocked_web_server)
config.storage.crashstorage_class.assert_called_with(config.storage)
config.web_server.wsgi_server_class.assert_called_with(
config,
(BreakpadCollector, )
)
|
sharifulgeo/networkx | refs/heads/master | networkx/utils/misc.py | 14 | """
Miscellaneous Helpers for NetworkX.
These are not imported into the base networkx namespace but
can be accessed, for example, as
>>> import networkx
>>> networkx.utils.is_string_like('spam')
True
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import sys
import uuid
# itertools.accumulate is only available on Python 3.2 or later.
#
# Once support for Python versions less than 3.2 is dropped, this code should
# be removed.
try:
from itertools import accumulate
except ImportError:
import operator
# The code for this function is from the Python 3.5 documentation,
# distributed under the PSF license:
# <https://docs.python.org/3.5/library/itertools.html#itertools.accumulate>
def accumulate(iterable, func=operator.add):
it = iter(iterable)
try:
total = next(it)
except StopIteration:
return
yield total
for element in it:
total = func(total, element)
yield total
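# Quick check of the fallback's behaviour (identical to
# itertools.accumulate):
#
#     list(accumulate([1, 2, 3, 4]))  # -> [1, 3, 6, 10]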
__author__ = '\n'.join(['Aric Hagberg (hagberg@lanl.gov)',
'Dan Schult(dschult@colgate.edu)',
'Ben Edwards(bedwards@cs.unm.edu)'])
### some cookbook stuff
# used in deciding whether something is a bunch of nodes, edges, etc.
# see G.add_nodes and others in Graph Class in networkx/base.py
def is_string_like(obj): # from John Hunter, types-free version
"""Check if obj is string."""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def iterable(obj):
""" Return True if obj is iterable with a well-defined len()."""
if hasattr(obj,"__iter__"): return True
try:
len(obj)
    except TypeError:
return False
return True
def flatten(obj, result=None):
""" Return flattened version of (possibly nested) iterable object. """
if not iterable(obj) or is_string_like(obj):
return obj
if result is None:
result = []
for item in obj:
if not iterable(item) or is_string_like(item):
result.append(item)
else:
flatten(item, result)
return obj.__class__(result)
def is_list_of_ints( intlist ):
""" Return True if list is a list of ints. """
if not isinstance(intlist,list): return False
for i in intlist:
if not isinstance(i,int): return False
return True
PY2 = sys.version_info[0] == 2
if PY2:
def make_str(x):
"""Return the string representation of t."""
if isinstance(x, unicode):
return x
else:
# Note, this will not work unless x is ascii-encoded.
# That is good, since we should be working with unicode anyway.
# Essentially, unless we are reading a file, we demand that users
# convert any encoded strings to unicode before using the library.
#
# Also, the str() is necessary to convert integers, etc.
# unicode(3) works, but unicode(3, 'unicode-escape') wants a buffer.
#
return unicode(str(x), 'unicode-escape')
else:
def make_str(x):
"""Return the string representation of t."""
return str(x)
def generate_unique_node():
""" Generate a unique node label."""
return str(uuid.uuid1())
def default_opener(filename):
"""Opens `filename` using system's default program.
Parameters
----------
filename : str
The path of the file to be opened.
"""
from subprocess import call
cmds = {'darwin': ['open'],
'linux2': ['xdg-open'],
'win32': ['cmd.exe', '/C', 'start', '']}
cmd = cmds[sys.platform] + [filename]
call(cmd)
def dict_to_numpy_array(d,mapping=None):
"""Convert a dictionary of dictionaries to a numpy array
with optional mapping."""
try:
return dict_to_numpy_array2(d, mapping)
except (AttributeError, TypeError):
# AttributeError is when no mapping was provided and v.keys() fails.
# TypeError is when a mapping was provided and d[k1][k2] fails.
return dict_to_numpy_array1(d,mapping)
def dict_to_numpy_array2(d,mapping=None):
"""Convert a dictionary of dictionaries to a 2d numpy array
with optional mapping.
"""
import numpy
if mapping is None:
s=set(d.keys())
for k,v in d.items():
s.update(v.keys())
mapping=dict(zip(s,range(len(s))))
n=len(mapping)
a = numpy.zeros((n, n))
for k1, i in mapping.items():
for k2, j in mapping.items():
try:
a[i,j]=d[k1][k2]
except KeyError:
pass
return a
def dict_to_numpy_array1(d,mapping=None):
"""Convert a dictionary of numbers to a 1d numpy array
with optional mapping.
"""
import numpy
if mapping is None:
s = set(d.keys())
mapping = dict(zip(s,range(len(s))))
n = len(mapping)
a = numpy.zeros(n)
    for k1, i in mapping.items():
        a[i] = d[k1]
return a
def is_iterator(obj):
"""Returns ``True`` if and only if the given object is an iterator
object.
"""
has_next_attr = hasattr(obj, '__next__') or hasattr(obj, 'next')
return iter(obj) is obj and has_next_attr
def arbitrary_element(iterable):
"""Returns an arbitrary element of ``iterable`` without removing it.
This is most useful for "peeking" at an arbitrary element of a set,
but can be used for any list, dictionary, etc., as well::
>>> arbitrary_element({3, 2, 1})
1
>>> arbitrary_element('hello')
'h'
This function raises a :exc:`ValueError` if ``iterable`` is an
iterator (because the current implementation of this function would
consume an element from the iterator)::
>>> iterator = iter([1, 2, 3])
>>> arbitrary_element(iterator)
Traceback (most recent call last):
...
ValueError: cannot return an arbitrary item from an iterator
"""
if is_iterator(iterable):
raise ValueError('cannot return an arbitrary item from an iterator')
# Another possible implementation is `for x in iterable: return x`.
return next(iter(iterable))
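def _misc_examples():
    """Hedged, hand-checkable demonstrations of the helpers above."""
    assert is_string_like('spam')
    assert flatten((1, (2, (3,)))) == (1, 2, 3)
    assert is_list_of_ints([1, 2, 3]) and not is_list_of_ints([1, 'a'])
    assert not is_iterator([1, 2, 3])  # a list is iterable, not an iterator
    assert arbitrary_element('hello') == 'h'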
|
JDShu/SCOPE | refs/heads/master | askbot/conf/sidebar_question.py | 4 | """
Sidebar settings
"""
from askbot.conf.settings_wrapper import settings
from askbot.deps.livesettings import ConfigurationGroup
from askbot.deps.livesettings import values
from django.utils.translation import ugettext as _
from askbot.conf.super_groups import CONTENT_AND_UI
SIDEBAR_QUESTION = ConfigurationGroup(#shitty name - why sidebar?
'SIDEBAR_QUESTION',
_('Question page banners and sidebar'),
super_group = CONTENT_AND_UI
)
settings.register(
values.LongStringValue(
SIDEBAR_QUESTION,
'QUESTION_PAGE_TOP_BANNER',
description = _('Top banner'),
default = '',
help_text = _(
'When using this option, please '
'use the HTML validation service to make sure that '
'your input is valid and works well in all browsers.'
)
)
)
settings.register(
values.LongStringValue(
SIDEBAR_QUESTION,
'SIDEBAR_QUESTION_HEADER',
description = _('Custom sidebar header'),
default = '',
help_text = _(
            'Use this area to enter content at the TOP of the sidebar '
'in HTML format. When using this option '
'(as well as the sidebar footer), please '
'use the HTML validation service to make sure that '
'your input is valid and works well in all browsers.'
)
)
)
settings.register(
values.BooleanValue(
SIDEBAR_QUESTION,
'SIDEBAR_QUESTION_SHOW_TAGS',
description = _('Show tag list in sidebar'),
help_text = _(
'Uncheck this if you want to hide the tag '
'list from the sidebar '
),
default = True
)
)
settings.register(
values.BooleanValue(
SIDEBAR_QUESTION,
'SIDEBAR_QUESTION_SHOW_META',
description = _('Show meta information in sidebar'),
help_text = _(
'Uncheck this if you want to hide the meta '
'information about the question (post date, '
'views, last updated). '
),
default = True
)
)
settings.register(
values.BooleanValue(
SIDEBAR_QUESTION,
'SIDEBAR_QUESTION_SHOW_RELATED',
description = _('Show related questions in sidebar'),
help_text = _(
'Uncheck this if you want to hide the list '
'of related questions. '
),
default = True
)
)
settings.register(
values.LongStringValue(
SIDEBAR_QUESTION,
'SIDEBAR_QUESTION_FOOTER',
description = _('Custom sidebar footer'),
default = '',
help_text = _(
            'Use this area to enter content at the BOTTOM of the sidebar '
'in HTML format. When using this option '
'(as well as the sidebar header), please '
'use the HTML validation service to make sure that '
'your input is valid and works well in all browsers.'
)
)
)
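# Hedged usage sketch: values registered above are read back elsewhere in
# askbot through the same settings wrapper, e.g.:
#
#     from askbot.conf import settings as askbot_settings
#     if askbot_settings.SIDEBAR_QUESTION_SHOW_TAGS:
#         ...  # render the tag list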
|
bjorand/django-allauth | refs/heads/master | allauth/socialaccount/providers/fxa/urls.py | 73 | from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import FirefoxAccountsProvider
urlpatterns = default_urlpatterns(FirefoxAccountsProvider)
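# Hedged sketch: these patterns are normally pulled in automatically via
# allauth's root urlconf, e.g. in a project's urls.py:
#
#     urlpatterns = [
#         url(r'^accounts/', include('allauth.urls')),
#     ]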
|
hujiajie/chromium-crosswalk | refs/heads/master | tools/telemetry/third_party/pyfakefs/pyfakefs/fake_filesystem_shutil.py | 23 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable-msg=W0612,W0613,C6409
"""A fake shutil module implementation that uses fake_filesystem for unit tests.
Includes:
FakeShutil: Uses a FakeFilesystem to provide a fake replacement for the
shutil module.
Usage:
>>> import fake_filesystem
>>> import fake_filesystem_shutil
>>> filesystem = fake_filesystem.FakeFilesystem()
>>> shutil_module = fake_filesystem_shutil.FakeShutilModule(filesystem)
Copy a fake_filesystem directory tree:
>>> new_file = filesystem.CreateFile('/src/new-file')
>>> shutil_module.copytree('/src', '/dst')
>>> filesystem.Exists('/dst/new-file')
True
Remove a fake_filesystem directory tree:
>>> shutil_module.rmtree('/src')
>>> filesystem.Exists('/src/new-file')
False
"""
import errno
import os
import shutil
import stat
__pychecker__ = 'no-reimportself'
_PERM_WRITE = 0o200 # Write permission bit.
_PERM_READ = 0o400 # Read permission bit.
_PERM_ALL = 0o7777 # All permission bits.
class FakeShutilModule(object):
"""Uses a FakeFilesystem to provide a fake replacement for shutil module."""
def __init__(self, filesystem):
"""Construct fake shutil module using the fake filesystem.
Args:
filesystem: FakeFilesystem used to provide file system information
"""
self.filesystem = filesystem
self._shutil_module = shutil
def rmtree(self, path, ignore_errors=False, onerror=None):
"""Remove a directory and all its contents.
Args:
path: (str) Directory tree to remove.
ignore_errors: (bool) unimplemented
onerror: (func) unimplemented
"""
self.filesystem.RemoveObject(path)
def copy(self, src, dst):
"""Copy data and mode bits ("cp src dst").
Args:
src: (str) source file
dst: (str) destination, may be a directory
"""
if self.filesystem.Exists(dst):
if stat.S_ISDIR(self.filesystem.GetObject(dst).st_mode):
dst = self.filesystem.JoinPaths(dst, os.path.basename(src))
self.copyfile(src, dst)
src_object = self.filesystem.GetObject(src)
dst_object = self.filesystem.GetObject(dst)
dst_object.st_mode = ((dst_object.st_mode & ~_PERM_ALL) |
(src_object.st_mode & _PERM_ALL))
def copyfile(self, src, dst):
"""Copy data from src to dst.
Args:
src: (str) source file
dst: (dst) destination file
Raises:
IOError: if the file can't be copied
shutil.Error: if the src and dst files are the same
"""
src_file_object = self.filesystem.GetObject(src)
if not src_file_object.st_mode & _PERM_READ:
raise IOError(errno.EACCES, 'Permission denied', src)
if stat.S_ISDIR(src_file_object.st_mode):
raise IOError(errno.EISDIR, 'Is a directory', src)
dst_dir = os.path.dirname(dst)
if dst_dir:
if not self.filesystem.Exists(dst_dir):
raise IOError(errno.ENOTDIR, 'Not a directory', dst)
dst_dir_object = self.filesystem.GetObject(dst_dir)
if not dst_dir_object.st_mode & _PERM_WRITE:
raise IOError(errno.EACCES, 'Permission denied', dst_dir)
abspath_src = self.filesystem.NormalizePath(
self.filesystem.ResolvePath(src))
abspath_dst = self.filesystem.NormalizePath(
self.filesystem.ResolvePath(dst))
if abspath_src == abspath_dst:
raise shutil.Error('`%s` and `%s` are the same file' % (src, dst))
if self.filesystem.Exists(dst):
dst_file_object = self.filesystem.GetObject(dst)
if stat.S_ISDIR(dst_file_object.st_mode):
raise IOError(errno.EISDIR, 'Is a directory', dst)
if not dst_file_object.st_mode & _PERM_WRITE:
raise IOError(errno.EACCES, 'Permission denied', dst)
dst_file_object.SetContents(src_file_object.contents)
else:
self.filesystem.CreateFile(dst, contents=src_file_object.contents)
def copystat(self, src, dst):
"""Copy all stat info (mode bits, atime, and mtime) from src to dst.
Args:
src: (str) source file
dst: (str) destination file
"""
src_object = self.filesystem.GetObject(src)
dst_object = self.filesystem.GetObject(dst)
dst_object.st_mode = ((dst_object.st_mode & ~_PERM_ALL) |
(src_object.st_mode & _PERM_ALL))
dst_object.st_uid = src_object.st_uid
dst_object.st_gid = src_object.st_gid
dst_object.st_atime = src_object.st_atime
dst_object.st_mtime = src_object.st_mtime
def copy2(self, src, dst):
"""Copy data and all stat info ("cp -p src dst").
Args:
src: (str) source file
dst: (str) destination, may be a directory
"""
if self.filesystem.Exists(dst):
if stat.S_ISDIR(self.filesystem.GetObject(dst).st_mode):
dst = self.filesystem.JoinPaths(dst, os.path.basename(src))
self.copyfile(src, dst)
self.copystat(src, dst)
def copytree(self, src, dst, symlinks=False):
"""Recursively copy a directory tree.
Args:
src: (str) source directory
dst: (str) destination directory, must not already exist
symlinks: (bool) copy symlinks as symlinks instead of copying the
contents of the linked files. Currently unused.
Raises:
OSError: if src is missing or isn't a directory
"""
self.filesystem.CreateDirectory(dst)
try:
directory = self.filesystem.GetObject(src)
except IOError as e:
      raise OSError(e.errno, e.strerror)
if not stat.S_ISDIR(directory.st_mode):
raise OSError(errno.ENOTDIR,
'Fake os module: %r not a directory' % src)
for name in directory.contents:
srcname = self.filesystem.JoinPaths(src, name)
dstname = self.filesystem.JoinPaths(dst, name)
src_mode = self.filesystem.GetObject(srcname).st_mode
if stat.S_ISDIR(src_mode):
self.copytree(srcname, dstname, symlinks)
else:
self.copy2(srcname, dstname)
def move(self, src, dst):
"""Rename a file or directory.
Args:
src: (str) source file or directory
dst: (str) if the src is a directory, the dst must not already exist
"""
if stat.S_ISDIR(self.filesystem.GetObject(src).st_mode):
self.copytree(src, dst, symlinks=True)
else:
self.copy2(src, dst)
self.filesystem.RemoveObject(src)
def __getattr__(self, name):
"""Forwards any non-faked calls to the standard shutil module."""
return getattr(self._shutil_module, name)
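def _ExampleMove():
  """Hedged sketch mirroring the module docstring: exercise move()
  against a fresh fake filesystem."""
  import fake_filesystem
  filesystem = fake_filesystem.FakeFilesystem()
  shutil_module = FakeShutilModule(filesystem)
  filesystem.CreateFile('/src/file', contents='data')
  shutil_module.move('/src/file', '/moved-file')
  return filesystem.Exists('/moved-file') and not filesystem.Exists('/src/file')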
def _RunDoctest():
# pylint: disable-msg=C6111,C6204,W0406
import doctest
import fake_filesystem_shutil
return doctest.testmod(fake_filesystem_shutil)
if __name__ == '__main__':
_RunDoctest()
|
kenshay/ImageScript | refs/heads/master | Script_Runner/PYTHON/Lib/smtplib.py | 5 | #! /usr/bin/env python3
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print(s.help())
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import io
import re
import email.utils
import email.message
import email.generator
import base64
import hmac
import copy
import datetime
import sys
from email.base64mime import body_encode as encode_base64
__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
bCRLF = b"\r\n"
_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(OSError):
"""Base class for all exceptions raised by this module."""
class SMTPNotSupportedError(SMTPException):
"""The command or option is not supported by the SMTP server.
This exception is raised when an attempt is made to run a command or a
command with an option which is not supported by the server.
"""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code' attribute of the error, and the `smtp_error' attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
In addition to the attributes set by on all SMTPResponseException
exceptions, this sets `sender' to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = (recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
def quoteaddr(addrstring):
"""Quote a subset of the email addresses defined by RFC 821.
Should be able to handle anything email.utils.parseaddr can handle.
"""
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, use it as is and hope for the best.
if addrstring.strip().startswith('<'):
return addrstring
return "<%s>" % addrstring
return "<%s>" % addr
def _addr_only(addrstring):
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, so use it as is.
return addrstring
return addr
# Legacy method kept for backward compatibility.
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
return re.sub(r'(?m)^\.', '..',
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
def _quote_periods(bindata):
return re.sub(br'(?m)^\.', b'..', bindata)
def _fix_eols(data):
return re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data)
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
class SMTP:
"""This class manages a connection to an SMTP or ESMTP server.
SMTP Objects:
SMTP objects have the following attributes:
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
This is a True value _after you do an EHLO command_, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
will _after you do an EHLO command_, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
debuglevel = 0
file = None
helo_resp = None
ehlo_msg = "ehlo"
ehlo_resp = None
does_esmtp = 0
default_port = SMTP_PORT
def __init__(self, host='', port=0, local_hostname=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Initialize a new instance.
If specified, `host' is the name of the remote host to which to
connect. If specified, `port' specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. If a host is specified the
connect method is called, and if it returns anything other than a
success code an SMTPConnectError is raised. If specified,
`local_hostname` is used as the FQDN of the local host in the HELO/EHLO
command. Otherwise, the local hostname is found using
socket.getfqdn(). The `source_address` parameter takes a 2-tuple (host,
port) for the socket to bind to as its source address before
connecting. If the host is '' and port is 0, the OS default behavior
will be used.
"""
self._host = host
self.timeout = timeout
self.esmtp_features = {}
self.command_encoding = 'ascii'
self.source_address = source_address
if host:
(code, msg) = self.connect(host, port)
if code != 220:
self.close()
raise SMTPConnectError(code, msg)
if local_hostname is not None:
self.local_hostname = local_hostname
else:
# RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
# if that can't be calculated, that we should use a domain literal
# instead (essentially an encoded IP address like [A.B.C.D]).
fqdn = socket.getfqdn()
if '.' in fqdn:
self.local_hostname = fqdn
else:
# We can't find an fqdn hostname, so use a domain literal
addr = '127.0.0.1'
try:
addr = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
pass
self.local_hostname = '[%s]' % addr
def __enter__(self):
return self
def __exit__(self, *args):
try:
code, message = self.docmd("QUIT")
if code != 221:
raise SMTPResponseException(code, message)
except SMTPServerDisconnected:
pass
finally:
self.close()
def set_debuglevel(self, debuglevel):
"""Set the debug output level.
A non-false value results in debug messages for connection and for all
messages sent to and received from the server.
"""
self.debuglevel = debuglevel
def _print_debug(self, *args):
if self.debuglevel > 1:
print(datetime.datetime.now().time(), *args, file=sys.stderr)
else:
print(*args, file=sys.stderr)
def _get_socket(self, host, port, timeout):
# This makes it simpler for SMTP_SSL to use the SMTP connect code
# and just alter the socket connection bit.
if self.debuglevel > 0:
self._print_debug('connect: to', (host, port), self.source_address)
return socket.create_connection((host, port), timeout,
self.source_address)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:') followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if source_address:
self.source_address = source_address
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i + 1:]
try:
port = int(port)
except ValueError:
raise OSError("nonnumeric port")
if not port:
port = self.default_port
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
self.sock = self._get_socket(host, port, self.timeout)
self.file = None
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', repr(msg))
return (code, msg)
def send(self, s):
"""Send `s' to the server."""
if self.debuglevel > 0:
self._print_debug('send:', repr(s))
if hasattr(self, 'sock') and self.sock:
if isinstance(s, str):
# send is used by the 'data' command, where command_encoding
# should not be used, but 'data' needs to convert the string to
# binary itself anyway, so that's not a problem.
s = s.encode(self.command_encoding)
try:
self.sock.sendall(s)
except OSError:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
str = '%s%s' % (cmd, CRLF)
else:
str = '%s %s%s' % (cmd, args, CRLF)
self.send(str)
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp = []
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline(_MAXLINE + 1)
except OSError as e:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed: "
+ str(e))
if not line:
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.debuglevel > 0:
self._print_debug('reply:', repr(line))
if len(line) > _MAXLINE:
self.close()
raise SMTPResponseException(500, "Line too long.")
resp.append(line[4:].strip(b' \t\r\n'))
code = line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4] != b"-":
break
errmsg = b"\n".join(resp)
if self.debuglevel > 0:
self._print_debug('reply: retcode (%s); Msg: %a' % (errcode, errmsg))
return errcode, errmsg
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.putcmd("helo", name or self.local_hostname)
(code, msg) = self.getreply()
self.helo_resp = msg
return (code, msg)
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
self.putcmd(self.ehlo_msg, name or self.local_hostname)
(code, msg) = self.getreply()
# According to RFC1869 some (badly written)
# MTA's will disconnect on an ehlo. Toss an exception if
# that happens -ddm
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected("Server not connected")
self.ehlo_resp = msg
if code != 250:
return (code, msg)
self.does_esmtp = 1
#parse the ehlo response -ddm
assert isinstance(self.ehlo_resp, bytes), repr(self.ehlo_resp)
resp = self.ehlo_resp.decode("latin-1").split('\n')
del resp[0]
for each in resp:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
# This doesn't remove duplicates, but that's no problem
self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+ " " + auth_match.groups(0)[0]
continue
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
            # parameters, but we're not going to check for that here. Note
# that the space isn't present if there are no parameters.
m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
if m:
feature = m.group("feature").lower()
params = m.string[m.end("feature"):].strip()
if feature == "auth":
self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+ " " + params
else:
self.esmtp_features[feature] = params
return (code, msg)
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd("help", args)
return self.getreply()[1]
def rset(self):
"""SMTP 'rset' command -- resets session."""
self.command_encoding = 'ascii'
return self.docmd("rset")
def _rset(self):
"""Internal 'rset' command which ignores any SMTPServerDisconnected error.
Used internally in the library, since the server disconnected error
should appear to the application when the *next* command is issued, if
we are doing an internal "safety" reset.
"""
try:
self.rset()
except SMTPServerDisconnected:
pass
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self, sender, options=[]):
"""SMTP 'mail' command -- begins mail xfer session.
This method may raise the following exceptions:
SMTPNotSupportedError The options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
"""
optionlist = ''
if options and self.does_esmtp:
if any(x.lower()=='smtputf8' for x in options):
if self.has_extn('smtputf8'):
self.command_encoding = 'utf-8'
else:
raise SMTPNotSupportedError(
'SMTPUTF8 not supported by server')
optionlist = ' ' + ' '.join(options)
self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
return self.getreply()
def rcpt(self, recip, options=[]):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist))
return self.getreply()
def data(self, msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
        response code received when all the data is sent. If msg
is a string, lone '\\r' and '\\n' characters are converted to
'\\r\\n' characters. If msg is bytes, it is transmitted as is.
"""
self.putcmd("data")
(code, repl) = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, repl))
if code != 354:
raise SMTPDataError(code, repl)
else:
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
q = _quote_periods(msg)
if q[-2:] != bCRLF:
q = q + bCRLF
q = q + b"." + bCRLF
self.send(q)
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('data:', (code, msg))
return (code, msg)
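    # Hedged illustration of a complete transaction built from the
    # commands above (server replies vary; the higher-level sendmail()
    # method wraps this same sequence):
    #
    #     s = SMTP('mail.example.org')        # connect() expects 220
    #     s.ehlo()                            # -> (250, b'...')
    #     s.mail('me@example.org')            # -> (250, b'OK')
    #     s.rcpt('you@example.org')           # -> (250, b'OK')
    #     s.data('Subject: hi\r\n\r\nbody')   # -> (250, b'OK')
    #     s.quit()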
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd("vrfy", _addr_only(address))
return self.getreply()
# a.k.a.
vrfy = verify
def expn(self, address):
"""SMTP 'expn' command -- expands a mailing list."""
self.putcmd("expn", _addr_only(address))
return self.getreply()
# some useful methods
def ehlo_or_helo_if_needed(self):
"""Call self.ehlo() and/or self.helo() if needed.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
def auth(self, mechanism, authobject, *, initial_response_ok=True):
"""Authentication command - requires response processing.
'mechanism' specifies which authentication mechanism is to
be used - the valid values are those listed in the 'auth'
element of 'esmtp_features'.
'authobject' must be a callable object taking a single argument:
data = authobject(challenge)
It will be called to process the server's challenge response; the
challenge argument it is passed will be a bytes. It should return
bytes data that will be base64 encoded and sent to the server.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
to the AUTH command, if the authentication methods supports it.
"""
# RFC 4954 allows auth methods to provide an initial response. Not all
# methods support it. By definition, if they return something other
# than None when challenge is None, then they do. See issue #15014.
mechanism = mechanism.upper()
initial_response = (authobject() if initial_response_ok else None)
if initial_response is not None:
response = encode_base64(initial_response.encode('ascii'), eol='')
(code, resp) = self.docmd("AUTH", mechanism + " " + response)
else:
(code, resp) = self.docmd("AUTH", mechanism)
# If server responds with a challenge, send the response.
if code == 334:
challenge = base64.decodebytes(resp)
response = encode_base64(
authobject(challenge).encode('ascii'), eol='')
(code, resp) = self.docmd(response)
if code in (235, 503):
return (code, resp)
raise SMTPAuthenticationError(code, resp)
def auth_cram_md5(self, challenge=None):
""" Authobject to use with CRAM-MD5 authentication. Requires self.user
and self.password to be set."""
# CRAM-MD5 does not support initial-response.
if challenge is None:
return None
return self.user + " " + hmac.HMAC(
self.password.encode('ascii'), challenge, 'md5').hexdigest()
def auth_plain(self, challenge=None):
""" Authobject to use with PLAIN authentication. Requires self.user and
self.password to be set."""
return "\0%s\0%s" % (self.user, self.password)
def auth_login(self, challenge=None):
""" Authobject to use with LOGIN authentication. Requires self.user and
self.password to be set."""
if challenge is None:
return self.user
else:
return self.password
def login(self, user, password, *, initial_response_ok=True):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
Keyword arguments:
- initial_response_ok: Allow sending the RFC 4954 initial-response
          to the AUTH command, if the authentication method supports it.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPNotSupportedError The AUTH command is not supported by the
server.
SMTPException No suitable authentication method was
found.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("auth"):
raise SMTPNotSupportedError(
"SMTP AUTH extension not supported by server.")
# Authentication methods the server claims to support
advertised_authlist = self.esmtp_features["auth"].split()
# Authentication methods we can handle in our preferred order:
preferred_auths = ['CRAM-MD5', 'PLAIN', 'LOGIN']
# We try the supported authentications in our preferred order, if
# the server supports them.
authlist = [auth for auth in preferred_auths
if auth in advertised_authlist]
if not authlist:
raise SMTPException("No suitable authentication method found.")
# Some servers advertise authentication methods they don't really
# support, so if authentication fails, we continue until we've tried
# all methods.
self.user, self.password = user, password
for authmethod in authlist:
method_name = 'auth_' + authmethod.lower().replace('-', '_')
try:
(code, resp) = self.auth(
authmethod, getattr(self, method_name),
initial_response_ok=initial_response_ok)
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
if code in (235, 503):
return (code, resp)
except SMTPAuthenticationError as e:
last_exception = e
# We could not login successfully. Return result of last attempt.
raise last_exception
def starttls(self, keyfile=None, certfile=None, context=None):
"""Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("starttls"):
raise SMTPNotSupportedError(
"STARTTLS extension not supported by server.")
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
if not _have_ssl:
raise RuntimeError("No SSL support included in this Python")
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a"
"custom context instead", DeprecationWarning, 2)
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.sock = context.wrap_socket(self.sock,
server_hostname=self._host)
self.file = None
# RFC 3207:
# The client MUST discard any knowledge obtained from
# the server, such as the list of SMTP service extensions,
# which was not obtained from the TLS negotiation itself.
self.helo_resp = None
self.ehlo_resp = None
self.esmtp_features = {}
            self.does_esmtp = False
else:
# RFC 3207:
# 501 Syntax error (no parameters allowed)
# 454 TLS not available due to temporary reason
raise SMTPResponseException(resp, reply)
return (resp, reply)
def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
rcpt_options=[]):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
msg may be a string containing characters in the ASCII range, or a byte
string. A string is encoded to bytes using the ascii codec, and lone
\\r and \\n characters are converted to \\r\\n characters.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
SMTPNotSupportedError The mail_options parameter includes 'SMTPUTF8'
but the SMTPUTF8 extension is not supported by
the server.
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''\\
... From: Me@my.org
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg)
{ "three@three.org" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
self.ehlo_or_helo_if_needed()
esmtp_opts = []
if isinstance(msg, str):
msg = _fix_eols(msg).encode('ascii')
if self.does_esmtp:
if self.has_extn('size'):
esmtp_opts.append("size=%d" % len(msg))
for option in mail_options:
esmtp_opts.append(option)
(code, resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs = {}
if isinstance(to_addrs, str):
to_addrs = [to_addrs]
for each in to_addrs:
(code, resp) = self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each] = (code, resp)
if code == 421:
self.close()
raise SMTPRecipientsRefused(senderrs)
if len(senderrs) == len(to_addrs):
# the server refused all our recipients
self._rset()
raise SMTPRecipientsRefused(senderrs)
(code, resp) = self.data(msg)
if code != 250:
if code == 421:
self.close()
else:
self._rset()
raise SMTPDataError(code, resp)
        # if we got here then somebody got our mail
return senderrs
def send_message(self, msg, from_addr=None, to_addrs=None,
                     mail_options=[], rcpt_options=[]):
"""Converts message to a bytestring and passes it to sendmail.
The arguments are as for sendmail, except that msg is an
email.message.Message object. If from_addr is None or to_addrs is
None, these arguments are taken from the headers of the Message as
described in RFC 2822 (a ValueError is raised if there is more than
one set of 'Resent-' headers). Regardless of the values of from_addr and
to_addr, any Bcc field (or Resent-Bcc field, when the Message is a
resent) of the Message object won't be transmitted. The Message
object is then serialized using email.generator.BytesGenerator and
sendmail is called to transmit the message. If the sender or any of
the recipient addresses contain non-ASCII and the server advertises the
SMTPUTF8 capability, the policy is cloned with utf8 set to True for the
serialization, and SMTPUTF8 and BODY=8BITMIME are asserted on the send.
If the server does not support SMTPUTF8, an SMTPNotSupported error is
raised. Otherwise the generator is called without modifying the
policy.
"""
# 'Resent-Date' is a mandatory field if the Message is resent (RFC 2822
# Section 3.6.6). In such a case, we use the 'Resent-*' fields. However,
# if there is more than one 'Resent-' block there's no way to
# unambiguously determine which one is the most recent in all cases,
# so rather than guess we raise a ValueError in that case.
#
# TODO implement heuristics to guess the correct Resent-* block with an
# option allowing the user to enable the heuristics. (It should be
# possible to guess correctly almost all of the time.)
self.ehlo_or_helo_if_needed()
resent = msg.get_all('Resent-Date')
if resent is None:
header_prefix = ''
elif len(resent) == 1:
header_prefix = 'Resent-'
else:
raise ValueError("message has more than one 'Resent-' header block")
if from_addr is None:
# Prefer the sender field per RFC 2822:3.6.2.
from_addr = (msg[header_prefix + 'Sender']
if (header_prefix + 'Sender') in msg
else msg[header_prefix + 'From'])
from_addr = email.utils.getaddresses([from_addr])[0][1]
if to_addrs is None:
addr_fields = [f for f in (msg[header_prefix + 'To'],
msg[header_prefix + 'Bcc'],
msg[header_prefix + 'Cc'])
if f is not None]
to_addrs = [a[1] for a in email.utils.getaddresses(addr_fields)]
# Make a local copy so we can delete the bcc headers.
msg_copy = copy.copy(msg)
del msg_copy['Bcc']
del msg_copy['Resent-Bcc']
international = False
try:
''.join([from_addr, *to_addrs]).encode('ascii')
except UnicodeEncodeError:
if not self.has_extn('smtputf8'):
raise SMTPNotSupportedError(
"One or more source or delivery addresses require"
" internationalized email support, but the server"
" does not advertise the required SMTPUTF8 capability")
international = True
with io.BytesIO() as bytesmsg:
if international:
g = email.generator.BytesGenerator(
bytesmsg, policy=msg.policy.clone(utf8=True))
                # copy instead of += so the shared default list is not mutated
                mail_options = mail_options + ['SMTPUTF8', 'BODY=8BITMIME']
else:
g = email.generator.BytesGenerator(bytesmsg)
g.flatten(msg_copy, linesep='\r\n')
flatmsg = bytesmsg.getvalue()
return self.sendmail(from_addr, to_addrs, flatmsg, mail_options,
rcpt_options)
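    # Usage sketch (illustrative, not part of the class): sending an
    # email.message.Message directly. Addresses and content below are
    # placeholders; from_addr/to_addrs are taken from the headers when
    # omitted, as described in the docstring above.
    #
    #     from email.message import EmailMessage
    #     m = EmailMessage()
    #     m['From'], m['To'], m['Subject'] = 'a@example.com', 'b@example.com', 'hi'
    #     m.set_content('plain text body')
    #     smtp.send_message(m)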
def close(self):
"""Close the connection to the SMTP server."""
try:
file = self.file
self.file = None
if file:
file.close()
finally:
sock = self.sock
self.sock = None
if sock:
sock.close()
def quit(self):
"""Terminate the SMTP session."""
res = self.docmd("quit")
# A new EHLO is required after reconnecting with connect()
self.ehlo_resp = self.helo_resp = None
self.esmtp_features = {}
self.does_esmtp = False
self.close()
return res
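# Usage sketch (illustrative): a typical authenticated client session built
# from the methods above. The host, port and credentials are placeholders.
#
#     s = SMTP("mail.example.com", 587)
#     s.ehlo()
#     s.starttls()   # EHLO state is discarded per RFC 3207 ...
#     s.ehlo()       # ... so EHLO must be re-issued after the TLS upgrade
#     s.login("user", "secret")
#     s.sendmail("user@example.com", ["rcpt@example.com"],
#                "Subject: test\r\n\r\nbody")
#     s.quit()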
if _have_ssl:
class SMTP_SSL(SMTP):
""" This is a subclass derived from SMTP that connects over an SSL
encrypted socket (to use this class you need a socket module that was
compiled with SSL support). If host is not specified, '' (the local
host) is used. If port is omitted, the standard SMTP-over-SSL port
(465) is used. local_hostname and source_address have the same meaning
as they do in the SMTP class. keyfile and certfile are also optional -
they can contain a PEM formatted private key and certificate chain file
        for the SSL connection. context is also optional and can contain an
        SSLContext; it is an alternative to keyfile and certfile, and if it is
        specified both keyfile and certfile must be None.
"""
default_port = SMTP_SSL_PORT
def __init__(self, host='', port=0, local_hostname=None,
keyfile=None, certfile=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, context=None):
if context is not None and keyfile is not None:
raise ValueError("context and keyfile arguments are mutually "
"exclusive")
if context is not None and certfile is not None:
raise ValueError("context and certfile arguments are mutually "
"exclusive")
if keyfile is not None or certfile is not None:
import warnings
warnings.warn("keyfile and certfile are deprecated, use a"
"custom context instead", DeprecationWarning, 2)
self.keyfile = keyfile
self.certfile = certfile
if context is None:
context = ssl._create_stdlib_context(certfile=certfile,
keyfile=keyfile)
self.context = context
SMTP.__init__(self, host, port, local_hostname, timeout,
source_address)
def _get_socket(self, host, port, timeout):
if self.debuglevel > 0:
self._print_debug('connect:', (host, port))
new_socket = socket.create_connection((host, port), timeout,
self.source_address)
new_socket = self.context.wrap_socket(new_socket,
server_hostname=self._host)
return new_socket
__all__.append("SMTP_SSL")
#
# LMTP extension
#
LMTP_PORT = 2003
class LMTP(SMTP):
"""LMTP - Local Mail Transfer Protocol
The LMTP protocol, which is very similar to ESMTP, is heavily based
on the standard SMTP client. It's common to use Unix sockets for
LMTP, so our connect() method must support that as well as a regular
host:port server. local_hostname and source_address have the same
meaning as they do in the SMTP class. To specify a Unix socket,
you must use an absolute path as the host, starting with a '/'.
Authentication is supported, using the regular SMTP mechanism. When
    using a Unix socket, LMTP servers generally don't support or require any
authentication, but your mileage might vary."""
ehlo_msg = "lhlo"
def __init__(self, host='', port=LMTP_PORT, local_hostname=None,
source_address=None):
"""Initialize a new instance."""
SMTP.__init__(self, host, port, local_hostname=local_hostname,
source_address=source_address)
def connect(self, host='localhost', port=0, source_address=None):
"""Connect to the LMTP daemon, on either a Unix or a TCP socket."""
if host[0] != '/':
return SMTP.connect(self, host, port, source_address=source_address)
# Handle Unix-domain sockets.
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.file = None
self.sock.connect(host)
except OSError:
if self.debuglevel > 0:
self._print_debug('connect fail:', host)
if self.sock:
self.sock.close()
self.sock = None
raise
(code, msg) = self.getreply()
if self.debuglevel > 0:
self._print_debug('connect:', msg)
return (code, msg)
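# Usage sketch (illustrative): delivering over a Unix-domain socket. The
# socket path is an assumption -- substitute the path your MDA actually
# exposes (a leading '/' selects the Unix-socket branch of connect()).
#
#     lmtp = LMTP('/var/run/lmtp.sock')
#     lmtp.sendmail('from@example.com', ['to@example.com'], msg)
#     lmtp.quit()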
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
def prompt(prompt):
sys.stdout.write(prompt + ": ")
sys.stdout.flush()
return sys.stdin.readline().strip()
fromaddr = prompt("From")
toaddrs = prompt("To").split(',')
print("Enter message, end with ^D:")
msg = ''
while 1:
line = sys.stdin.readline()
if not line:
break
msg = msg + line
print("Message length is %d" % len(msg))
server = SMTP('localhost')
server.set_debuglevel(1)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
|
shadyueh/pyranking | refs/heads/master | env/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshistream.py | 1730 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
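# Usage sketch (illustrative): walking a genshi markup stream with this
# walker. The inline XML literal is a placeholder document.
#
#     from genshi.input import XML
#     for token in TreeWalker(XML('<p>hello</p>')):
#         print(token)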
|
zhulin2609/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py | 124 | # Copyright (C) 2010 Apple Inc. All rights reserved.
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Unit tests for watchlist.py.'''
import unittest2 as unittest
import watchlist
class MockErrorHandler(object):
def __init__(self, handle_style_error):
self.turned_off_filtering = False
self._handle_style_error = handle_style_error
def turn_off_line_filtering(self):
self.turned_off_filtering = True
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
return True
class WatchListTest(unittest.TestCase):
def test_basic_error_message(self):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
mock_error_handler.had_error = True
self.assertEqual(0, line_number)
self.assertEqual('watchlist/general', category)
error_handler = MockErrorHandler(handle_style_error)
error_handler.had_error = False
checker = watchlist.WatchListChecker('watchlist', error_handler)
checker.check(['{"DEFINTIONS": {}}'])
self.assertTrue(error_handler.had_error)
self.assertTrue(error_handler.turned_off_filtering)
|
0x1100/glad | refs/heads/master | glad/lang/__init__.py | 3 | import glad.lang.c
import glad.lang.d
import glad.lang.volt
def get_generator(name, spec):
_langs = [glad.lang.c, glad.lang.d, glad.lang.volt]
for lang in _langs:
gen, loader = lang.get_generator(name, spec)
if gen is not None:
return gen, loader
return None, None
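# Usage sketch (illustrative): resolving a generator for a language name.
# The 'spec' object is assumed to come from glad's spec-parsing step.
#
#     gen, loader = get_generator('c', spec)
#     if gen is None:
#         raise ValueError('no generator found for the requested language')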
|
vrenaville/hr | refs/heads/8.0 | __unported__/hr_infraction/hr_infraction.py | 27 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import time
from openerp.osv import fields, orm
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.translate import _
class hr_infraction_category(orm.Model):
_name = 'hr.infraction.category'
_description = 'Infraction Type'
_columns = {
'name': fields.char(
'Name',
required=True,
),
'code': fields.char(
'Code',
required=True,
),
}
class hr_infraction(orm.Model):
_name = 'hr.infraction'
_description = 'Infraction'
_inherit = ['mail.thread', 'ir.needaction_mixin']
_columns = {
'name': fields.char(
'Subject',
size=256,
required=True,
readonly=True,
states={'draft': [('readonly', False)]},
),
'date': fields.date(
'Date',
required=True,
readonly=True,
states={'draft': [('readonly', False)]},
),
'employee_id': fields.many2one(
'hr.employee',
'Employee',
required=True,
readonly=True,
states={'draft': [('readonly', False)]},
),
'category_id': fields.many2one(
'hr.infraction.category',
'Category',
required=True,
readonly=True,
states={'draft': [('readonly', False)]},
),
'action_ids': fields.one2many(
'hr.infraction.action',
'infraction_id',
'Actions',
readonly=True,
),
'memo': fields.text(
'Description',
readonly=True,
states={'draft': [('readonly', False)]},
),
'state': fields.selection(
[
('draft', 'Draft'),
('confirm', 'Confirmed'),
('action', 'Actioned'),
('noaction', 'No Action'),
],
'State',
readonly=True,
),
}
_defaults = {
'date': time.strftime(DEFAULT_SERVER_DATE_FORMAT),
'state': 'draft',
}
_track = {
'state': {
'hr_infraction.mt_alert_infraction_confirmed': (
lambda self, cr, u, obj, ctx=None: obj['state'] == 'confirm'),
'hr_infraction.mt_alert_infraction_action': (
lambda self, cr, u, obj, ctx=None: obj['state'] == 'action'),
'hr_infraction.mt_alert_infraction_noaction': (
lambda self, cr, u, obj, ctx=None: obj['state'] == 'noaction'),
},
}
def _needaction_domain_get(self, cr, uid, context=None):
users_obj = self.pool.get('res.users')
domain = []
if users_obj.has_group(cr, uid, 'base.group_hr_manager'):
domain = [('state', '=', 'confirm')]
if len(domain) == 0:
return False
return domain
def unlink(self, cr, uid, ids, context=None):
for infraction in self.browse(cr, uid, ids, context=context):
if infraction.state not in ['draft']:
raise orm.except_orm(
_('Error'),
_('Infractions that have progressed beyond "Draft" state '
'may not be removed.')
)
return super(hr_infraction, self).unlink(cr, uid, ids, context=context)
def onchange_category(self, cr, uid, ids, category_id, context=None):
res = {'value': {'name': False}}
if category_id:
category = self.pool.get('hr.infraction.category').browse(
cr, uid, category_id, context=context
)
res['value']['name'] = category.name
return res
ACTION_TYPE_SELECTION = [
('warning_verbal', 'Verbal Warning'),
('warning_letter', 'Written Warning'),
('transfer', 'Transfer'),
('suspension', 'Suspension'),
('dismissal', 'Dismissal'),
]
class hr_infraction_action(orm.Model):
_name = 'hr.infraction.action'
_description = 'Action Based on Infraction'
_columns = {
'infraction_id': fields.many2one(
'hr.infraction',
'Infraction',
ondelete='cascade',
required=True,
readonly=True,
),
'type': fields.selection(
ACTION_TYPE_SELECTION,
'Type',
required=True,
),
'memo': fields.text(
'Notes',
),
'employee_id': fields.related(
'infraction_id',
'employee_id',
type='many2one',
store=True,
obj='hr.employee',
string='Employee',
readonly=True,
),
'warning_id': fields.many2one(
'hr.infraction.warning',
'Warning',
readonly=True,
),
'transfer_id': fields.many2one(
'hr.department.transfer',
'Transfer',
readonly=True,
),
}
_rec_name = 'type'
def unlink(self, cr, uid, ids, context=None):
for action in self.browse(cr, uid, ids, context=context):
if action.infraction_id.state not in ['draft']:
raise orm.except_orm(
_('Error'),
_('Actions belonging to Infractions not in "Draft" state '
'may not be removed.')
)
return super(hr_infraction_action, self).unlink(
cr, uid, ids, context=context
)
class hr_warning(orm.Model):
_name = 'hr.infraction.warning'
_description = 'Employee Warning'
_columns = {
'name': fields.char(
'Subject',
size=256,
),
'date': fields.date(
'Date Issued',
),
'type': fields.selection(
[
('verbal', 'Verbal'),
('written', 'Written'),
],
'Type',
required=True,
),
'action_id': fields.many2one(
'hr.infraction.action',
'Action',
ondelete='cascade',
readonly=True,
),
'infraction_id': fields.related(
'action_id',
'infraction_id',
type='many2one',
obj='hr.infraction',
string='Infraction',
readonly=True,
),
'employee_id': fields.related(
'infraction_id',
'employee_id',
type='many2one',
obj='hr.employee',
string='Employee',
readonly=True,
),
}
_defaults = {
'type': 'written',
'date': time.strftime(DEFAULT_SERVER_DATE_FORMAT),
}
def unlink(self, cr, uid, ids, context=None):
for warning in self.browse(cr, uid, ids, context=context):
if (warning.action_id
and warning.action_id.infraction_id.state != 'draft'):
raise orm.except_orm(
_('Error'),
_('Warnings attached to Infractions not in "Draft" state '
'may not be removed.')
)
return super(hr_warning, self).unlink(cr, uid, ids, context=context)
class hr_employee(orm.Model):
_name = 'hr.employee'
_inherit = 'hr.employee'
_columns = {
'infraction_ids': fields.one2many(
'hr.infraction',
'employee_id',
'Infractions',
readonly=True,
),
'infraction_action_ids': fields.one2many(
'hr.infraction.action',
'employee_id',
'Disciplinary Actions',
readonly=True,
),
}
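# Usage sketch (illustrative, OpenERP 7 ORM API with placeholder ids):
# creating a draft infraction for an employee; 'date' and 'state' fall
# back to the model defaults declared above.
#
#     infraction_obj = self.pool.get('hr.infraction')
#     infraction_id = infraction_obj.create(cr, uid, {
#         'name': 'Late arrival',
#         'employee_id': employee_id,
#         'category_id': category_id,
#     }, context=context)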
|
deercoder/0-PhD | refs/heads/master | TensorFlow/ex2/example2.py | 3 | import numpy as np
import tensorflow as tf
# Declare list of features, we only have one real-valued feature
def model(features, labels, mode):
# Build a linear model and predict values
W = tf.get_variable("W", [1], dtype=tf.float64)
b = tf.get_variable("b", [1], dtype=tf.float64)
y = W*features['x'] + b
# Loss sub-graph
loss = tf.reduce_sum(tf.square(y - labels))
# Training sub-graph
global_step = tf.train.get_global_step()
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = tf.group(optimizer.minimize(loss),
tf.assign_add(global_step, 1))
# ModelFnOps connects subgraphs we built to the
# appropriate functionality.
return tf.contrib.learn.ModelFnOps(
mode=mode, predictions=y,
loss=loss,
train_op=train)
estimator = tf.contrib.learn.Estimator(model_fn=model)
# define our data set
x = np.array([1., 2., 3., 4.])
y = np.array([0., -1., -2., -3.])
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x}, y, 4, num_epochs=1000)
# train
estimator.fit(input_fn=input_fn, steps=1000)
# evaluate our model
print(estimator.evaluate(input_fn=input_fn, steps=10))
|
muffinresearch/addons-server | refs/heads/master | lib/es/models.py | 17 | from django.db import models
from django.utils import timezone
class ReindexingManager(models.Manager):
"""Used to flag when an elasticsearch reindexing is occuring."""
def _flag_reindexing(self, site, new_index, old_index, alias):
"""Flag the database for a reindex on the given site."""
if self._is_reindexing(site):
return # Already flagged.
return self.create(new_index=new_index,
old_index=old_index,
alias=alias,
site=site)
def flag_reindexing_amo(self, new_index, old_index, alias):
"""Flag the database for an AMO reindex."""
return self._flag_reindexing('amo', new_index, old_index, alias)
def _unflag_reindexing(self, site):
"""Unflag the database for a reindex on the given site."""
self.filter(site=site).delete()
def unflag_reindexing_amo(self):
"""Unflag the database for an AMO reindex."""
self._unflag_reindexing('amo')
def _is_reindexing(self, site):
"""Return True if a reindexing is occuring for the given site."""
return self.filter(site=site).exists()
def is_reindexing_amo(self):
"""Return True if a reindexing is occuring on AMO."""
return self._is_reindexing('amo')
def get_indices(self, index):
"""Return the indices associated with an alias.
If we are reindexing, there should be two indices returned.
"""
try:
reindex = self.get(alias=index)
# Yes. Let's reindex on both indexes.
            return [idx for idx in (reindex.new_index, reindex.old_index)
                    if idx is not None]
except Reindexing.DoesNotExist:
return [index]
class Reindexing(models.Model):
SITE_CHOICES = (
('amo', 'AMO'),
)
start_date = models.DateTimeField(default=timezone.now)
old_index = models.CharField(max_length=255, null=True)
new_index = models.CharField(max_length=255)
alias = models.CharField(max_length=255)
site = models.CharField(max_length=3, choices=SITE_CHOICES)
objects = ReindexingManager()
class Meta:
db_table = 'zadmin_reindexing'
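# Usage sketch (illustrative index names): flagging a reindex routes writes
# to both indices until the flag is cleared.
#
#     Reindexing.objects.flag_reindexing_amo('addons_v2', 'addons_v1', 'addons')
#     Reindexing.objects.get_indices('addons')  # -> ['addons_v2', 'addons_v1']
#     Reindexing.objects.unflag_reindexing_amo()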
|
bnzk/django-painless-redirects | refs/heads/develop | painless_redirects/tests/test_middleware.py | 2 | # coding: utf-8
# don't add this import: request.path is non-unicode in python 2.7,
# or add it, as request.path should be unicode anyway?!
# from __future__ import unicode_literals
from ..models import Redirect
try:
reload
except NameError:
from importlib import reload
from django.contrib.sites.models import Site
from django.http import QueryDict
from django.test import TestCase
from django.test import override_settings
from mock import Mock
from painless_redirects import conf
from ..middleware import ManualRedirectMiddleware, ForceSiteDomainRedirectMiddleware
no_auto_create = override_settings(
PAINLESS_REDIRECTS_AUTO_CREATE=False,
)
auto_create = override_settings(
PAINLESS_REDIRECTS_AUTO_CREATE=True,
)
class ForceSiteDomainRedirectMiddlewareTestCase(TestCase):
def setUp(self):
self.middleware = ForceSiteDomainRedirectMiddleware()
self.request = Mock()
self.request.is_secure = lambda: False
self.request.get_host = lambda: "nogood.com"
self.request.META = {}
self.request.GET = QueryDict("")
self.request.path = "/"
def test_no_redirect(self):
self.request.get_host = lambda: "example.com"
response = self.middleware.process_request(self.request)
self.assertEqual(response, None)
def test_debug_no_redirect(self):
with self.settings(DEBUG=True):
response = self.middleware.process_request(self.request)
self.assertEqual(response, None)
def test_must_redirect(self):
response = self.middleware.process_request(self.request)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "http://example.com/")
def test_must_redirect_preserves_path(self):
self.request.path = "/abc/def/yeah/"
response = self.middleware.process_request(self.request)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "http://example.com/abc/def/yeah/")
def test_must_redirect_preserves_getvars(self):
self.request.path = "/abc/def/yeah/"
self.request.GET = QueryDict("karma=true")
response = self.middleware.process_request(self.request)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "http://example.com/abc/def/yeah/?karma=true")
class ManualRedirectMiddlewareTestCase(TestCase):
"""
request.get_current_site() is always the default example.com fixture
check: http://blog.namis.me/2012/05/13/writing-unit-tests-for-django-middleware/
"""
def setUp(self):
self._setup_request_response_middleware()
self.redirect = Redirect.objects.create(
old_path="/the-old-path/",
new_path="/the-new-path/",
)
self.site = Site.objects.create(
name="example site 1",
domain="example1.com",
)
self.site2 = Site.objects.create(
name="example site 2",
domain="example2.com",
)
def _setup_request_response_middleware(self):
self.middleware = ManualRedirectMiddleware()
self.request = Mock()
self.request.META = {}
self.request.get_host = lambda: 'host.com'
self.response = Mock()
def test_no_404_on_status_200(self):
self.request.path = self.redirect.old_path
self.response.status_code = 200
self.assertEqual(
self.middleware.process_response(self.request, self.response),
self.response)
@no_auto_create
def test_no_redirect_found(self):
reload(conf)
self.request.path = "/some-other-path/"
self.response.status_code = 404
self.assertEqual(
self.middleware.process_response(self.request, self.response),
self.response)
self.assertEqual(1, Redirect.objects.all().count())
@no_auto_create
def test_no_redirect_when_site_specified(self):
reload(conf)
self.redirect.site = self.site
self.redirect.save()
self.request.path = self.redirect.old_path
self.response.status_code = 404
self.assertEqual(
self.middleware.process_response(self.request, self.response),
self.response)
self.assertEqual(1, Redirect.objects.all().count())
def test_simple_redirect(self):
reload(conf)
self.response.status_code = 404
self.request.path = self.redirect.old_path
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/the-new-path/")
def test_simple_redirect_302(self):
reload(conf)
self.redirect.permanent = False
self.redirect.save()
self.response.status_code = 404
self.request.path = self.redirect.old_path
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "/the-new-path/")
self.redirect.refresh_from_db()
self.assertEqual(self.redirect.total_hits(), 1)
self.middleware.process_response(self.request, self.response)
self.middleware.process_response(self.request, self.response)
self.redirect.refresh_from_db()
self.assertEqual(self.redirect.total_hits(), 3)
def test_redirect_not_enabled(self):
reload(conf)
self.redirect.permanent = False
self.redirect.enabled = False
self.redirect.save()
self.response.status_code = 404
self.request.path = self.redirect.old_path
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 404)
self.redirect.refresh_from_db()
self.assertEqual(self.redirect.total_hits(), 1)
self.middleware.process_response(self.request, self.response)
self.middleware.process_response(self.request, self.response)
self.redirect.refresh_from_db()
self.assertEqual(self.redirect.total_hits(), 3)
def test_simple_redirect_keep_querystring(self):
self.response.status_code = 404
self.request.path = self.redirect.old_path
self.request.META['QUERY_STRING'] = 'a=b'
self.redirect.keep_querystring = True
self.redirect.old_path += "?a=b"
self.redirect.save()
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/the-new-path/?a=b")
def test_simple_redirect_drop_querystring(self):
self.response.status_code = 404
self.request.path = self.redirect.old_path
self.request.META['QUERY_STRING'] = 'a=xy'
self.redirect.old_path += "?a=xy"
self.redirect.save()
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/the-new-path/")
@auto_create
def test_wildcard_should_work_with_existing_auto_created_that_is_disabled(self):
"""
        yep, it should!
:return:
"""
reload(conf)
old_path = '/the-old-path/'
self.response.status_code = 404
self.request.path = '{}{}'.format(old_path, 'wildcard/maybe/')
self.redirect.enabled = False
self.redirect.save()
self.assertEqual(Redirect.objects.filter(enabled=True).count(), 0)
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 404)
self.assertEqual(Redirect.objects.all().count(), 2) # auto created one!
# the auto redirects
self.redirect.enabled = True
self.redirect.save()
self.assertEqual(Redirect.objects.filter(enabled=True).count(), 1)
# with existing auto created redirect!
self.redirect.wildcard_match = True
self.redirect.enabled = True
self.redirect.save()
self._setup_request_response_middleware()
self.response.status_code = 404
self.request.path = '{}{}'.format(self.redirect.old_path, 'wildcard/maybe/')
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual("/the-new-path/", response.url, )
self.assertEqual(Redirect.objects.count(), 2)
@no_auto_create
def test_special_chars_in_url(self):
"""
in python 2.7, request.path seems to be ascii, in certain deployment scenarios
        only reproducible when not importing from __future__ import unicode_literals
probably related: https://serverfault.com/questions/359934/unicodeencodeerror-when-uploading-files-in-django-admin
only happened on a uwsgi configuration for now.
"""
reload(conf)
self.response.status_code = 404
self.request.path = self.redirect.old_path
self.request.path = "/2011/11/réééédirect/"
self.request.META['QUERY_STRING'] = "?what=ééé"
response = self.middleware.process_response(self.request, self.response)
# only check if it doesnt fail for now.
self.assertEqual(response.status_code, 404)
def test_new_site_redirect(self):
self.redirect.new_site = self.site
self.redirect.save()
self.response.status_code = 404
self.request.scheme = "https"
self.request.path = "/the-old-path/"
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual(
response.url, "https://%s%s" % (self.redirect.new_site.domain, self.redirect.new_path))
def test_wildcard_redirect(self):
self.redirect.old_path = "/the-wildcard/yes/"
self.redirect.wildcard_match = True
self.redirect.save()
self.response.status_code = 404
self.request.path = "%sthe/right/part/" % self.redirect.old_path
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/the-new-path/")
def test_wildcard_redirect_keep_tree(self):
self.redirect.old_path = "/the-wildcard/yes/"
self.redirect.wildcard_match = True
self.redirect.keep_tree = True
self.redirect.save()
self.response.status_code = 404
self.request.path = "%sthe/right/part/" % self.redirect.old_path
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/the-new-path/the/right/part/")
# must work with site too
# self.redirect.site = self.site
self.redirect.save()
self._setup_request_response_middleware() # re-init
self.response.status_code = 404
self.request.path = "%sthe/right/part/2" % self.redirect.old_path
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/the-new-path/the/right/part/2")
def test_wildcard_redirect_with_site(self):
self.redirect.site = Site.objects.get_current()
self.redirect.old_path = "/the-wildcard/yes/"
self.redirect.wildcard_match = True
self.redirect.save()
self.response.status_code = 404
self.request.path = "%sthe/right/part/" % self.redirect.old_path
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/the-new-path/")
def test_redirect_without_slash(self):
self.redirect.old_path = '/whatever/check.html'
self.redirect.save()
self.request.path = self.redirect.old_path
self.response.status_code = 404
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "/the-new-path/")
def test_from_custom_domain(self):
self.redirect.domain = 'custom.com'
self.redirect.old_path = '/'
self.redirect.new_path = 'http://another.com/'
self.redirect.save()
self.request.path = self.redirect.old_path
self.request.get_host = lambda: 'custom.com'
self.response.status_code = 200
response = self.middleware.process_request(self.request, )
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "http://another.com/")
def test_from_custom_domain_false_positive(self):
self.redirect.domain = 'custom.com'
self.redirect.old_path = '/'
self.redirect.new_path = 'http://another.com/'
self.redirect.save()
self.request.path = self.redirect.old_path
# check for false positives!
self.request.get_host = lambda: 'none-or-what.com'
self.response.status_code = 200
response = self.middleware.process_request(self.request)
self.assertEqual(response, None)
response = self.middleware.process_response(self.request, self.response)
self.assertNotEqual(response.status_code, 301)
# self.assertEqual(response.url, "http://another.com/")
def test_old_path_too_long(self):
reload(conf)
very_long = '/'
for c in range(0, conf.INDEXED_CHARFIELD_MAX_LENGTH):
very_long += 'ccccc'
self.assertGreater(len(very_long), conf.INDEXED_CHARFIELD_MAX_LENGTH)
self.request.path = very_long + "/"
# check for false positives!
self.response.status_code = 404
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(404, response.status_code)
self.assertEqual(2, Redirect.objects.all().count())
self.assertEqual(conf.INDEXED_CHARFIELD_MAX_LENGTH, len(Redirect.objects.all()[0].old_path))
@auto_create
def test_auto_create_with_locale_middleware(self):
# will be redirected to /en/' by locale middleware later on!
self.request.path = '/?test'
self.response.status_code = 404
self.assertEqual(Redirect.objects.all().count(), 1)
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 404)
self.assertEqual(Redirect.objects.all().count(), 1)
# 404 with lang slug > auto create ok!
self.response.status_code = 404
self.request.path = '/nothing-yet/'
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 404)
self.assertEqual(Redirect.objects.all().count(), 2)
@auto_create
def test_auto_create_respect_append_slash(self):
# will be redirected to /nope/' by locale commonmiddleware later on!
self.request.path = '/nope'
self.response.status_code = 404
self.assertEqual(Redirect.objects.all().count(), 1)
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 404)
self.assertEqual(Redirect.objects.all().count(), 1)
# 404 with lang slug > auto create ok!
self.response.status_code = 404
self.request.path = '/nothing-yet/'
response = self.middleware.process_response(self.request, self.response)
self.assertEqual(response.status_code, 404)
self.assertEqual(Redirect.objects.all().count(), 2)
|
coreycb/charm-keystone | refs/heads/master | charmhelpers/core/kernel.py | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
from charmhelpers.osplatform import get_platform
from charmhelpers.core.hookenv import (
log,
INFO
)
__platform__ = get_platform()
if __platform__ == "ubuntu":
from charmhelpers.core.kernel_factory.ubuntu import (
persistent_modprobe,
update_initramfs,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.kernel_factory.centos import (
persistent_modprobe,
update_initramfs,
) # flake8: noqa -- ignore F401 for this import
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
def modprobe(module, persist=True):
"""Load a kernel module and configure for auto-load on reboot."""
cmd = ['modprobe', module]
log('Loading kernel module %s' % module, level=INFO)
subprocess.check_call(cmd)
if persist:
persistent_modprobe(module)
def rmmod(module, force=False):
"""Remove a module from the linux kernel"""
cmd = ['rmmod']
if force:
cmd.append('-f')
cmd.append(module)
log('Removing kernel module %s' % module, level=INFO)
return subprocess.check_call(cmd)
def lsmod():
"""Shows what kernel modules are currently loaded"""
return subprocess.check_output(['lsmod'],
universal_newlines=True)
def is_module_loaded(module):
"""Checks if a kernel module is already loaded"""
matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
return len(matches) > 0
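# Usage sketch (illustrative module name): load a kernel module once,
# persisting it across reboots, only if it is not already resident.
#
#     if not is_module_loaded('bonding'):
#         modprobe('bonding', persist=True)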
|
cjh1/StarCluster | refs/heads/develop | starcluster/tests/test_config.py | 19 | # Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import os
import copy
import tempfile
import logging
logging.disable(logging.WARN)
from starcluster import exception
from starcluster import tests
from starcluster import static
from starcluster import config
from starcluster import utils
class TestStarClusterConfig(tests.StarClusterTest):
def test_valid_config_template(self):
self.config
def test_config_dne(self):
tmp_file = tempfile.NamedTemporaryFile()
non_existent_file = tmp_file.name
tmp_file.close()
assert not os.path.exists(non_existent_file)
try:
config.StarClusterConfig(non_existent_file, cache=True).load()
except exception.ConfigNotFound:
pass
else:
raise Exception('config loaded non-existent config file %s' %
non_existent_file)
def test_get_cluster(self):
try:
self.config.get_cluster_template('no_such_cluster')
except exception.ClusterTemplateDoesNotExist:
pass
else:
raise Exception('config returned non-existent cluster')
def test_int_required(self):
cases = [{'c1_size': '-s'}, {'c1_size': 2.5}, {'v1_partition': 'asdf'},
{'v1_partition': 0.33}]
for case in cases:
try:
self.get_custom_config(**case)
except exception.ConfigError:
pass
else:
raise Exception('config is not enforcing ints correctly')
def test_bool_required(self):
cases = [{'enable_experimental': 2}]
for case in cases:
try:
self.get_custom_config(**case)
except exception.ConfigError:
pass
else:
raise Exception("config is not enforcing strs correctly")
def test_missing_required(self):
cfg = self.config._config
section_copy = copy.deepcopy(cfg._sections)
for setting in static.CLUSTER_SETTINGS:
if not static.CLUSTER_SETTINGS[setting][1]:
continue
del cfg._sections['cluster c1'][setting]
try:
self.config.load()
except exception.ConfigError:
pass
else:
raise Exception(
"config is not enforcing required setting '%s'" % setting)
cfg._sections = copy.deepcopy(section_copy)
def test_volumes(self):
c1 = self.config.get_cluster_template('c1')
vols = c1.volumes
assert len(vols) == 3
assert 'v1' in vols
v1 = vols['v1']
assert 'volume_id' in v1 and v1['volume_id'] == 'vol-c999999'
assert 'device' in v1 and v1['device'] == '/dev/sdj'
assert 'partition' in v1 and v1['partition'] == '/dev/sdj1'
assert 'mount_path' in v1 and v1['mount_path'] == '/volume1'
assert 'v2' in vols
v2 = vols['v2']
assert 'volume_id' in v2 and v2['volume_id'] == 'vol-c888888'
assert 'device' in v2 and v2['device'] == '/dev/sdk'
assert 'partition' in v2 and v2['partition'] == '/dev/sdk1'
assert 'mount_path' in v2 and v2['mount_path'] == '/volume2'
assert 'v3' in vols
v3 = vols['v3']
assert 'volume_id' in v3 and v3['volume_id'] == 'vol-c777777'
assert 'device' in v3 and v3['device'] == '/dev/sdl'
assert 'partition' in v3 and v3['partition'] == '/dev/sdl1'
assert 'mount_path' in v3 and v3['mount_path'] == '/volume3'
def test_volume_not_defined(self):
try:
self.get_custom_config(**{'c1_vols': 'v1,v2,v2323'})
except exception.ConfigError:
pass
else:
raise Exception(
'config allows non-existent volumes to be specified')
def test_clusters(self):
assert 'c1' in self.config.clusters
assert 'c2' in self.config.clusters
assert 'c3' in self.config.clusters
def test_extends(self):
c1 = self.config.clusters.get('c1')
c2 = self.config.clusters.get('c2')
c3 = self.config.clusters.get('c3')
c2_settings = ['__name__', 'extends', 'keyname', 'key_location',
'cluster_size', 'node_instance_type',
'master_instance_type', 'volumes']
c3_settings = ['__name__', 'extends', 'keyname', 'key_location',
'cluster_size', 'volumes']
for key in c1:
if key in c2 and key not in c2_settings:
assert c2[key] == c1[key]
else:
# below only true for default test config
# not required in general
assert c2[key] != c1[key]
for key in c2:
if key in c3 and key not in c3_settings:
assert c3[key] == c2[key]
else:
# below only true for default test config
# not required in general
assert c3[key] != c2[key]
def test_order_invariance(self):
"""
Loads all cluster sections in the test config in all possible orders
(i.e. c1,c2,c3, c3,c1,c2, etc.) and test that the results are the same
"""
cfg = self.config
orig = cfg.clusters
cfg.clusters = None
sections = cfg._get_sections('cluster')
for perm in utils.permute(sections):
new = cfg._load_cluster_sections(perm)
assert new == orig
def test_plugins(self):
c1 = self.config.get_cluster_template('c1')
plugs = c1.plugins
assert len(plugs) == 3
# test that order is preserved
p1, p2, p3 = plugs
p1_name = p1.__name__
p1_class = utils.get_fq_class_name(p1)
p2_name = p2.__name__
p2_class = utils.get_fq_class_name(p2)
p3_name = p3.__name__
p3_class = utils.get_fq_class_name(p3)
assert p1_name == 'p1'
assert p1_class == 'starcluster.tests.mytestplugin.SetupClass'
assert p1.my_arg == '23'
assert p1.my_other_arg == 'skidoo'
assert p2_name == 'p2'
setup_class2 = 'starcluster.tests.mytestplugin.SetupClass2'
assert p2_class == setup_class2
assert p2.my_arg == 'hello'
assert p2.my_other_arg == 'world'
assert p3_name == 'p3'
setup_class3 = 'starcluster.tests.mytestplugin.SetupClass3'
assert p3_class == setup_class3
assert p3.my_arg == 'bon'
assert p3.my_other_arg == 'jour'
assert p3.my_other_other_arg == 'monsignour'
def test_plugin_not_defined(self):
try:
self.get_custom_config(**{'c1_plugs': 'p1,p2,p233'})
except exception.ConfigError:
pass
else:
raise Exception(
'config allows non-existent plugins to be specified')
def test_keypairs(self):
kpairs = self.config.keys
assert len(kpairs) == 3
k1 = kpairs.get('k1')
k2 = kpairs.get('k2')
k3 = kpairs.get('k3')
dcfg = tests.templates.config.default_config
k1_location = os.path.expanduser(dcfg['k1_location'])
k2_location = dcfg['k2_location']
k3_location = dcfg['k3_location']
assert k1 and k1['key_location'] == k1_location
assert k2 and k2['key_location'] == k2_location
assert k3 and k3['key_location'] == k3_location
def test_keypair_not_defined(self):
try:
self.get_custom_config(**{'c1_keyname': 'k2323'})
except exception.ConfigError:
pass
else:
raise Exception(
'config allows non-existent keypairs to be specified')
def test_invalid_config(self):
"""
Test that reading a non-INI formatted file raises an exception
"""
tmp_file = tempfile.NamedTemporaryFile()
tmp_file.write(
"<html>random garbage file with no section headings</html>")
tmp_file.flush()
try:
config.StarClusterConfig(tmp_file.name, cache=True).load()
except exception.ConfigHasNoSections:
pass
else:
raise Exception("config allows non-INI formatted files")
def test_empty_config(self):
"""
Test that reading an empty config generates no errors and that aws
credentials can be read from the environment.
"""
aws_key = 'testkey'
aws_secret_key = 'testsecret'
os.environ['AWS_ACCESS_KEY_ID'] = aws_key
os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_key
tmp_file = tempfile.NamedTemporaryFile()
cfg = config.StarClusterConfig(tmp_file.name, cache=True).load()
assert cfg.aws['aws_access_key_id'] == aws_key
assert cfg.aws['aws_secret_access_key'] == aws_secret_key
del os.environ['AWS_ACCESS_KEY_ID']
del os.environ['AWS_SECRET_ACCESS_KEY']
def test_cyclical_extends(self):
"""
Test that cyclical extends in the config raises an exception
"""
try:
self.get_custom_config(**{'c2_extends': 'c3',
'c3_extends': 'c2'})
self.get_custom_config(**{'c2_extends': 'c3',
'c3_extends': 'c4',
'c4_extends': 'c2'})
except exception.ConfigError:
pass
else:
raise Exception('config allows cyclical extends graph')
def test_choices(self):
"""
Test that config enforces a value to be one of a list of choices if
specified
"""
try:
self.get_custom_config(**{'c1_shell': 'blahblah'})
except exception.ConfigError:
pass
else:
raise Exception('config not enforcing choices for setting')
def test_multiple_instance_types(self):
"""
Test that config properly handles multiple instance types syntax
(within node_instance_type setting)
"""
invalid_cases = [
{'c1_node_type': 'c1.xlarge:ami-asdffdas'},
{'c1_node_type': 'c1.xlarge:3'},
{'c1_node_type': 'c1.xlarge:ami-asdffdas:3'},
{'c1_node_type': 'c1.xlarge:asdf:asdf:asdf,m1.small'},
{'c1_node_type': 'c1.asdf:4, m1.small'},
{'c1_node_type': 'c1.xlarge: 0, m1.small'},
{'c1_node_type': 'c1.xlarge:-1, m1.small'}]
for case in invalid_cases:
try:
self.get_custom_config(**case)
except exception.ConfigError:
pass
else:
raise Exception(('config allows invalid multiple instance ' +
'type syntax: %s') % case)
valid_cases = [
{'c1_node_type': 'c1.xlarge:3, m1.small'},
{'c1_node_type': 'c1.xlarge:ami-asdfasdf:3, m1.small'},
{'c1_node_type': 'c1.xlarge:ami-asdfasdf:3, m1.large, m1.small'},
{'c1_node_type': 'm1.large, c1.xlarge:ami-asdfasdf:3, m1.large, ' +
'm1.small'},
{'c1_node_type': 'c1.xlarge:ami-asdfasdf:2, m1.large:2, m1.small'},
]
for case in valid_cases:
try:
self.get_custom_config(**case)
except exception.ConfigError:
raise Exception(('config rejects valid multiple instance ' +
'type syntax: %s') % case)
def test_inline_comments(self):
"""
Test that config ignores inline comments.
"""
invalid_case = {'c1_node_type': 'c1.xlarge:3, m1.small# some comment'}
try:
self.get_custom_config(**invalid_case)
except exception.ConfigError:
pass
else:
raise Exception(('config incorrectly ignores line with non-inline '
'comment pound sign: %s') % invalid_case)
valid_case = {'c1_node_type': 'c1.xlarge:3, m1.small # some #comment '}
try:
self.get_custom_config(**valid_case)
except exception.ConfigError:
raise Exception(('config does not ignore inline '
'comment: %s') % valid_case)
|
jantuomi/py3status | refs/heads/master | py3status/modules/pingdom.py | 10 | # -*- coding: utf-8 -*-
"""
Display the latest response time of the configured Pingdom checks.
We also verify the status of the checks and colorize if needed.
Pingdom API doc : https://www.pingdom.com/features/api/documentation/
Configuration parameters:
- app_key : create an APP KEY on pingdom first
- cache_timeout : how often to refresh the check from pingdom
- checks : comma separated pingdom check names to display
- login : pingdom login
- max_latency : maximal latency before coloring the output
- password : pingdom password
- request_timeout : pingdom API request timeout
Requires:
- requests python module from pypi
https://pypi.python.org/pypi/requests
"""
import requests
from time import time
class Py3status:
"""
"""
# available configuration parameters
app_key = ''
cache_timeout = 600
checks = ''
login = ''
max_latency = 500
password = ''
request_timeout = 15
def pingdom_checks(self, i3s_output_list, i3s_config):
response = {'full_text': ''}
# parse some configuration parameters
if not isinstance(self.checks, list):
self.checks = self.checks.split(',')
r = requests.get(
'https://api.pingdom.com/api/2.0/checks',
auth=(self.login, self.password),
headers={'App-Key': self.app_key},
timeout=self.request_timeout,
)
result = r.json()
if 'checks' in result:
for check in [
ck for ck in result['checks'] if ck['name'] in self.checks
]:
if check['status'] == 'up':
response['full_text'] += '{}: {}ms, '.format(
check['name'],
check['lastresponsetime']
)
if check['lastresponsetime'] > self.max_latency:
response.update(
{'color': i3s_config['color_degraded']}
)
else:
                    response['full_text'] += '{}: DOWN'.format(
                        check['name']
                    )
response.update({'color': i3s_config['color_bad']})
response['full_text'] = response['full_text'].strip(', ')
response['cached_until'] = time() + self.cache_timeout
return response
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
from time import sleep
x = Py3status()
config = {
'color_good': '#00FF00',
'color_bad': '#FF0000',
}
while True:
print(x.pingdom_checks([], config))
sleep(1)
|